diff --git a/.github/workflows/arbitrator-ci.yml b/.github/workflows/arbitrator-ci.yml index 392eb876c0..a8980a0e31 100644 --- a/.github/workflows/arbitrator-ci.yml +++ b/.github/workflows/arbitrator-ci.yml @@ -6,30 +6,36 @@ on: inputs: enable_tmate: type: boolean - description: 'Enable tmate' + description: "Enable tmate" required: false default: false merge_group: pull_request: paths: - - 'arbitrator/**' - - 'contracts' - - '.github/workflows/arbitrator-ci.yml' - - 'Makefile' + - "arbitrator/**" + - "contracts" + - ".github/workflows/arbitrator-ci.yml" + - "Makefile" push: branches: - master env: RUST_BACKTRACE: 1 -# RUSTFLAGS: -Dwarnings # TODO: re-enable after wasmer upgrade + # RUSTFLAGS: -Dwarnings # TODO: re-enable after wasmer upgrade WABT_VERSION: 1.0.32 jobs: arbitrator: name: Run Arbitrator tests - runs-on: ubuntu-8 + runs-on: self-hosted steps: + - name: Cleanup build folder + run: | + ls -la ./ + rm -rf ./* || true + rm -rf ./.??* || true + ls -la ./ - name: Setup tmate session uses: mxschmitt/action-tmate@v3 if: ${{ github.event_name == 'workflow_dispatch' && inputs.enable_tmate }} @@ -64,23 +70,23 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '18' - cache: 'yarn' - cache-dependency-path: '**/yarn.lock' + node-version: "18" + cache: "yarn" + cache-dependency-path: "**/yarn.lock" - name: Install rust stable uses: dtolnay/rust-toolchain@stable with: - toolchain: 'stable' - components: 'llvm-tools-preview, rustfmt, clippy' + toolchain: "stable" + components: "llvm-tools-preview, rustfmt, clippy" - name: Install rust nightly uses: dtolnay/rust-toolchain@nightly id: install-rust-nightly with: - toolchain: 'nightly-2024-08-06' - targets: 'wasm32-wasi, wasm32-unknown-unknown' - components: 'rust-src, rustfmt, clippy' + toolchain: "nightly-2024-08-06" + targets: "wasm32-wasi, wasm32-unknown-unknown" + components: "rust-src, rustfmt, clippy" - name: Set STYLUS_NIGHTLY_VER environment variable run: echo 
"STYLUS_NIGHTLY_VER=+$(rustup toolchain list | grep '^nightly' | head -n1 | cut -d' ' -f1)" >> "$GITHUB_ENV" @@ -148,7 +154,7 @@ jobs: # This is the name of the cache folder. # The cache folder will be placed in the build directory, # so make sure it doesn't conflict with anything! - actions-cache-folder: 'emsdk-cache' + actions-cache-folder: "emsdk-cache" no-cache: true - name: Build cbrotli-wasm diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index acd6295b7c..4a482ced96 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -13,14 +13,14 @@ on: jobs: test: name: Go Tests - runs-on: ubuntu-8 + runs-on: self-hosted # Creates a redis container for redis tests services: redis: image: redis ports: - - 6379:6379 + - 6379:6379 strategy: fail-fast: false @@ -39,9 +39,9 @@ jobs: - name: Setup nodejs uses: actions/setup-node@v3 with: - node-version: '18' - cache: 'yarn' - cache-dependency-path: '**/yarn.lock' + node-version: "18" + cache: "yarn" + cache-dependency-path: "**/yarn.lock" - name: Install go uses: actions/setup-go@v4 @@ -56,17 +56,17 @@ jobs: - name: Install rust stable uses: dtolnay/rust-toolchain@stable with: - toolchain: 'stable' - targets: 'wasm32-wasi, wasm32-unknown-unknown' - components: 'llvm-tools-preview, rustfmt, clippy' + toolchain: "stable" + targets: "wasm32-wasi, wasm32-unknown-unknown" + components: "llvm-tools-preview, rustfmt, clippy" - name: Install rust nightly uses: dtolnay/rust-toolchain@nightly id: install-rust-nightly with: - toolchain: 'nightly-2024-08-06' - targets: 'wasm32-wasi, wasm32-unknown-unknown' - components: 'rust-src, rustfmt, clippy' + toolchain: "nightly-2024-08-06" + targets: "wasm32-wasi, wasm32-unknown-unknown" + components: "rust-src, rustfmt, clippy" - name: Set STYLUS_NIGHTLY_VER environment variable run: echo "STYLUS_NIGHTLY_VER=+$(rustup toolchain list | grep '^nightly' | head -n1 | cut -d' ' -f1)" >> "$GITHUB_ENV" diff --git a/.github/workflows/codeql-analysis.yml 
b/.github/workflows/codeql-analysis.yml index 1cde8f06b9..170e300ecf 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -13,20 +13,20 @@ name: "CodeQL" on: push: - branches: [ "master" ] + branches: ["master"] merge_group: - branches: [ "master" ] + branches: ["master"] pull_request: # The branches below must be a subset of the branches above - branches: [ "master" ] + branches: ["master"] schedule: - - cron: '18 21 * * 5' + - cron: "18 21 * * 5" jobs: analyze: name: Analyze if: github.repository == 'OffchainLabs/nitro' # don't run in any forks without "Advanced Security" enabled - runs-on: ubuntu-8 + runs-on: ubuntu-latest permissions: actions: read contents: read @@ -37,108 +37,108 @@ jobs: strategy: fail-fast: false matrix: - language: [ 'go' ] + language: ["go"] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support steps: - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: true - - - name: Install dependencies - run: sudo apt update && sudo apt install -y wabt - - # Initializes the CodeQL tools for scanning. - - name: Initialize CodeQL - uses: github/codeql-action/init@v2 - with: - languages: ${{ matrix.language }} - # If you wish to specify custom queries, you can do so here or in a config file. - # By default, queries listed here will override any specified in a config file. - # Prefix the list here with "+" to use these queries and those in the config file. 
- - # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs - # queries: security-extended,security-and-quality - config-file: ./.github/codeql/codeql-config.yml - - - name: Setup nodejs - uses: actions/setup-node@v3 - with: - node-version: '18' - cache: 'yarn' - cache-dependency-path: '**/yarn.lock' - - - name: Install go - uses: actions/setup-go@v4 - with: - go-version: 1.21.x - - - name: Install rust stable - uses: dtolnay/rust-toolchain@stable - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@v1 - - - name: Cache Rust Build Products - uses: actions/cache@v3 - with: - path: | - ~/.cargo/registry/ - ~/.cargo/git/ - arbitrator/target/ - arbitrator/wasm-libraries/target/ - arbitrator/wasm-libraries/soft-float/SoftFloat/build - target/etc/initial-machine-cache/ - key: ${{ runner.os }}-cargo-${{ steps.install-rust.outputs.rustc_hash }}-min-${{ hashFiles('arbitrator/Cargo.lock') }} - restore-keys: ${{ runner.os }}-cargo-${{ steps.install-rust.outputs.rustc_hash }}- - - - name: Cache wabt build - id: cache-wabt - uses: actions/cache@v3 - with: - path: ~/wabt-prefix - key: ${{ runner.os }}-wabt-codeql-${{ env.WABT_VERSION }} - - - name: Cache cbrotli - uses: actions/cache@v3 - id: cache-cbrotli - with: - path: | - target/include/brotli/ - target/lib-wasm/ - target/lib/libbrotlicommon-static.a - target/lib/libbrotlienc-static.a - target/lib/libbrotlidec-static.a - key: ${{ runner.os }}-brotli-3a-${{ hashFiles('scripts/build-brotli.sh') }}-${{ hashFiles('.github/workflows/arbitrator-ci.yaml') }} - restore-keys: ${{ runner.os }}-brotli- - - - name: Build cbrotli-local - if: steps.cache-cbrotli.outputs.cache-hit != 'true' - run: ./scripts/build-brotli.sh -l - - - name: Cache Build Products - uses: actions/cache@v3 - with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - key: ${{ runner.os }}-go-${{ 
hashFiles('**/go.sum') }} - restore-keys: ${{ runner.os }}-go- - - - name: Build all lint dependencies - run: make -j build-node-deps - - # ℹ️ Command-line programs to run using the OS shell. - # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun - - # If the Autobuild fails above, remove it and uncomment the following three lines. - # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. - - # - run: | - # echo "Run, Build Application using script" - # ./location_of_script_within_repo/buildscript.sh - - - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: true + + - name: Install dependencies + run: sudo apt update && sudo apt install -y wabt + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. 
+ + # Details on CodeQL's query packs refer to : https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + config-file: ./.github/codeql/codeql-config.yml + + - name: Setup nodejs + uses: actions/setup-node@v3 + with: + node-version: "18" + cache: "yarn" + cache-dependency-path: "**/yarn.lock" + + - name: Install go + uses: actions/setup-go@v4 + with: + go-version: 1.21.x + + - name: Install rust stable + uses: dtolnay/rust-toolchain@stable + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@v1 + + - name: Cache Rust Build Products + uses: actions/cache@v3 + with: + path: | + ~/.cargo/registry/ + ~/.cargo/git/ + arbitrator/target/ + arbitrator/wasm-libraries/target/ + arbitrator/wasm-libraries/soft-float/SoftFloat/build + target/etc/initial-machine-cache/ + key: ${{ runner.os }}-cargo-${{ steps.install-rust.outputs.rustc_hash }}-min-${{ hashFiles('arbitrator/Cargo.lock') }} + restore-keys: ${{ runner.os }}-cargo-${{ steps.install-rust.outputs.rustc_hash }}- + + - name: Cache wabt build + id: cache-wabt + uses: actions/cache@v3 + with: + path: ~/wabt-prefix + key: ${{ runner.os }}-wabt-codeql-${{ env.WABT_VERSION }} + + - name: Cache cbrotli + uses: actions/cache@v3 + id: cache-cbrotli + with: + path: | + target/include/brotli/ + target/lib-wasm/ + target/lib/libbrotlicommon-static.a + target/lib/libbrotlienc-static.a + target/lib/libbrotlidec-static.a + key: ${{ runner.os }}-brotli-3a-${{ hashFiles('scripts/build-brotli.sh') }}-${{ hashFiles('.github/workflows/arbitrator-ci.yaml') }} + restore-keys: ${{ runner.os }}-brotli- + + - name: Build cbrotli-local + if: steps.cache-cbrotli.outputs.cache-hit != 'true' + run: ./scripts/build-brotli.sh -l + + - name: Cache Build Products + uses: actions/cache@v3 + with: + path: | + ~/go/pkg/mod + ~/.cache/go-build + key: ${{ runner.os }}-go-${{ 
hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-go- + + - name: Build all lint dependencies + run: make -j build-node-deps + + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + + # If the Autobuild fails above, remove it and uncomment the following three lines. + # modify them (or add more) to build your code if your project, please refer to the EXAMPLE below for guidance. + + # - run: | + # echo "Run, Build Application using script" + # ./location_of_script_within_repo/buildscript.sh + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 2aacf32f00..ba6239b050 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -13,7 +13,7 @@ on: jobs: docker: name: Docker build - runs-on: ubuntu-8 + runs-on: ubuntu-latest services: # local registery registry: diff --git a/.github/workflows/release-ci.yml b/.github/workflows/release-ci.yml index 5282510e87..b7156cd303 100644 --- a/.github/workflows/release-ci.yml +++ b/.github/workflows/release-ci.yml @@ -6,25 +6,25 @@ on: jobs: build_and_run: - runs-on: ubuntu-8 + runs-on: ubuntu-latest steps: - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: recursive + - name: Checkout + uses: actions/checkout@v4 + with: + submodules: recursive - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 - with: - driver-opts: network=host + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + with: + driver-opts: network=host - - name: Cache Docker layers - uses: actions/cache@v3 - with: - path: /tmp/.buildx-cache - key: ${{ runner.os }}-buildx-${{ hashFiles('Dockerfile') }} - restore-keys: ${{ runner.os }}-buildx- + - name: Cache Docker layers + uses: actions/cache@v3 + with: + path: /tmp/.buildx-cache + key: ${{ runner.os }}-buildx-${{ 
hashFiles('Dockerfile') }} + restore-keys: ${{ runner.os }}-buildx- - - name: Startup Nitro testnode - run: ./scripts/startup-testnode.bash + - name: Startup Nitro testnode + run: ./scripts/startup-testnode.bash diff --git a/.github/workflows/shellcheck-ci.yml b/.github/workflows/shellcheck-ci.yml index d1c7b58580..987c528e2d 100644 --- a/.github/workflows/shellcheck-ci.yml +++ b/.github/workflows/shellcheck-ci.yml @@ -12,7 +12,7 @@ on: jobs: shellcheck: name: Run ShellCheck - runs-on: ubuntu-8 + runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.gitmodules b/.gitmodules index d4d26282ae..3a662abbe2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -12,8 +12,7 @@ url = https://github.com/google/brotli.git [submodule "contracts"] path = contracts - url = https://github.com/OffchainLabs/nitro-contracts.git - branch = develop + url = https://github.com/celestiaorg/nitro-contracts.git [submodule "arbitrator/wasm-testsuite/testsuite"] path = arbitrator/wasm-testsuite/testsuite url = https://github.com/WebAssembly/testsuite.git @@ -22,7 +21,7 @@ url = https://github.com/OffchainLabs/wasmer.git [submodule "nitro-testnode"] path = nitro-testnode - url = https://github.com/OffchainLabs/nitro-testnode.git + url = https://github.com/celestiaorg/nitro-testnode [submodule "arbitrator/langs/rust"] path = arbitrator/langs/rust url = https://github.com/OffchainLabs/stylus-sdk-rs.git diff --git a/Dockerfile b/Dockerfile index 9138ed30ad..fefb6699d8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -42,10 +42,10 @@ WORKDIR /workspace RUN apt-get update && apt-get install -y curl build-essential=12.9 FROM wasm-base AS wasm-libs-builder - # clang / lld used by soft-float wasm +# clang / lld used by soft-float wasm RUN apt-get update && \ apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt - # pinned rust 1.80.1 +# pinned rust 1.80.1 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.80.1 
--target x86_64-unknown-linux-gnu wasm32-unknown-unknown wasm32-wasi COPY ./Makefile ./ COPY arbitrator/Cargo.* arbitrator/ @@ -77,6 +77,7 @@ COPY ./blsSignatures ./blsSignatures COPY ./cmd/chaininfo ./cmd/chaininfo COPY ./cmd/replay ./cmd/replay COPY ./das/dastree ./das/dastree +COPY ./das/celestia ./das/celestia COPY ./precompiles ./precompiles COPY ./statetransfer ./statetransfer COPY ./util ./util @@ -90,6 +91,7 @@ COPY ./fastcache ./fastcache COPY ./go-ethereum ./go-ethereum COPY --from=brotli-wasm-export / target/ COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/ +COPY --from=contracts-builder workspace/contracts/build/contracts/src/celestia/ contracts/build/contracts/src/celestia/ COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/ COPY --from=contracts-builder workspace/.make/ .make/ RUN PATH="$PATH:/usr/local/go/bin" NITRO_BUILD_IGNORE_TIMESTAMPS=1 make build-wasm-bin @@ -219,6 +221,7 @@ COPY ./scripts/download-machine.sh . 
RUN ./download-machine.sh consensus-v30 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b && true RUN ./download-machine.sh consensus-v31 0x260f5fa5c3176a856893642e149cf128b5a8de9f828afec8d11184415dd8dc69 RUN ./download-machine.sh consensus-v32 0x184884e1eb9fefdc158f6c8ac912bb183bf3cf83f0090317e0bc4ac5860baa39 +RUN ./download-machine.sh v3.2.1-rc.1 0xe81f986823a85105c5fd91bb53b4493d38c0c26652d23f76a7405ac889908287 celestiaorg FROM golang:1.21.10-bookworm AS node-builder WORKDIR /workspace diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 0f537478eb..8a89076c40 100644 --- a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -417,3 +417,11 @@ pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine) -> RustByteArr pub unsafe extern "C" fn arbitrator_free_proof(proof: RustByteArray) { drop(Vec::from_raw_parts(proof.ptr, proof.len, proof.capacity)) } + +#[no_mangle] +pub unsafe extern "C" fn arbitrator_get_opcode(mach: *mut Machine) -> u16 { + match (*mach).get_next_instruction() { + Some(instruction) => instruction.opcode.repr(), + None => panic!("Failed to get next opcode for Machine"), + } +} diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index 4ece1f7bf2..5a782cc3a6 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -3051,6 +3051,13 @@ impl Machine { { data.push(0); // inbox proof type out!(msg_data); + match inbox_identifier { + InboxIdentifier::Sequencer => { + out!(msg_idx.to_be_bytes()); + data.push(0x0); + } + InboxIdentifier::Delayed => data.push(0x1), + } } } else { unreachable!() diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 44b360e76e..a7882af2aa 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -105,8 +105,8 @@ type BatchPoster struct { bridgeAddr common.Address gasRefunderAddr common.Address building *buildingBatch - dapWriter daprovider.Writer dapReaders 
[]daprovider.Reader + dapWriters []daprovider.Writer dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] @@ -299,7 +299,7 @@ type BatchPosterOpts struct { Config BatchPosterConfigFetcher DeployInfo *chaininfo.RollupAddresses TransactOpts *bind.TransactOpts - DAPWriter daprovider.Writer + DAPWriters []daprovider.Writer ParentChainID *big.Int DAPReaders []daprovider.Reader } @@ -346,7 +346,7 @@ func NewBatchPoster(ctx context.Context, opts *BatchPosterOpts) (*BatchPoster, e seqInboxAddr: opts.DeployInfo.SequencerInbox, gasRefunderAddr: opts.Config().gasRefunder, bridgeAddr: opts.DeployInfo.Bridge, - dapWriter: opts.DAPWriter, + dapWriters: opts.DAPWriters, redisLock: redisLock, dapReaders: opts.DAPReaders, } @@ -1126,7 +1126,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) } var use4844 bool config := b.config() - if config.Post4844Blobs && b.dapWriter == nil && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { + if config.Post4844Blobs && len(b.dapWriters) == 0 && latestHeader.ExcessBlobGas != nil && latestHeader.BlobGasUsed != nil { arbOSVersion, err := b.arbOSVersionGetter.ArbOSVersionForMessageNumber(arbutil.MessageIndex(arbmath.SaturatingUSub(uint64(batchPosition.MessageCount), 1))) if err != nil { return false, err @@ -1351,7 +1351,7 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) return false, nil } - if b.dapWriter != nil { + if len(b.dapWriters) > 0 { if !b.redisLock.AttemptLock(ctx) { return false, errAttemptLockFailed } @@ -1365,15 +1365,24 @@ func (b *BatchPoster) maybePostSequencerBatch(ctx context.Context) (bool, error) batchPosterDAFailureCounter.Inc(1) return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce) } - // #nosec G115 - sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, 
uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain) - if err != nil { - batchPosterDAFailureCounter.Inc(1) - return false, err + + // attempt to store data using one of the dapWriters, if it fails and fallbacks are disabled, return a hard error + seqMsg := sequencerMsg + for _, writer := range b.dapWriters { + log.Info("Attempting to store data with dapWriter", "type", writer.Type()) + sequencerMsg, err = writer.Store(ctx, seqMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain) + if err != nil { + if config.DisableDapFallbackStoreDataOnChain { + log.Error("Error while attempting to post batch and on chain fallback is disabled", "error", err) + return false, err + } + log.Error("Error when trying to store data with dapWriter", "type", writer.Type()) + continue + } + // if we successfully posted a batch with a dapWriter, we move on and ignore the rest + break } - batchPosterDASuccessCounter.Inc(1) - batchPosterDALastSuccessfulActionGauge.Update(time.Now().Unix()) } prevMessageCount := batchPosition.MessageCount diff --git a/arbnode/node.go b/arbnode/node.go index a9da4ea24b..81cfa239c4 100644 --- a/arbnode/node.go +++ b/arbnode/node.go @@ -34,6 +34,8 @@ import ( "github.com/offchainlabs/nitro/broadcaster" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/das" + "github.com/offchainlabs/nitro/das/celestia" + celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types" "github.com/offchainlabs/nitro/execution" "github.com/offchainlabs/nitro/execution/gethexec" "github.com/offchainlabs/nitro/solgen/go/bridgegen" @@ -93,6 +95,8 @@ type Config struct { TransactionStreamer TransactionStreamerConfig `koanf:"transaction-streamer" reload:"hot"` Maintenance MaintenanceConfig `koanf:"maintenance" reload:"hot"` ResourceMgmt resourcemanager.Config `koanf:"resource-mgmt" reload:"hot"` + Celestia celestia.CelestiaConfig `koanf:"celestia-cfg"` + 
DAPreference []string `koanf:"da-preference"` // SnapSyncConfig is only used for testing purposes, these should not be configured in production. SnapSyncTest SnapSyncConfig } @@ -158,6 +162,7 @@ func ConfigAddOptions(prefix string, f *flag.FlagSet, feedInputEnable bool, feed DangerousConfigAddOptions(prefix+".dangerous", f) TransactionStreamerConfigAddOptions(prefix+".transaction-streamer", f) MaintenanceConfigAddOptions(prefix+".maintenance", f) + celestia.CelestiaDAConfigAddOptions(prefix+".celestia-cfg", f) } var ConfigDefault = Config{ @@ -548,6 +553,8 @@ func createNodeImpl( var daReader das.DataAvailabilityServiceReader var dasLifecycleManager *das.LifecycleManager var dasKeysetFetcher *das.KeysetFetcher + var celestiaReader celestiaTypes.CelestiaReader + var celestiaWriter celestiaTypes.CelestiaWriter if config.DataAvailability.Enable { if config.BatchPoster.Enable { daWriter, daReader, dasKeysetFetcher, dasLifecycleManager, err = das.CreateBatchPosterDAS(ctx, &config.DataAvailability, dataSigner, l1client, deployInfo.SequencerInbox) @@ -573,6 +580,16 @@ func createNodeImpl( return nil, errors.New("a data availability service is required for this chain, but it was not configured") } + if config.Celestia.Enable { + celestiaService, err := celestia.NewCelestiaDASRPCClient(config.Celestia.URL) + if err != nil { + return nil, err + } + + celestiaReader = celestiaService + celestiaWriter = celestiaService + } + // We support a nil txStreamer for the pruning code if txStreamer != nil && txStreamer.chainConfig.ArbitrumChainParams.DataAvailabilityCommittee && daReader == nil { return nil, errors.New("data availability service required but unconfigured") @@ -584,6 +601,9 @@ func createNodeImpl( if blobReader != nil { dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(blobReader)) } + if celestiaReader != nil { + dapReaders = append(dapReaders, celestiaTypes.NewReaderForCelestia(celestiaReader)) + } inboxTracker, err := NewInboxTracker(arbDb, 
txStreamer, dapReaders, config.SnapSyncTest) if err != nil { return nil, err @@ -708,9 +728,27 @@ func createNodeImpl( if txOptsBatchPoster == nil && config.BatchPoster.DataPoster.ExternalSigner.URL == "" { return nil, errors.New("batchposter, but no TxOpts") } - var dapWriter daprovider.Writer - if daWriter != nil { - dapWriter = daprovider.NewWriterForDAS(daWriter) + dapWriters := []daprovider.Writer{} + for _, providerName := range config.DAPreference { + nilWriter := false + switch strings.ToLower(providerName) { + case "anytrust": + if daWriter != nil { + dapWriters = append(dapWriters, daprovider.NewWriterForDAS(daWriter)) + } else { + nilWriter = true + } + case "celestia": + if celestiaWriter != nil { + dapWriters = append(dapWriters, celestiaTypes.NewWriterForCelestia(celestiaWriter)) + } else { + nilWriter = true + } + } + + if nilWriter { + log.Error("encountered nil daWriter", "daWriter", providerName) + } } batchPoster, err = NewBatchPoster(ctx, &BatchPosterOpts{ DataPosterDB: rawdb.NewTable(arbDb, storage.BatchPosterPrefix), @@ -722,7 +760,7 @@ func createNodeImpl( Config: func() *BatchPosterConfig { return &configFetcher.Get().BatchPoster }, DeployInfo: deployInfo, TransactOpts: txOptsBatchPoster, - DAPWriter: dapWriter, + DAPWriters: dapWriters, ParentChainID: parentChainID, DAPReaders: dapReaders, }) diff --git a/arbstate/daprovider/util.go b/arbstate/daprovider/util.go index d5a369bf3c..f99fca1041 100644 --- a/arbstate/daprovider/util.go +++ b/arbstate/daprovider/util.go @@ -72,6 +72,10 @@ func RecordPreimagesTo(preimages map[arbutil.PreimageType]map[common.Hash][]byte // which will retrieve the full batch data. const DASMessageHeaderFlag byte = 0x80 +// CelestiaMessageHeaderFlag indicates that this data is a Blob Pointer +// which will be used to retrieve data from Celestia +const CelestiaMessageHeaderFlag byte = 0x63 + // TreeDASMessageHeaderFlag indicates that this DAS certificate data employs the new merkelization strategy. 
// Ignored when DASMessageHeaderFlag is not set. const TreeDASMessageHeaderFlag byte = 0x08 @@ -89,7 +93,7 @@ const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x5 const BrotliMessageHeaderByte byte = 0 // KnownHeaderBits is all header bits with known meaning to this nitro version -const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte +const KnownHeaderBits byte = DASMessageHeaderFlag | CelestiaMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte // hasBits returns true if `checking` has all `bits` func hasBits(checking byte, bits byte) bool { @@ -116,6 +120,10 @@ func IsBlobHashesHeaderByte(header byte) bool { return hasBits(header, BlobHashesHeaderFlag) } +func IsCelestiaMessageHeaderByte(header byte) bool { + return hasBits(header, CelestiaMessageHeaderFlag) +} + func IsBrotliMessageHeaderByte(b uint8) bool { return b == BrotliMessageHeaderByte } diff --git a/arbstate/daprovider/writer.go b/arbstate/daprovider/writer.go index a26e53c94d..040d2a1880 100644 --- a/arbstate/daprovider/writer.go +++ b/arbstate/daprovider/writer.go @@ -19,6 +19,9 @@ type Writer interface { timeout uint64, disableFallbackStoreDataOnChain bool, ) ([]byte, error) + + // Identifies a Writer by type + Type() string } // DAProviderWriterForDAS is generally meant to be only used by nitro. 
@@ -45,3 +48,7 @@ func (d *writerForDAS) Store(ctx context.Context, message []byte, timeout uint64 return Serialize(cert), nil } } + +func (d *writerForDAS) Type() string { + return "anytrust" +} diff --git a/arbstate/inbox.go b/arbstate/inbox.go index b58a7420b7..494c477a24 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -105,6 +105,8 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash if !foundDA { if daprovider.IsDASMessageHeaderByte(payload[0]) { log.Error("No DAS Reader configured, but sequencer message found with DAS header") + } else if daprovider.IsCelestiaMessageHeaderByte(payload[0]) { + log.Error("No Celestia Reader configured, but sequencer message found with Celestia header") } else if daprovider.IsBlobHashesHeaderByte(payload[0]) { return nil, daprovider.ErrNoBlobReader } diff --git a/audits/celestia/arbitrum_nitro_celestia_audit_report.pdf b/audits/celestia/arbitrum_nitro_celestia_audit_report.pdf new file mode 100644 index 0000000000..a2fa72183c Binary files /dev/null and b/audits/celestia/arbitrum_nitro_celestia_audit_report.pdf differ diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index c70ceb1d94..9e99073745 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -139,6 +139,7 @@ func main() { } loserEscrowAddress := common.HexToAddress(*loserEscrowAddressString) + if sequencerAddress != (common.Address{}) && ownerAddress != l1TransactionOpts.From { panic("cannot specify sequencer address if owner is not deployer") } diff --git a/cmd/ipfshelper/ipfshelper.bkup_go b/cmd/ipfshelper/ipfshelper.bkup_go index ccde492ca6..e1d8507a3a 100644 --- a/cmd/ipfshelper/ipfshelper.bkup_go +++ b/cmd/ipfshelper/ipfshelper.bkup_go @@ -14,9 +14,9 @@ import ( "sync" "github.com/ethereum/go-ethereum/log" + iface "github.com/ipfs/boxo/coreiface" + "github.com/ipfs/boxo/coreiface/options" "github.com/ipfs/go-libipfs/files" - coreiface "github.com/ipfs/interface-go-ipfs-core" - 
"github.com/ipfs/interface-go-ipfs-core/options" "github.com/ipfs/interface-go-ipfs-core/path" "github.com/ipfs/kubo/config" "github.com/ipfs/kubo/core" @@ -33,7 +33,7 @@ import ( const DefaultIpfsProfiles = "" type IpfsHelper struct { - api coreiface.CoreAPI + api iface.CoreAPI node *core.IpfsNode cfg *config.Config repoPath string @@ -276,6 +276,6 @@ func CanBeIpfsPath(pathString string) bool { } // TODO break abstraction for now til we figure out what fns are needed -func (h *IpfsHelper) GetAPI() coreiface.CoreAPI { +func (h *IpfsHelper) GetAPI() iface.CoreAPI { return h.api } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 1078f44808..bb088849ea 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -573,9 +573,9 @@ func mainImpl() int { return 1 } } - // If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox’s maxDataSize if the data availability service is not enabled. + // If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox’s maxDataSize if the data availability service and celestia DA are not enabled. // The 10kB gap is because its possible for the batch poster to exceed its MaxSize limit and produce batches of slightly larger size. 
- if nodeConfig.Node.BatchPoster.Enable && !nodeConfig.Node.DataAvailability.Enable { + if nodeConfig.Node.BatchPoster.Enable && (!nodeConfig.Node.DataAvailability.Enable && !nodeConfig.Node.Celestia.Enable) { if nodeConfig.Node.BatchPoster.MaxSize > seqInboxMaxDataSize-10000 { log.Error("batchPoster's MaxSize is too large") return 1 diff --git a/cmd/replay/main.go b/cmd/replay/main.go index 0fe56eb4c9..fc920fe18a 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -6,6 +6,7 @@ package main import ( "bytes" "context" + "encoding/binary" "encoding/hex" "encoding/json" "fmt" @@ -31,6 +32,8 @@ import ( "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" + "github.com/offchainlabs/nitro/das/celestia/tree" + celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types" "github.com/offchainlabs/nitro/das/dastree" "github.com/offchainlabs/nitro/gethhook" "github.com/offchainlabs/nitro/wavmio" @@ -153,6 +156,126 @@ func (r *BlobPreimageReader) Initialize(ctx context.Context) error { return nil } +type PreimageCelestiaReader struct { +} + +func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *celestiaTypes.BlobPointer) ([]byte, *celestiaTypes.SquareData, error) { + oracle := func(hash common.Hash) ([]byte, error) { + return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, hash) + } + + if blobPointer.SharesLength == 0 { + return nil, nil, fmt.Errorf("Error, shares length is %v", blobPointer.SharesLength) + } + // first, walk down the merkle tree + leaves, err := tree.MerkleTreeContent(oracle, common.BytesToHash(blobPointer.DataRoot[:])) + if err != nil { + log.Warn("Error revealing contents behind data root", "err", err) + return nil, nil, err + } + + squareSize := uint64(len(leaves)) / 2 + // split leaves in half to get row roots + rowRoots := leaves[:squareSize] + // We get the original data square size, wich is (size_of_the_extended_square / 
2) + odsSize := squareSize / 2 + + startRow := blobPointer.Start / odsSize + + if blobPointer.Start >= odsSize*odsSize { + // check that the square isn't just our share (very niche case, should only happens on local testing) + if blobPointer.Start != odsSize*odsSize && odsSize > 1 { + return nil, nil, fmt.Errorf("Error Start Index out of ODS bounds: index=%v odsSize=%v", blobPointer.Start, odsSize) + } + } + + // adjusted_end_index = adjusted_start_index + length - 1 + if blobPointer.Start+blobPointer.SharesLength < 1 { + return nil, nil, fmt.Errorf("Error getting number of shares in first row: index+length %v > 1", blobPointer.Start+blobPointer.SharesLength) + } + endIndexOds := blobPointer.Start + blobPointer.SharesLength - 1 + if endIndexOds >= odsSize*odsSize { + // check that the square isn't just our share (very niche case, should only happens on local testing) + if endIndexOds != odsSize*odsSize && odsSize > 1 { + return nil, nil, fmt.Errorf("Error End Index out of ODS bounds: index=%v odsSize=%v", endIndexOds, odsSize) + } + } + endRow := endIndexOds / odsSize + + if endRow >= odsSize || startRow >= odsSize { + return nil, nil, fmt.Errorf("Error rows out of bounds: startRow=%v endRow=%v odsSize=%v", startRow, endRow, odsSize) + } + + startColumn := blobPointer.Start % odsSize + endColumn := endIndexOds % odsSize + + if startRow == endRow && startColumn > endColumn { + log.Error("start and end row are the same, and startColumn >= endColumn", "startColumn", startColumn, "endColumn ", endColumn) + return []byte{}, nil, nil + } + + // adjust the math in the CelestiaPayload function in the inbox + + // we can take ods * ods -> end index in ods + // then we check that start index is in bounds, otherwise ignore -> return empty batch + // then we check that end index is in bounds, otherwise ignore + + // get rows behind row root and shares for our blob + rows := [][][]byte{} + shares := [][]byte{} + for i := startRow; i <= endRow; i++ { + row, err := 
tree.NmtContent(oracle, rowRoots[i]) + if err != nil { + return nil, nil, err + } + rows = append(rows, row) + + odsRow := row[:odsSize] + + // TODO explain the logic behind this branching + if startRow == endRow { + shares = append(shares, odsRow[startColumn:endColumn+1]...) + break + } else if i == startRow { + shares = append(shares, odsRow[startColumn:]...) + } else if i == endRow { + shares = append(shares, odsRow[:endColumn+1]...) + } else { + shares = append(shares, odsRow...) + } + } + + data := []byte{} + if tree.NamespaceSize*2+1 > uint64(len(shares[0])) || tree.NamespaceSize*2+5 > uint64(len(shares[0])) { + return nil, nil, fmt.Errorf("Error getting sequence length on share of size %v", len(shares[0])) + } + sequenceLength := binary.BigEndian.Uint32(shares[0][tree.NamespaceSize*2+1 : tree.NamespaceSize*2+5]) + for i, share := range shares { + // trim extra namespace + share := share[tree.NamespaceSize:] + if i == 0 { + data = append(data, share[tree.NamespaceSize+5:]...) + continue + } + data = append(data, share[tree.NamespaceSize+1:]...) 
+ } + + data = data[:sequenceLength] + squareData := celestiaTypes.SquareData{ + RowRoots: rowRoots, + ColumnRoots: leaves[squareSize:], + Rows: rows, + SquareSize: squareSize, + StartRow: startRow, + EndRow: endRow, + } + return data, &squareData, nil +} + +func (dasReader *PreimageCelestiaReader) GetProof(ctx context.Context, msg []byte) ([]byte, error) { + return nil, nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -210,27 +333,19 @@ func main() { } return wavmio.ReadInboxMessage(batchNum), nil } - readMessage := func(dasEnabled bool) *arbostypes.MessageWithMetadata { + readMessage := func() *arbostypes.MessageWithMetadata { var delayedMessagesRead uint64 if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - var dasReader daprovider.DASReader - var dasKeysetFetcher daprovider.DASKeysetFetcher - if dasEnabled { - // DAS batch and keysets are all together in the same preimage binary. 
- dasReader = &PreimageDASReader{} - dasKeysetFetcher = &PreimageDASReader{} - } backend := WavmInbox{} var keysetValidationMode = daprovider.KeysetPanicIfInvalid if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = daprovider.KeysetDontValidate } var dapReaders []daprovider.Reader - if dasReader != nil { - dapReaders = append(dapReaders, daprovider.NewReaderForDAS(dasReader, dasKeysetFetcher)) - } + dapReaders = append(dapReaders, daprovider.NewReaderForDAS(&PreimageDASReader{}, &PreimageDASReader{})) + dapReaders = append(dapReaders, celestiaTypes.NewReaderForCelestia(&PreimageCelestiaReader{})) dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) ctx := context.Background() @@ -288,7 +403,10 @@ func main() { } } - message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) + // need to add Celestia or just "ExternalDA" as an option to the ArbitrumChainParams + // for now we hard code Cthis to treu and hardcode Celestia in `readMessage` + // to test the integration + message := readMessage() chainContext := WavmChainContext{} newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, chainConfig, false) @@ -298,7 +416,7 @@ func main() { } else { // Initialize ArbOS with this init message and create the genesis block. 
- message := readMessage(false) + message := readMessage() initMessage, err := message.Message.ParseInitMessage() if err != nil { diff --git a/contracts b/contracts index 7396313311..c5777c0e2b 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 7396313311ab17cb30e2eef27cccf96f0a9e8f7f +Subproject commit c5777c0e2b4090052022b043c95ddf4bebe6eab7 diff --git a/das/celestia/celestiaDasRpcClient.go b/das/celestia/celestiaDasRpcClient.go new file mode 100644 index 0000000000..2808351ace --- /dev/null +++ b/das/celestia/celestiaDasRpcClient.go @@ -0,0 +1,93 @@ +package celestia + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/rpc" + celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types" + "github.com/offchainlabs/nitro/util/pretty" +) + +type CelestiaConfig struct { + Enable bool `koanf:"enable"` + URL string `koanf:"url"` +} + +type CelestiaDASClient struct { + clnt *rpc.Client + url string +} + +func CelestiaDAConfigAddOptions(prefix string, f *pflag.FlagSet) { + f.Bool(prefix+".enable", false, "Enable Celestia DA") + f.String(prefix+".url", "http://localhost:9876", "address to use against Celestia DA RPC service") +} + +func NewCelestiaDASRPCClient(target string) (*CelestiaDASClient, error) { + clnt, err := rpc.Dial(target) + if err != nil { + log.Error("Could not dial to Celestia DAS", "err", err) + return nil, err + } + return &CelestiaDASClient{ + clnt: clnt, + url: target, + }, nil +} + +func (c *CelestiaDASClient) Store(ctx context.Context, message []byte) ([]byte, error) { + log.Trace("celestia.CelestiaDASClient.Store(...)", "message", pretty.FirstFewBytes(message)) + ret := []byte{} + if err := c.clnt.CallContext(ctx, &ret, "celestia_store", hexutil.Bytes(message)); err != nil { + return nil, err + } + log.Info("Got result from Celestia DAS", "result", ret) + return ret, nil +} + +func (c 
*CelestiaDASClient) String() string { + return fmt.Sprintf("CelestiaDASClient{url:%s}", c.url) +} + +type ReadResult struct { + Message []byte `json:"message"` + RowRoots [][]byte `json:"row_roots"` + ColumnRoots [][]byte `json:"column_roots"` + Rows [][][]byte `json:"rows"` + SquareSize uint64 `json:"square_size"` // Refers to original data square size + StartRow uint64 `json:"start_row"` + EndRow uint64 `json:"end_row"` +} + +func (c *CelestiaDASClient) Read(ctx context.Context, blobPointer *celestiaTypes.BlobPointer) ([]byte, *celestiaTypes.SquareData, error) { + log.Trace("celestia.CelestiaDASClient.Read(...)", "blobPointer", blobPointer) + var ret ReadResult + if err := c.clnt.CallContext(ctx, &ret, "celestia_read", blobPointer); err != nil { + return nil, nil, err + } + + squareData := celestiaTypes.SquareData{ + RowRoots: ret.RowRoots, + ColumnRoots: ret.ColumnRoots, + Rows: ret.Rows, + SquareSize: ret.SquareSize, + StartRow: ret.StartRow, + EndRow: ret.EndRow, + } + + return ret.Message, &squareData, nil +} + +func (c *CelestiaDASClient) GetProof(ctx context.Context, msg []byte) ([]byte, error) { + res := []byte{} + err := c.clnt.CallContext(ctx, &res, "celestia_getProof", msg) + if err != nil { + return nil, err + } + return res, nil +} diff --git a/das/celestia/tree/hash.go b/das/celestia/tree/hash.go new file mode 100644 index 0000000000..bef9fcd7de --- /dev/null +++ b/das/celestia/tree/hash.go @@ -0,0 +1,37 @@ +package tree + +import ( + "github.com/offchainlabs/nitro/arbutil" + "github.com/tendermint/tendermint/crypto/tmhash" + + "github.com/ethereum/go-ethereum/common" +) + +// TODO: make these have a large predefined capacity +var ( + leafPrefix = []byte{0} + innerPrefix = []byte{1} +) + +// returns tmhash() +func emptyHash() []byte { + return tmhash.Sum([]byte{}) +} + +// returns tmhash(0x00 || leaf) +func leafHash(record func(bytes32, []byte, arbutil.PreimageType), leaf []byte) []byte { + preimage := append(leafPrefix, leaf...) 
+ hash := tmhash.Sum(preimage) + + record(common.BytesToHash(hash), preimage, arbutil.Sha2_256PreimageType) + return hash +} + +// returns tmhash(0x01 || left || right) +func innerHash(record func(bytes32, []byte, arbutil.PreimageType), left []byte, right []byte) []byte { + preimage := append(innerPrefix, append(left, right...)...) + hash := tmhash.Sum(preimage) + + record(common.BytesToHash(hash), preimage, arbutil.Sha2_256PreimageType) + return tmhash.Sum(append(innerPrefix, append(left, right...)...)) +} diff --git a/das/celestia/tree/merkle_tree.go b/das/celestia/tree/merkle_tree.go new file mode 100644 index 0000000000..3fb8c19faf --- /dev/null +++ b/das/celestia/tree/merkle_tree.go @@ -0,0 +1,78 @@ +package tree + +import ( + "math/bits" + + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbutil" +) + +type bytes32 = common.Hash + +// HashFromByteSlices computes a Merkle tree where the leaves are the byte slice, +// in the provided order. It follows RFC-6962. +func HashFromByteSlices(record func(bytes32, []byte, arbutil.PreimageType), items [][]byte) []byte { + switch len(items) { + case 0: + emptyHash := emptyHash() + record(common.BytesToHash(emptyHash), []byte{}, arbutil.Sha2_256PreimageType) + return emptyHash + case 1: + return leafHash(record, items[0]) + default: + k := getSplitPoint(int64(len(items))) + left := HashFromByteSlices(record, items[:k]) + right := HashFromByteSlices(record, items[k:]) + return innerHash(record, left, right) + } +} + +// getSplitPoint returns the largest power of 2 less than length +func getSplitPoint(length int64) int64 { + if length < 1 { + panic("Trying to split a tree with size < 1") + } + uLength := uint(length) + bitlen := bits.Len(uLength) + k := int64(1 << uint(bitlen-1)) + if k == length { + k >>= 1 + } + return k +} + +// getChildrenHashes splits the preimage into the hashes of the left and right children. 
+func getChildrenHashes(preimage []byte) (leftChild, rightChild common.Hash, err error) { + leftChild = common.BytesToHash(preimage[:32]) + rightChild = common.BytesToHash(preimage[32:]) + return leftChild, rightChild, nil +} + +// MerkleTreeContent recursively walks down the Merkle tree and collects leaf node data. +func MerkleTreeContent(oracle func(bytes32) ([]byte, error), rootHash common.Hash) ([][]byte, error) { + stack := []common.Hash{rootHash} + var data [][]byte + + for len(stack) > 0 { + currentHash := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + preimage, err := oracle(currentHash) + if err != nil { + return nil, err + } + + if preimage[0] == leafPrefix[0] { + data = append(data, preimage[1:]) + } else { + leftChildHash, rightChildHash, err := getChildrenHashes(preimage[1:]) + if err != nil { + return nil, err + } + stack = append(stack, rightChildHash) + stack = append(stack, leftChildHash) + } + } + + return data, nil +} diff --git a/das/celestia/tree/nmt.go b/das/celestia/tree/nmt.go new file mode 100644 index 0000000000..f0d2a7b953 --- /dev/null +++ b/das/celestia/tree/nmt.go @@ -0,0 +1,74 @@ +package tree + +import ( + "errors" + + "github.com/celestiaorg/rsmt2d" + "github.com/ethereum/go-ethereum/common" +) + +// need to pass square size and axis index +func ComputeNmtRoot(createTreeFn rsmt2d.TreeConstructorFn, index uint, shares [][]byte) ([]byte, error) { + // create NMT with custom Hasher + // use create tree function, pass it to the ComputeNmtRoot function + tree := createTreeFn(rsmt2d.Row, index) + if !isComplete(shares) { + return nil, errors.New("can not compute root of incomplete row") + } + for _, d := range shares { + err := tree.Push(d) + if err != nil { + return nil, err + } + } + + return tree.Root() +} + +// isComplete returns true if all the shares are non-nil. 
+func isComplete(shares [][]byte) bool { + for _, share := range shares { + if share == nil { + return false + } + } + return true +} + +// getNmtChildrenHashes splits the preimage into the hashes of the left and right children of the NMT +// note that a leaf has the format minNID || maxNID || hash, here hash is the hash of the left and right +// (NodePrefix) || (leftMinNID || leftMaxNID || leftHash) || (rightMinNID || rightMaxNID || rightHash) +func getNmtChildrenHashes(hash []byte) (leftChild, rightChild []byte) { + hash = hash[1:] + flagLen := int(NamespaceSize * 2) + sha256Len := 32 + leftChild = hash[:flagLen+sha256Len] + rightChild = hash[flagLen+sha256Len:] + return leftChild, rightChild +} + +// walkMerkleTree recursively walks down the Merkle tree and collects leaf node data. +func NmtContent(oracle func(bytes32) ([]byte, error), rootHash []byte) ([][]byte, error) { + stack := [][]byte{rootHash} + var data [][]byte + + for len(stack) > 0 { + currentHash := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + preimage, err := oracle(common.BytesToHash(currentHash[NamespaceSize*2:])) + if err != nil { + return nil, err + } + + if preimage[0] == leafPrefix[0] { + data = append(data, preimage[1:]) + } else { + leftChildHash, rightChildHash := getNmtChildrenHashes(preimage) + stack = append(stack, rightChildHash) + stack = append(stack, leftChildHash) + } + } + + return data, nil +} diff --git a/das/celestia/tree/nmt_hasher.go b/das/celestia/tree/nmt_hasher.go new file mode 100644 index 0000000000..c7c5a23cd8 --- /dev/null +++ b/das/celestia/tree/nmt_hasher.go @@ -0,0 +1,43 @@ +package tree + +import ( + "crypto/sha256" + "hash" + + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbutil" +) + +// customHasher embeds hash.Hash and includes a map for the hash-to-preimage mapping +type NmtPreimageHasher struct { + hash.Hash + record func(bytes32, []byte, arbutil.PreimageType) + data []byte +} + +// Need to make sure this is writting 
relevant data into the tree +// Override the Sum method to capture the preimage +func (h *NmtPreimageHasher) Sum(b []byte) []byte { + hashed := h.Hash.Sum(nil) + hashKey := common.BytesToHash(hashed) + h.record(hashKey, append([]byte(nil), h.data...), arbutil.Sha2_256PreimageType) + return h.Hash.Sum(b) +} + +func (h *NmtPreimageHasher) Write(p []byte) (n int, err error) { + h.data = append(h.data, p...) + return h.Hash.Write(p) +} + +// Override the Reset method to clean the hash state and the data slice +func (h *NmtPreimageHasher) Reset() { + h.Hash.Reset() + h.data = h.data[:0] // Reset the data slice to be empty, but keep the underlying array +} + +func newNmtPreimageHasher(record func(bytes32, []byte, arbutil.PreimageType)) hash.Hash { + return &NmtPreimageHasher{ + Hash: sha256.New(), + record: record, + } +} diff --git a/das/celestia/tree/nmt_wrapper.go b/das/celestia/tree/nmt_wrapper.go new file mode 100644 index 0000000000..2ab8abd6a7 --- /dev/null +++ b/das/celestia/tree/nmt_wrapper.go @@ -0,0 +1,175 @@ +package tree + +import ( + "bytes" + "fmt" + "math" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/rsmt2d" + "github.com/offchainlabs/nitro/arbutil" +) + +// NMT Wrapper from celestia-app with support for populating a mapping of preimages + +const ( + NamespaceSize uint64 = 29 + NamespaceIDSize = 28 + NamespaceVersionMax = math.MaxUint8 +) + +// Fulfills the rsmt2d.Tree interface and rsmt2d.TreeConstructorFn function +var ( + _ rsmt2d.Tree = &ErasuredNamespacedMerkleTree{} + ParitySharesNamespace = secondaryReservedNamespace(0xFF) +) + +func secondaryReservedNamespace(lastByte byte) Namespace { + return Namespace{ + Version: NamespaceVersionMax, + ID: append(bytes.Repeat([]byte{0xFF}, NamespaceIDSize-1), lastByte), + } +} + +type Namespace struct { + Version uint8 + ID []byte +} + +// Bytes returns this namespace as a byte slice. 
+func (n Namespace) Bytes() []byte { + return append([]byte{n.Version}, n.ID...) +} + +// ErasuredNamespacedMerkleTree wraps NamespaceMerkleTree to conform to the +// rsmt2d.Tree interface while also providing the correct namespaces to the +// underlying NamespaceMerkleTree. It does this by adding the already included +// namespace to the first half of the tree, and then uses the parity namespace +// ID for each share pushed to the second half of the tree. This allows for the +// namespaces to be included in the erasure data, while also keeping the nmt +// library sufficiently general +type ErasuredNamespacedMerkleTree struct { + squareSize uint64 // note: this refers to the width of the original square before erasure-coded + options []nmt.Option + tree Tree + // axisIndex is the index of the axis (row or column) that this tree is on. This is passed + // by rsmt2d and used to help determine which quadrant each leaf belongs to. + axisIndex uint64 + // shareIndex is the index of the share in a row or column that is being + // pushed to the tree. It is expected to be in the range: 0 <= shareIndex < + // 2*squareSize. shareIndex is used to help determine which quadrant each + // leaf belongs to, along with keeping track of how many leaves have been + // added to the tree so far. + shareIndex uint64 +} + +// Tree is an interface that wraps the methods of the underlying +// NamespaceMerkleTree that are used by ErasuredNamespacedMerkleTree. This +// interface is mainly used for testing. It is not recommended to use this +// interface by implementing a different implementation. +type Tree interface { + Root() ([]byte, error) + Push(namespacedData namespace.PrefixedData) error + ProveRange(start, end int) (nmt.Proof, error) +} + +// NewErasuredNamespacedMerkleTree creates a new ErasuredNamespacedMerkleTree +// with an underlying NMT of namespace size `29` and with +// `ignoreMaxNamespace=true`. axisIndex is the index of the row or column that +// this tree is committing to. 
squareSize must be greater than zero. +func NewErasuredNamespacedMerkleTree(record func(bytes32, []byte, arbutil.PreimageType), squareSize uint64, axisIndex uint, options ...nmt.Option) ErasuredNamespacedMerkleTree { + if squareSize == 0 { + panic("cannot create a ErasuredNamespacedMerkleTree of squareSize == 0") + } + options = append(options, nmt.NamespaceIDSize(29)) + options = append(options, nmt.IgnoreMaxNamespace(true)) + tree := nmt.New(newNmtPreimageHasher(record), options...) + return ErasuredNamespacedMerkleTree{squareSize: squareSize, options: options, tree: tree, axisIndex: uint64(axisIndex), shareIndex: 0} +} + +type constructor struct { + record func(bytes32, []byte, arbutil.PreimageType) + squareSize uint64 + opts []nmt.Option +} + +// NewConstructor creates a tree constructor function as required by rsmt2d to +// calculate the data root. It creates that tree using the +// wrapper.ErasuredNamespacedMerkleTree. +func NewConstructor(record func(bytes32, []byte, arbutil.PreimageType), squareSize uint64, opts ...nmt.Option) rsmt2d.TreeConstructorFn { + return constructor{ + record: record, + squareSize: squareSize, + opts: opts, + }.NewTree +} + +// NewTree creates a new rsmt2d.Tree using the +// wrapper.ErasuredNamespacedMerkleTree with predefined square size and +// nmt.Options +func (c constructor) NewTree(_ rsmt2d.Axis, axisIndex uint) rsmt2d.Tree { + newTree := NewErasuredNamespacedMerkleTree(c.record, c.squareSize, axisIndex, c.opts...) + return &newTree +} + +// Push adds the provided data to the underlying NamespaceMerkleTree, and +// automatically uses the first DefaultNamespaceIDLen number of bytes as the +// namespace unless the data pushed to the second half of the tree. Fulfills the +// rsmt.Tree interface. NOTE: panics if an error is encountered while pushing or +// if the tree size is exceeded. 
+func (w *ErasuredNamespacedMerkleTree) Push(data []byte) error { + if w.axisIndex+1 > 2*w.squareSize || w.shareIndex+1 > 2*w.squareSize { + return fmt.Errorf("pushed past predetermined square size: boundary at %d index at %d %d", 2*w.squareSize, w.axisIndex, w.shareIndex) + } + // + if len(data) < int(NamespaceSize) { + return fmt.Errorf("data is too short to contain namespace ID") + } + nidAndData := make([]byte, int(NamespaceSize)+len(data)) + copy(nidAndData[NamespaceSize:], data) + // use the parity namespace if the cell is not in Q0 of the extended data square + if w.isQuadrantZero() { + copy(nidAndData[:NamespaceSize], data[:NamespaceSize]) + } else { + copy(nidAndData[:NamespaceSize], ParitySharesNamespace.Bytes()) + } + err := w.tree.Push(nidAndData) + if err != nil { + return err + } + w.incrementShareIndex() + return nil +} + +// Root fulfills the rsmt.Tree interface by generating and returning the +// underlying NamespaceMerkleTree Root. +func (w *ErasuredNamespacedMerkleTree) Root() ([]byte, error) { + root, err := w.tree.Root() + if err != nil { + return nil, err + } + return root, nil +} + +// ProveRange returns a Merkle range proof for the leaf range [start, end] where `end` is non-inclusive. +func (w *ErasuredNamespacedMerkleTree) ProveRange(start, end int) (nmt.Proof, error) { + return w.tree.ProveRange(start, end) +} + +// incrementShareIndex increments the share index by one. +func (w *ErasuredNamespacedMerkleTree) incrementShareIndex() { + w.shareIndex++ +} + +// isQuadrantZero returns true if the current share index and axis index are both +// in the original data square. +func (w *ErasuredNamespacedMerkleTree) isQuadrantZero() bool { + return w.shareIndex < w.squareSize && w.axisIndex < w.squareSize +} + +// SetTree sets the underlying tree to the provided tree. This is used for +// testing purposes only. 
+func (w *ErasuredNamespacedMerkleTree) SetTree(tree Tree) { + w.tree = tree +} diff --git a/das/celestia/types/blob.go b/das/celestia/types/blob.go new file mode 100644 index 0000000000..f711cc0117 --- /dev/null +++ b/das/celestia/types/blob.go @@ -0,0 +1,77 @@ +package types + +import ( + "bytes" + "encoding/binary" +) + + +// BlobPointer contains the reference to the data blob on Celestia +type BlobPointer struct { + BlockHeight uint64 `json:"block_height"` + Start uint64 `json:"start"` + SharesLength uint64 `json:"shares_length"` + TxCommitment [32]byte `json:"tx_commitment"` + DataRoot [32]byte `json:"data_root"` +} + +// MarshalBinary encodes the BlobPointer to binary +// serialization format: height + start + end + commitment + data root +func (b *BlobPointer) MarshalBinary() ([]byte, error) { + buf := new(bytes.Buffer) + + // Writing fixed-size values + if err := binary.Write(buf, binary.BigEndian, b.BlockHeight); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, b.Start); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, b.SharesLength); err != nil { + return nil, err + } + + // Writing fixed-size byte arrays directly + if _, err := buf.Write(b.TxCommitment[:]); err != nil { + return nil, err + } + if _, err := buf.Write(b.DataRoot[:]); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary decodes the binary to BlobPointer +// serialization format: height + start + end + commitment + data root +func (b *BlobPointer) UnmarshalBinary(data []byte) error { + buf := bytes.NewReader(data) + // Reading fixed-size values + if err := binary.Read(buf, binary.BigEndian, &b.BlockHeight); err != nil { + return err + } + if err := binary.Read(buf, binary.BigEndian, &b.Start); err != nil { + return err + } + if err := binary.Read(buf, binary.BigEndian, &b.SharesLength); err != nil { + return err + } + + // Reading fixed-size byte arrays directly + if err := 
readFixedBytes(buf, b.TxCommitment[:]); err != nil { + return err + } + if err := readFixedBytes(buf, b.DataRoot[:]); err != nil { + return err + } + + return nil +} + +// readFixedBytes reads a fixed number of bytes into a byte slice +func readFixedBytes(buf *bytes.Reader, data []byte) error { + if _, err := buf.Read(data); err != nil { + return err + } + return nil +} diff --git a/das/celestia/types/da_interface.go b/das/celestia/types/da_interface.go new file mode 100644 index 0000000000..e6483d740a --- /dev/null +++ b/das/celestia/types/da_interface.go @@ -0,0 +1,23 @@ +package types + +import ( + "context" +) + +type CelestiaWriter interface { + Store(context.Context, []byte) ([]byte, error) +} + +type SquareData struct { + RowRoots [][]byte `json:"row_roots"` + ColumnRoots [][]byte `json:"column_roots"` + Rows [][][]byte `json:"rows"` + SquareSize uint64 `json:"square_size"` // Refers to original data square size + StartRow uint64 `json:"start_row"` + EndRow uint64 `json:"end_row"` +} + +type CelestiaReader interface { + Read(context.Context, *BlobPointer) ([]byte, *SquareData, error) + GetProof(ctx context.Context, msg []byte) ([]byte, error) +} diff --git a/das/celestia/types/reader.go b/das/celestia/types/reader.go new file mode 100644 index 0000000000..32a08c88ed --- /dev/null +++ b/das/celestia/types/reader.go @@ -0,0 +1,132 @@ +package types + +import ( + "bytes" + "context" + "errors" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + daprovider "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/offchainlabs/nitro/das/celestia/tree" +) + +func NewReaderForCelestia(celestiaReader CelestiaReader) *readerForCelestia { + return &readerForCelestia{celestiaReader: celestiaReader} +} + +type readerForCelestia struct { + celestiaReader CelestiaReader +} + +func (c *readerForCelestia) IsValidHeaderByte(headerByte byte) bool { + return IsCelestiaMessageHeaderByte(headerByte) +} + +// CelestiaMessageHeaderFlag 
indicates that this data is a Blob Pointer +// which will be used to retrieve data from Celestia +const CelestiaMessageHeaderFlag byte = 0x63 + +func hasBits(checking byte, bits byte) bool { + return (checking & bits) == bits +} + +func IsCelestiaMessageHeaderByte(header byte) bool { + return hasBits(header, CelestiaMessageHeaderFlag) +} + +func (c *readerForCelestia) GetProof(ctx context.Context, msg []byte) ([]byte, error) { + return c.celestiaReader.GetProof(ctx, msg) +} + +func (c *readerForCelestia) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimageRecorder daprovider.PreimageRecorder, + validateSeqMsg bool, +) ([]byte, error) { + return RecoverPayloadFromCelestiaBatch(ctx, batchNum, sequencerMsg, c.celestiaReader, preimageRecorder, validateSeqMsg) +} + +func RecoverPayloadFromCelestiaBatch( + ctx context.Context, + batchNum uint64, + sequencerMsg []byte, + celestiaReader CelestiaReader, + preimageRecorder daprovider.PreimageRecorder, + validateSeqMsg bool, +) ([]byte, error) { + buf := bytes.NewBuffer(sequencerMsg[40:]) + + header, err := buf.ReadByte() + if err != nil { + log.Error("Couldn't deserialize Celestia header byte", "err", err) + return nil, nil + } + if !IsCelestiaMessageHeaderByte(header) { + log.Error("Couldn't deserialize Celestia header byte", "err", errors.New("tried to deserialize a message that doesn't have the Celestia header")) + return nil, nil + } + + blobPointer := BlobPointer{} + blobBytes := buf.Bytes() + err = blobPointer.UnmarshalBinary(blobBytes) + if err != nil { + log.Error("Couldn't unmarshal Celestia blob pointer", "err", err) + return nil, nil + } + + payload, squareData, err := celestiaReader.Read(ctx, &blobPointer) + if err != nil { + log.Error("Failed to resolve blob pointer from celestia", "err", err) + return nil, err + } + + // we read a batch that is to be discarded, so we return the empty batch + if len(payload) == 0 { + return payload, 
nil + } + + if preimageRecorder != nil { + if squareData == nil { + log.Error("squareData is nil, read from replay binary, but preimages are empty") + return nil, err + } + + odsSize := squareData.SquareSize / 2 + rowIndex := squareData.StartRow + for _, row := range squareData.Rows { + treeConstructor := tree.NewConstructor(preimageRecorder, odsSize) + root, err := tree.ComputeNmtRoot(treeConstructor, uint(rowIndex), row) + if err != nil { + log.Error("Failed to compute row root", "err", err) + return nil, err + } + + rowRootMatches := bytes.Equal(squareData.RowRoots[rowIndex], root) + if !rowRootMatches { + log.Error("Row roots do not match", "eds row root", squareData.RowRoots[rowIndex], "calculated", root) + log.Error("Row roots", "row_roots", squareData.RowRoots) + return nil, err + } + rowIndex += 1 + } + + rowsCount := len(squareData.RowRoots) + slices := make([][]byte, rowsCount+rowsCount) + copy(slices[0:rowsCount], squareData.RowRoots) + copy(slices[rowsCount:], squareData.ColumnRoots) + + dataRoot := tree.HashFromByteSlices(preimageRecorder, slices) + + dataRootMatches := bytes.Equal(dataRoot, blobPointer.DataRoot[:]) + if !dataRootMatches { + log.Error("Data Root do not match", "blobPointer data root", blobPointer.DataRoot, "calculated", dataRoot) + return nil, nil + } + } + + return payload, nil +} diff --git a/das/celestia/types/writer.go b/das/celestia/types/writer.go new file mode 100644 index 0000000000..bdf547fc83 --- /dev/null +++ b/das/celestia/types/writer.go @@ -0,0 +1,30 @@ +package types + +import ( + "context" + "errors" +) + +func NewWriterForCelestia(celestiaWriter CelestiaWriter) *writerForCelestia { + return &writerForCelestia{celestiaWriter: celestiaWriter} +} + +type writerForCelestia struct { + celestiaWriter CelestiaWriter +} + +func (c *writerForCelestia) Store(ctx context.Context, message []byte, timeout uint64, disableFallbackStoreDataOnChain bool) ([]byte, error) { + msg, err := c.celestiaWriter.Store(ctx, message) + if err != 
nil { + if disableFallbackStoreDataOnChain { + return nil, errors.New("unable to batch to Celestia and fallback storing data on chain is disabled") + } + return nil, err + } + message = msg + return message, nil +} + +func (d *writerForCelestia) Type() string { + return "celestia" +} diff --git a/docs/celestia/docs.md b/docs/celestia/docs.md new file mode 100644 index 0000000000..eec2127375 --- /dev/null +++ b/docs/celestia/docs.md @@ -0,0 +1,60 @@ +# Orbit with Celestia Underneath ✨ +![image](https://github.com/celestiaorg/nitro/assets/31937514/dfe451b5-21ee-446b-8140-869ea4e2a7eb) + + +## Overview + +The integration of Celestia with Arbitrum Orbit and the Nitro tech stack marks the first external contribution to the Arbitrum Orbit protocol layer, offering developers an additional option for selecting a data availability layer alongside Arbitrum AnyTrust. The integration allows developers to deploy an Orbit Chain that uses Celestia for data availability and settles on Arbitrum One, Ethereum, or other EVM chains. + +## Key Components + +The integration of Celestia with Arbitrum orbit is possible thanks to 3 components: +- DA Provider Implementation +- Preimage Oracle +- Blobstream + +# DA Provider Implementation + +The Arbitrum Nitro code has a `DataAvailabilityProvider` interface that is used across the codebase to store and retrieve data from a specific provider (eip4844 blobs, Anytrust, and now Celestia). 
+ +This integration implements the [`DataAvailabilityProvider` interface for Celestia DA](https://github.com/celestiaorg/nitro/blob/966e631f1a03b49d49f25bea67a92b275d3bacb9/arbstate/inbox.go#L366-L477) + +Additionally, this integration comes with the necessary code for a Nitro chain node to post and retrieve data from Celestia, which can be found [here.](https://github.com/celestiaorg/nitro/tree/celestia-v2.3.1/das/celestia) + +The core logic behind posting and retrieving data happens in [celestia.go](https://github.com/celestiaorg/nitro/blob/celestia-v2.3.1/das/celestia/celestia.go) where data is stored on Celestia and serialized into a small batch of data that gets published once the necessary range of headers (data roots) has been relayed to the [BlobstreamX contract](https://github.com/succinctlabs/blobstreamx). +Then the `Read` logic takes care of taking the deserialized Blob Pointer struct and consuming it in order to fetch the data from Celestia and additionally inform the fetcher about the position of the data on Celestia (we'll get back to this in the next section). + +The following represents a non-exhaustive list of considerations when running a Batch Poster node for a chain with Celestia underneath: +- You will need to use a consensus full node RPC endpoint, you can find a list of them for Mocha [here](https://docs.celestia.org/nodes/mocha-testnet#rpc-endpoints) +- The Batch Poster will only post a Celestia batch to the underlying chain if the height for which it posted is in a recent range in BlobstreamX and if the verification succeeds, otherwise it will discard the batch. Since it will wait until a range is relayed, it can take several minutes for a batch to be posted, but one can always make an on-chain request for the BlobstreamX contract to relay a header promptly. 
+- + +The following represents a non-exhaustive list of considerations when running a Nitro node for a chain with Celestia underneath: +- The `TendermintRpc` endpoint is only needed by the batch poster, every other node can operate without a connection to a full node. +- The message header flag for Celestia batches is `0x63`. +- You will need to know the namespace for the chain that you are trying to connect to, but don't worry if you don't find it, as the information in the BlobPointer can be used to identify where a batch of data is in the Celestia Data Square for a given height, and thus can be used to find out the namespace as well! + +# Preimage Oracle Implementation + +In order to support fraud proofs, this integration has the necessary code for a Nitro validator to populate its preimage mapping with Celestia hashes that then get "unpeeled" in order to reveal the full data for a Blob. You can read more about the "Hash Oracle Trick" [here.](https://docs.arbitrum.io/inside-arbitrum-nitro/#readpreimage-and-the-hash-oracle-trick) + +The data structures and hashing functions for this can be found in the [`nitro/das/celestia/tree` folder](https://github.com/celestiaorg/nitro/tree/celestia-v2.3.1/das/celestia/tree) + +You can see where the preimage oracle gets used in the fraud proof replay binary [here](https://github.com/celestiaorg/nitro/blob/966e631f1a03b49d49f25bea67a92b275d3bacb9/cmd/replay/main.go#L153-L294) + +Something important to note is that the preimage oracle only keeps track of hashes for the rows in the Celestia data square in which a blob resides, this way each Orbit chain with Celestia underneath does not need validators to recompute an entire Celestia Data Square, but instead, validators only have to compute the row roots for the rows in which its data lives, and the header data root, which is the binary merkle tree hash built using the row roots and column roots fetched from a Celestia node. 
Because only data roots that can be confirmed on Blobstream get accepted into the sequencer inbox, one can have a high degree of certainty that the canonical data root being unpeeled as well as the row roots are in fact correct. + +# DA Proof and BlobstreamX + +Finally, the integration only accepts batches of 89 bytes in length for a celestia header flag. This means that a Celestia Batch has 88 bytes of information, which are the block height, the start index of the blob, the length in shares of the blob, the transaction commitment, and the data root for the given height. + +In the case of a challenge, for a celestia batch, the OSP will require an additionally appended "da proof", which is verified against BlobstreamX. Here's what happens based on the result of the BlobstreamX verification: + +- **IN_BLOBSTREAM**: means the batch was verified against blobstream, the height and data root in the batch match, and the start + length do not go out of bounds. This will cause the rest of the OSP to proceed as normal. +- **COUNTERFACTUAL_COMMITMENT**: the height can be verified against blobstream, but the posted data root does not match, or the start + length go out of bounds. Or the Batch Poster tried posting a height too far into the future (1000 blocks ahead of BlobstreamX). This will cause the OSP to proceed with an empty batch. Note that Nitro nodes for a chain with Celestia DA will also discard any batches that cannot be correctly validated. +- **UNDECIDED**: the height has not been relayed yet, so we revert and wait until the latest height in blobstream is greater than the batch's height. 
+ +You can see how BlobstreamX is integrated into the `OneStepProverHostIO.sol` contract [here]([https://github.com/celestiaorg/nitro-contracts/blob/celestia-v1.2.1/src/bridge/SequencerInbox.sol#L584-L630](https://github.com/celestiaorg/nitro-contracts/blob/contracts-v1.2.1/src/osp/OneStepProverHostIo.sol#L301)), which allows us to discard batches with otherwise faulty data roots, thus giving us a high degree of confidence that the data root can be safely unpacked in case of a challenge. + + + diff --git a/go.mod b/go.mod index da49b0d8b9..656168d717 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/offchainlabs/nitro -go 1.21 +go 1.21.5 replace github.com/VictoriaMetrics/fastcache => ./fastcache @@ -17,11 +17,13 @@ require ( github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.10 github.com/aws/aws-sdk-go-v2/service/s3 v1.26.9 github.com/cavaliergopher/grab/v3 v3.0.1 + github.com/celestiaorg/nmt v0.20.0 + github.com/celestiaorg/rsmt2d v0.11.0 github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v4 v4.2.0 github.com/enescakir/emoji v1.0.0 - github.com/ethereum/go-ethereum v1.10.26 + github.com/ethereum/go-ethereum v1.13.10 github.com/fatih/structtag v1.2.0 github.com/gdamore/tcell/v2 v2.7.1 github.com/go-redis/redis/v8 v8.11.5 @@ -35,15 +37,16 @@ require ( github.com/holiman/uint256 v1.2.4 github.com/knadh/koanf v1.4.0 github.com/mailru/easygo v0.0.0-20190618140210-3c14a0dc985f - github.com/mitchellh/mapstructure v1.4.1 + github.com/mitchellh/mapstructure v1.5.0 github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 github.com/rivo/tview v0.0.0-20240307173318-e804876934a1 github.com/spf13/pflag v1.0.5 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint v0.34.29 github.com/wealdtech/go-merkletree v1.0.0 golang.org/x/crypto v0.21.0 - golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/exp 
v0.0.0-20231206192017-f3f8817b8deb golang.org/x/sys v0.18.0 golang.org/x/term v0.18.0 golang.org/x/tools v0.16.0 @@ -74,7 +77,8 @@ require ( github.com/aws/smithy-go v1.15.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.2.1 // indirect + github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f // indirect @@ -108,7 +112,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/glog v1.0.0 // indirect - github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/flatbuffers v1.12.1 // indirect @@ -126,30 +130,33 @@ require ( github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/reedsolomon v1.11.8 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect 
github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/opentracing/opentracing-go v1.1.0 // indirect + github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/prometheus/client_golang v1.14.0 // indirect github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect github.com/rhnvrm/simples3 v0.6.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect - github.com/rs/cors v1.7.0 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rs/cors v1.8.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/status-im/keycard-go v0.2.0 // indirect + github.com/stretchr/testify v1.9.0 // indirect github.com/supranational/blst v0.3.11 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect @@ -159,7 +166,7 @@ require ( github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yuin/gopher-lua v1.1.1 // indirect - go.opencensus.io v0.22.5 // indirect + go.opencensus.io v0.23.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.22.0 @@ -169,3 +176,8 @@ require ( google.golang.org/protobuf v1.33.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) + +replace ( + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 +) diff --git a/go.sum b/go.sum index c0193be769..d47d8db926 100644 --- a/go.sum +++ b/go.sum @@ 
-136,12 +136,20 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bits-and-blooms/bitset v1.10.0 h1:ePXTeiPEazB5+opbv5fr8umg2R/1NlzgDsyepwsSr88= github.com/bits-and-blooms/bitset v1.10.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= -github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= -github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd/btcec/v2 v2.2.1 h1:xP60mv8fvp+0khmrN0zTdPC3cNm24rfeE6lh2R/Yv3E= +github.com/btcsuite/btcd/btcec/v2 v2.2.1/go.mod h1:9/CSmJxmuvqzX9Wh2fXMWToLOHhPd11lSPuIupwTkI8= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIHXju0Fr4= github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= +github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 h1:Fd7ymPUzExPGNl2gZw4i5S74arMw+iDHLE78M/cCxl4= +github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29/go.mod h1:xrICN0PBhp3AdTaZ8q4wS5Jvi32V02HNjaC2EsWiEKk= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= +github.com/celestiaorg/nmt v0.20.0 h1:9i7ultZ8Wv5ytt8ZRaxKQ5KOOMo4A2K2T/aPGjIlSas= +github.com/celestiaorg/nmt v0.20.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= +github.com/celestiaorg/rsmt2d v0.11.0 h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k= +github.com/celestiaorg/rsmt2d v0.11.0/go.mod h1:6Y580I3gVr0+OVFfW6m2JTwnCCmvW3WfbwSLfuT+HCA= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= @@ -294,10 +302,6 @@ github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= @@ -307,8 +311,9 @@ github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da 
h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -353,6 +358,7 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -485,6 +491,10 @@ github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= +github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A= github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= github.com/knadh/koanf v1.4.0/go.mod 
h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -530,8 +540,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -543,8 +553,9 @@ github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go. 
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -583,12 +594,14 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= github.com/pelletier/go-toml 
v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -628,6 +641,8 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= github.com/rhnvrm/simples3 v0.6.1 h1:H0DJwybR6ryQE+Odi9eqkHuzjYAeJgtGcGtuBwOhsH8= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rivo/tview v0.0.0-20240307173318-e804876934a1 h1:bWLHTRekAy497pE7+nXSuzXwwFHI0XauRzz6roUvY+s= @@ -639,10 +654,11 @@ github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rs/cors v1.7.0 
h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -676,12 +692,18 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson 
v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -724,13 +746,18 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -740,6 +767,7 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -755,8 +783,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp 
v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= -golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb h1:c0vyKkb6yr3KR7jEfJaOSv4lG7xPkbN6r52aJz1d8a8= +golang.org/x/exp v0.0.0-20231206192017-f3f8817b8deb/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -819,6 +847,7 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= @@ -1053,6 +1082,7 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1078,6 +1108,7 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/nitro-testnode b/nitro-testnode index f328006579..4b37c1caf9 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit f328006579cbefe22c6c57de3d6b86397fde4438 +Subproject commit 4b37c1caf9de0004bbf7c834c4f25a7c26bba5ac diff --git a/scripts/download-machine.sh b/scripts/download-machine.sh index 3022c350a0..a957ccbd29 100755 --- a/scripts/download-machine.sh +++ b/scripts/download-machine.sh @@ -5,7 +5,8 @@ mkdir "$2" ln -sfT "$2" latest cd "$2" echo "$2" > module-root.txt -url_base="https://github.com/OffchainLabs/nitro/releases/download/$1" +url_org="${3:-OffchainLabs}" +url_base="https://github.com/$url_org/nitro/releases/download/$1" wget "$url_base/machine.wavm.br" 
status_code="$(curl -LI "$url_base/replay.wasm" -so /dev/null -w '%{http_code}')" diff --git a/solgen/gen.go b/solgen/gen.go index 2ad71b0c79..996ef6d7a7 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -139,6 +139,7 @@ func main() { if err := json.Unmarshal(data, &artifact); err != nil { log.Fatal("failed to parse contract", name, err) } + fmt.Printf("Contract name: %v\n", name) yulModInfo.addArtifact(HardHatArtifact{ ContractName: name, Abi: artifact.Abi, diff --git a/staker/challenge_manager.go b/staker/challenge_manager.go index 27cb92a5c7..9cbf9817af 100644 --- a/staker/challenge_manager.go +++ b/staker/challenge_manager.go @@ -4,6 +4,7 @@ package staker import ( + "bytes" "context" "encoding/binary" "errors" @@ -20,6 +21,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" + celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types" "github.com/offchainlabs/nitro/solgen/go/challengegen" "github.com/offchainlabs/nitro/validator" ) @@ -32,6 +34,8 @@ var initiatedChallengeID common.Hash var challengeBisectedID common.Hash var executionChallengeBegunID common.Hash +const ReadInboxMessage uint16 = 0x8021 + func init() { parsedChallengeManagerABI, err := challengegen.ChallengeManagerMetaData.GetAbi() if err != nil { @@ -446,6 +450,11 @@ func (m *ChallengeManager) IssueOneStepProof( if err != nil { return nil, fmt.Errorf("error getting OSP from challenge %v backend at step %v: %w", m.challengeIndex, position, err) } + proof, err = m.getDAProof(ctx, proof) + if err != nil { + return nil, fmt.Errorf("error getting DA Proof for OSP for challenge %v at step %v: %w", m.challengeIndex, position, err) + } + return m.challengeCore.con.OneStepProveExecution( m.challengeCore.auth, m.challengeCore.challengeIndex, @@ -580,3 +589,57 @@ func (m *ChallengeManager) Act(ctx context.Context) (*types.Transaction, error) machineStepCount, ) } + +func (m *ChallengeManager) getDAProof(ctx context.Context, 
proof []byte) ([]byte, error) { + // get the proof's opcode + opCodeBytes := proof[len(proof)-2:] + opCode := binary.BigEndian.Uint16(opCodeBytes) + // remove opcode bytes + proof = proof[:len(proof)-2] + if opCode == ReadInboxMessage { + messageType := proof[len(proof)-1] + // remove inbox message type byte + proof = proof[:len(proof)-1] + if messageType == 0x0 { + // Read the last 8 bytes as a uint64 to get our batch number + batchNumBytes := proof[len(proof)-8:] + batchNum := binary.BigEndian.Uint64(batchNumBytes) + batchData, _, err := m.validator.inboxReader.GetSequencerMessageBytes(ctx, batchNum) + if err != nil { + log.Error("Couldn't get sequencer message bytes", "err", err) + return nil, err + } + + buf := bytes.NewBuffer(batchData[40:]) + + header, err := buf.ReadByte() + if err != nil { + log.Error("Couldn't deserialize Celestia header byte", "err", err) + return nil, nil + } + daProof := []byte{} + if celestiaTypes.IsCelestiaMessageHeaderByte(header) { + log.Info("Fetching da proof for Celestia", "batchNum", batchNum) + blobBytes := buf.Bytes() + + var celestiaReader celestiaTypes.CelestiaReader + for _, dapReader := range m.validator.dapReaders { + switch reader := dapReader.(type) { + case celestiaTypes.CelestiaReader: + celestiaReader = reader + } + } + daProof, err = celestiaReader.GetProof(ctx, blobBytes) + if err != nil { + return nil, err + } + } + + // remove batch number from proof + proof = proof[:len(proof)-8] + proof = append(proof, daProof...) 
+ } + } + + return proof, nil +} diff --git a/system_tests/batch_poster_test.go b/system_tests/batch_poster_test.go index 0ec03e84c4..1d94e38733 100644 --- a/system_tests/batch_poster_test.go +++ b/system_tests/batch_poster_test.go @@ -21,6 +21,7 @@ import ( "github.com/offchainlabs/nitro/arbnode" "github.com/offchainlabs/nitro/arbnode/dataposter" "github.com/offchainlabs/nitro/arbnode/dataposter/externalsignertest" + "github.com/offchainlabs/nitro/arbstate/daprovider" "github.com/offchainlabs/nitro/solgen/go/bridgegen" "github.com/offchainlabs/nitro/solgen/go/upgrade_executorgen" "github.com/offchainlabs/nitro/util/redisutil" @@ -159,7 +160,7 @@ func testBatchPosterParallel(t *testing.T, useRedis bool) { Config: func() *arbnode.BatchPosterConfig { return &batchPosterConfig }, DeployInfo: builder.L2.ConsensusNode.DeployInfo, TransactOpts: &seqTxOpts, - DAPWriter: nil, + DAPWriters: []daprovider.Writer{}, ParentChainID: parentChainID, }, ) diff --git a/system_tests/full_celestia_challenge_test.backup_go b/system_tests/full_celestia_challenge_test.backup_go new file mode 100644 index 0000000000..3ff9d62793 --- /dev/null +++ b/system_tests/full_celestia_challenge_test.backup_go @@ -0,0 +1,481 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + +package arbtest + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math/big" + "net/http" + _ "net/http/pprof" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/offchainlabs/nitro/das/celestia" + celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/solgen/go/ospgen" + + "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/validator" + "github.com/offchainlabs/nitro/validator/server_common" + "github.com/offchainlabs/nitro/validator/valnode" +) + +func init() { + go func() { + fmt.Println(http.ListenAndServe("localhost:6060", nil)) + }() +} + +func DeployOneStepProofEntryCelestia(t *testing.T, ctx context.Context, auth *bind.TransactOpts, client *ethclient.Client) common.Address { + osp0, tx, _, err := ospgen.DeployOneStepProver0(auth, client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospMem, tx, _, err := ospgen.DeployOneStepProverMemory(auth, client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospMath, tx, _, err := ospgen.DeployOneStepProverMath(auth, 
client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospHostIo, tx, _, err := mocksgen.DeployOneStepProverHostIoCelestiaMock(auth, client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospEntry, tx, _, err := ospgen.DeployOneStepProofEntry(auth, client, osp0, ospMem, ospMath, ospHostIo) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + return ospEntry +} + +func writeTxToCelestiaBatch(writer io.Writer, tx *types.Transaction) error { + txData, err := tx.MarshalBinary() + if err != nil { + return err + } + var segment []byte + segment = append(segment, arbstate.BatchSegmentKindL2Message) + segment = append(segment, arbos.L2MessageKind_SignedTx) + segment = append(segment, txData...) + err = rlp.Encode(writer, segment) + return err +} + +func makeCelestiaBatch(t *testing.T, l2Node *arbnode.Node, celestiaDA *celestia.CelestiaDASClient, undecided bool, counterfactual bool, mockStream *mocksgen.Mockstream, deployer *bind.TransactOpts, l2Info *BlockchainTestInfo, backend *ethclient.Client, sequencer *bind.TransactOpts, seqInbox *mocksgen.SequencerInboxStub, seqInboxAddr common.Address, modStep int64) { + ctx := context.Background() + + batchBuffer := bytes.NewBuffer([]byte{}) + for i := int64(0); i < makeBatch_MsgsPerBatch; i++ { + value := i + if i == modStep { + value++ + } + err := writeTxToCelestiaBatch(batchBuffer, l2Info.PrepareTx("Owner", "Destination", 1000000, big.NewInt(value), []byte{})) + Require(t, err) + } + compressed, err := arbcompress.CompressWell(batchBuffer.Bytes()) + Require(t, err) + message := append([]byte{0}, compressed...) 
+ message, err = celestiaDA.Store(ctx, message) + Require(t, err) + + buf := bytes.NewBuffer(message) + + header, err := buf.ReadByte() + Require(t, err) + if !celestiaTypes.IsCelestiaMessageHeaderByte(header) { + err := errors.New("tried to deserialize a message that doesn't have the Celestia header") + Require(t, err) + } + + blobPointer := celestiaTypes.BlobPointer{} + blobBytes := buf.Bytes() + err = blobPointer.UnmarshalBinary(blobBytes) + Require(t, err) + + dataCommitment, err := celestiaDA.Prover.Trpc.DataCommitment(ctx, blobPointer.BlockHeight-1, blobPointer.BlockHeight+1) + if err != nil { + t.Log("Error when fetching data commitment:", err) + } + Require(t, err) + mockStream.SubmitDataCommitment(deployer, [32]byte(dataCommitment.DataCommitment), blobPointer.BlockHeight-1, blobPointer.BlockHeight+1) + if counterfactual { + mockStream.UpdateGenesisState(deployer, (blobPointer.BlockHeight - 1100)) + } else if undecided { + t.Log("Block Height before change: ", blobPointer.BlockHeight) + mockStream.UpdateGenesisState(deployer, (blobPointer.BlockHeight - 100)) + } + seqNum := new(big.Int).Lsh(common.Big1, 256) + seqNum.Sub(seqNum, common.Big1) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + nodeSeqInbox, err := arbnode.NewSequencerInbox(backend, seqInboxAddr, 0) + Require(t, err) + batches, err := nodeSeqInbox.LookupBatchesInRange(ctx, receipt.BlockNumber, receipt.BlockNumber) + Require(t, err) + if len(batches) == 0 { + Fatal(t, "batch not found after AddSequencerL2BatchFromOrigin") + } + err = l2Node.InboxTracker.AddSequencerBatches(ctx, backend, batches) + Require(t, err) + _, err = l2Node.InboxTracker.GetBatchMetadata(0) + Require(t, err, "failed to get batch metadata after adding batch:") +} + +func RunCelestiaChallengeTest(t *testing.T, asserterIsCorrect bool, 
useStubs bool, challengeMsgIdx int64, undecided bool, counterFactual bool) { + + glogger := log.NewGlogHandler( + log.NewTerminalHandler(io.Writer(os.Stderr), false)) + glogger.Verbosity(log.LvlInfo) + log.SetDefault(log.NewLogger(glogger)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialBalance := new(big.Int).Lsh(big.NewInt(1), 200) + l1Info := NewL1TestInfo(t) + l1Info.GenerateGenesisAccount("deployer", initialBalance) + l1Info.GenerateGenesisAccount("asserter", initialBalance) + l1Info.GenerateGenesisAccount("challenger", initialBalance) + l1Info.GenerateGenesisAccount("sequencer", initialBalance) + + chainConfig := params.ArbitrumDevTestChainConfig() + l1Info, l1Backend, _, _ := createTestL1BlockChain(t, l1Info) + conf := arbnode.ConfigDefaultL1Test() + conf.BlockValidator.Enable = false + conf.BatchPoster.Enable = false + conf.InboxReader.CheckDelay = time.Second + + deployerTxOpts := l1Info.GetDefaultTransactOpts("deployer", ctx) + blobstream, tx, mockStreamWrapper, err := mocksgen.DeployMockstream(&deployerTxOpts, l1Backend) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1Backend, tx) + Require(t, err) + + conf.Celestia = celestia.DAConfig{ + Enable: true, + GasPrice: 0.1, + Rpc: "http://localhost:26658", + NamespaceId: "000008e5f679bf7116cb", + AuthToken: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJwdWJsaWMiLCJyZWFkIiwid3JpdGUiLCJhZG1pbiJdfQ.8iCpZJaiui7QPTCj4m5f2M7JyHkJtr6Xha0bmE5Vv7Y", + ValidatorConfig: &celestia.ValidatorConfig{ + TendermintRPC: "http://localhost:26657", + BlobstreamAddr: blobstream.Hex(), + }, + } + + t.Log("Blobstream Address: ", blobstream.Hex()) + + celestiaDa, err := celestia.NewCelestiaDA(&conf.Celestia, l1Backend) + Require(t, err) + // Initialize Mockstream before the tests + header, err := celestiaDa.Client.Header.NetworkHead(ctx) + Require(t, err) + mockStreamWrapper.Initialize(&deployerTxOpts, header.Height()) + + var valStack *node.Node + var mockSpawn *mockSpawner + if 
useStubs { + mockSpawn, valStack = createMockValidationNode(t, ctx, &valnode.TestValidationConfig.Arbitrator) + } else { + _, valStack = createTestValidationNode(t, ctx, &valnode.TestValidationConfig) + } + configByValidationNode(conf, valStack) + + fatalErrChan := make(chan error, 10) + asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) + + sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx) + asserterTxOpts := l1Info.GetDefaultTransactOpts("asserter", ctx) + challengerTxOpts := l1Info.GetDefaultTransactOpts("challenger", ctx) + + asserterBridgeAddr, asserterSeqInbox, asserterSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + challengerBridgeAddr, challengerSeqInbox, challengerSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + + asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, asserterL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) + asserterRollupAddresses.Bridge = asserterBridgeAddr + asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr + asserterExec, err := gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) + Require(t, err) + parentChainID := big.NewInt(1337) + asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) + Require(t, err) + err = asserterL2.Start(ctx) + Require(t, err) + + challengerL2Info, challengerL2Stack, challengerL2ChainDb, challengerL2ArbDb, challengerL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) + challengerRollupAddresses := *asserterRollupAddresses + challengerRollupAddresses.Bridge = challengerBridgeAddr + challengerRollupAddresses.SequencerInbox = 
challengerSeqInboxAddr + challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) + Require(t, err) + challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) + Require(t, err) + err = challengerL2.Start(ctx) + Require(t, err) + + asserterL2Info.GenerateAccount("Destination") + challengerL2Info.SetFullAccountInfo("Destination", asserterL2Info.GetInfoWithPrivKey("Destination")) + + if challengeMsgIdx < 1 || challengeMsgIdx > 3*makeBatch_MsgsPerBatch { + Fatal(t, "challengeMsgIdx illegal") + } + + // seqNum := common.Big2 + makeCelestiaBatch(t, asserterL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, asserterL2Info, l1Backend, &sequencerTxOpts, asserterSeqInbox, asserterSeqInboxAddr, -1) + makeCelestiaBatch(t, challengerL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, challengerL2Info, l1Backend, &sequencerTxOpts, challengerSeqInbox, challengerSeqInboxAddr, challengeMsgIdx-1) + + // seqNum.Add(seqNum, common.Big1) + makeCelestiaBatch(t, asserterL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, asserterL2Info, l1Backend, &sequencerTxOpts, asserterSeqInbox, asserterSeqInboxAddr, -1) + makeCelestiaBatch(t, challengerL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, challengerL2Info, l1Backend, &sequencerTxOpts, challengerSeqInbox, challengerSeqInboxAddr, challengeMsgIdx-makeBatch_MsgsPerBatch-1) + + // seqNum.Add(seqNum, common.Big1) + makeCelestiaBatch(t, asserterL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, asserterL2Info, l1Backend, &sequencerTxOpts, asserterSeqInbox, asserterSeqInboxAddr, -1) + makeCelestiaBatch(t, challengerL2, celestiaDa, 
undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, challengerL2Info, l1Backend, &sequencerTxOpts, challengerSeqInbox, challengerSeqInboxAddr, challengeMsgIdx-makeBatch_MsgsPerBatch*2-1) + + trueSeqInboxAddr := challengerSeqInboxAddr + trueDelayedBridge := challengerBridgeAddr + expectedWinner := l1Info.GetAddress("challenger") + if asserterIsCorrect { + trueSeqInboxAddr = asserterSeqInboxAddr + trueDelayedBridge = asserterBridgeAddr + expectedWinner = l1Info.GetAddress("asserter") + } + ospEntry := DeployOneStepProofEntryCelestia(t, ctx, &deployerTxOpts, l1Backend) + + locator, err := server_common.NewMachineLocator("") + if err != nil { + Fatal(t, err) + } + var wasmModuleRoot common.Hash + if useStubs { + wasmModuleRoot = mockWasmModuleRoots[0] + } else { + wasmModuleRoot = locator.LatestWasmModuleRoot() + if (wasmModuleRoot == common.Hash{}) { + Fatal(t, "latest machine not found") + } + } + + asserterGenesis := asserterExec.ArbInterface.BlockChain().Genesis() + challengerGenesis := challengerExec.ArbInterface.BlockChain().Genesis() + if asserterGenesis.Hash() != challengerGenesis.Hash() { + Fatal(t, "asserter and challenger have different genesis hashes") + } + asserterLatestBlock := asserterExec.ArbInterface.BlockChain().CurrentBlock() + challengerLatestBlock := challengerExec.ArbInterface.BlockChain().CurrentBlock() + if asserterLatestBlock.Hash() == challengerLatestBlock.Hash() { + Fatal(t, "asserter and challenger have the same end block") + } + + asserterStartGlobalState := validator.GoGlobalState{ + BlockHash: asserterGenesis.Hash(), + Batch: 1, + PosInBatch: 0, + } + asserterEndGlobalState := validator.GoGlobalState{ + BlockHash: asserterLatestBlock.Hash(), + Batch: 4, + PosInBatch: 0, + } + numBlocks := asserterLatestBlock.Number.Uint64() - asserterGenesis.NumberU64() + + resultReceiver, challengeManagerAddr := CreateChallenge( + t, + ctx, + &deployerTxOpts, + l1Backend, + ospEntry, + trueSeqInboxAddr, + trueDelayedBridge, + wasmModuleRoot, 
+ asserterStartGlobalState, + asserterEndGlobalState, + numBlocks, + l1Info.GetAddress("asserter"), + l1Info.GetAddress("challenger"), + ) + + confirmLatestBlock(ctx, t, l1Info, l1Backend) + + // Add the L1 backend to Celestia DA + celestiaDa.Prover.EthClient = l1Backend + + celestiaReader := celestiaTypes.NewReaderForCelestia(celestiaDa) + + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, []daprovider.Reader{celestiaReader}, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + if err != nil { + Fatal(t, err) + } + if useStubs { + asserterRecorder := newMockRecorder(asserterValidator, asserterL2.TxStreamer) + asserterValidator.OverrideRecorder(t, asserterRecorder) + } + err = asserterValidator.Start(ctx) + if err != nil { + Fatal(t, err) + } + defer asserterValidator.Stop() + asserterManager, err := staker.NewChallengeManager(ctx, l1Backend, &asserterTxOpts, asserterTxOpts.From, challengeManagerAddr, 1, asserterValidator, 0, 0) + if err != nil { + Fatal(t, err) + } + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, []daprovider.Reader{celestiaReader}, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + if err != nil { + Fatal(t, err) + } + if useStubs { + challengerRecorder := newMockRecorder(challengerValidator, challengerL2.TxStreamer) + challengerValidator.OverrideRecorder(t, challengerRecorder) + } + err = challengerValidator.Start(ctx) + if err != nil { + Fatal(t, err) + } + defer challengerValidator.Stop() + challengerManager, err := staker.NewChallengeManager(ctx, l1Backend, &challengerTxOpts, challengerTxOpts.From, challengeManagerAddr, 1, challengerValidator, 0, 0) + if err != nil { + Fatal(t, err) + } + + confirmLatestBlock(ctx, t, l1Info, l1Backend) + + for i := 0; i < 100; i++ { + var tx 
*types.Transaction + var currentCorrect bool + // Gas cost is slightly reduced if done in the same timestamp or block as previous call. + // This might make gas estimation undersestimate next move. + // Invoke a new L1 block, with a new timestamp, before estimating. + time.Sleep(time.Second) + SendWaitTestTransactions(t, ctx, l1Backend, []*types.Transaction{ + l1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + }) + + if i%2 == 0 { + currentCorrect = !asserterIsCorrect + tx, err = challengerManager.Act(ctx) + } else { + currentCorrect = asserterIsCorrect + tx, err = asserterManager.Act(ctx) + } + if err != nil { + if !currentCorrect && (strings.Contains(err.Error(), "lost challenge") || + strings.Contains(err.Error(), "SAME_OSP_END") || + strings.Contains(err.Error(), "BAD_SEQINBOX_MESSAGE")) || + strings.Contains(err.Error(), "BLOBSTREAM_UNDECIDED") { + t.Log("challenge completed! asserter hit expected error:", err) + return + } else if (currentCorrect && counterFactual) && strings.Contains(err.Error(), "BAD_SEQINBOX_MESSAGE") { + t.Log("counterfactual challenge challenge completed! asserter hit expected error:", err) + return + } + Fatal(t, "challenge step", i, "hit error:", err) + } + if tx == nil { + Fatal(t, "no move") + } + + if useStubs { + if len(mockSpawn.ExecSpawned) != 0 { + if len(mockSpawn.ExecSpawned) != 1 { + Fatal(t, "bad number of spawned execRuns: ", len(mockSpawn.ExecSpawned)) + } + if mockSpawn.ExecSpawned[0] != uint64(challengeMsgIdx) { + Fatal(t, "wrong spawned execRuns: ", mockSpawn.ExecSpawned[0], " expected: ", challengeMsgIdx) + } + return + } + } + + _, err = EnsureTxSucceeded(ctx, l1Backend, tx) + if err != nil { + if !currentCorrect && strings.Contains(err.Error(), "BAD_SEQINBOX_MESSAGE") { + t.Log("challenge complete! 
Tx failed as expected:", err) + return + } + Fatal(t, err) + } + + confirmLatestBlock(ctx, t, l1Info, l1Backend) + + winner, err := resultReceiver.Winner(&bind.CallOpts{}) + if err != nil { + Fatal(t, err) + } + if winner == (common.Address{}) { + continue + } + if winner != expectedWinner { + Fatal(t, "wrong party won challenge") + } + } + + Fatal(t, "challenge timed out without winner") +} + +func TestCelestiaChallengeManagerFullAsserterIncorrect(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, false, false) +} + +func TestCelestiaChallengeManagerFullAsserterCorrect(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, false, false) +} + +func TestCelestiaChallengeManagerFullAsserterIncorrectUndecided(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, true, false) +} + +func TestCelestiaChallengeManagerFullAsserterCorrectUndecided(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, true, false) +} + +func TestCelestiaChallengeManagerFullAsserterIncorrectCounterfactual(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, false, true) +} + +func TestCelestiaChallengeManagerFullAsserterCorrectCounterfactual(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, false, true) +} diff --git a/system_tests/rpc_test.go b/system_tests/rpc_test.go deleted file mode 100644 index 511a608e67..0000000000 --- a/system_tests/rpc_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021-2022, Offchain Labs, Inc. 
-// For license information, see https://github.com/nitro/blob/master/LICENSE - -package arbtest - -import ( - "context" - "path/filepath" - "testing" - - "github.com/ethereum/go-ethereum/ethclient" -) - -func TestIpcRpc(t *testing.T) { - ipcPath := filepath.Join(t.TempDir(), "test.ipc") - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - builder := NewNodeBuilder(ctx).DefaultConfig(t, true) - builder.l2StackConfig.IPCPath = ipcPath - cleanup := builder.Build(t) - defer cleanup() - - _, err := ethclient.Dial(ipcPath) - Require(t, err) -} diff --git a/validator/server_arb/execution_run.go b/validator/server_arb/execution_run.go index d29a88d34d..25dabcbb11 100644 --- a/validator/server_arb/execution_run.go +++ b/validator/server_arb/execution_run.go @@ -5,6 +5,7 @@ package server_arb import ( "context" + "encoding/binary" "fmt" "sync" "time" @@ -187,7 +188,21 @@ func (e *executionRun) GetProofAt(position uint64) containers.PromiseInterface[[ if err != nil { return nil, err } - return machine.ProveNextStep(), nil + + opcodeBytes := make([]byte, 2) + if machine.IsRunning() { + opcode := machine.GetNextOpcode() + + binary.BigEndian.PutUint16(opcodeBytes, opcode) + } else { + // append dummy opcode if the machine is halted + binary.BigEndian.PutUint16(opcodeBytes, 0xFFFF) + } + + proof := machine.ProveNextStep() + + proof = append(proof, opcodeBytes...) 
+ return proof, nil }) } diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go index adca9695e2..6ea37c163c 100644 --- a/validator/server_arb/machine.go +++ b/validator/server_arb/machine.go @@ -43,6 +43,7 @@ type MachineInterface interface { Hash() common.Hash GetGlobalState() validator.GoGlobalState ProveNextStep() []byte + GetNextOpcode() uint16 Freeze() Destroy() } @@ -276,6 +277,14 @@ func (m *ArbitratorMachine) ProveNextStep() []byte { return proofBytes } +func (m *ArbitratorMachine) GetNextOpcode() uint16 { + defer runtime.KeepAlive(m) + m.mutex.Lock() + defer m.mutex.Unlock() + + return uint16(C.arbitrator_get_opcode(m.ptr)) +} + func (m *ArbitratorMachine) SerializeState(path string) error { defer runtime.KeepAlive(m) m.mutex.Lock() diff --git a/validator/server_arb/mock_machine.go b/validator/server_arb/mock_machine.go index 3cf0f9f771..adec8d269c 100644 --- a/validator/server_arb/mock_machine.go +++ b/validator/server_arb/mock_machine.go @@ -98,6 +98,10 @@ func (m *IncorrectMachine) ProveNextStep() []byte { return m.inner.ProveNextStep() } +func (m *IncorrectMachine) GetNextOpcode() uint16 { + return m.inner.GetNextOpcode() +} + func (m *IncorrectMachine) Freeze() { m.inner.Freeze() }