diff --git a/.github/workflows/nightly-ci.yml b/.github/workflows/nightly-ci.yml index 6e99f95424..4a7f82ca91 100644 --- a/.github/workflows/nightly-ci.yml +++ b/.github/workflows/nightly-ci.yml @@ -16,7 +16,7 @@ jobs: # Only run on schedule AND main branch tests-scheduled: name: Scheduled tests - runs-on: ubuntu-8 + runs-on: ubuntu-latest services: redis: diff --git a/.gitmodules b/.gitmodules index 109e3385c2..c04abeb059 100644 --- a/.gitmodules +++ b/.gitmodules @@ -12,8 +12,8 @@ url = https://github.com/google/brotli.git [submodule "contracts"] path = contracts - url = https://github.com/OffchainLabs/nitro-contracts.git - branch = develop + url = https://github.com/celestiaorg/nitro-contracts.git + branch = contracts-v1.2.1 [submodule "arbitrator/wasm-testsuite/testsuite"] path = arbitrator/wasm-testsuite/testsuite url = https://github.com/WebAssembly/testsuite.git @@ -22,7 +22,7 @@ url = https://github.com/OffchainLabs/wasmer.git [submodule "nitro-testnode"] path = nitro-testnode - url = https://github.com/OffchainLabs/nitro-testnode.git + url = https://github.com/celestiaorg/nitro-testnode.git [submodule "bold"] path = bold url = https://github.com/OffchainLabs/bold.git diff --git a/Dockerfile b/Dockerfile index a0c40b998a..2d8152a2f3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -46,10 +46,10 @@ WORKDIR /workspace RUN apt-get update && apt-get install -y curl build-essential=12.9 FROM wasm-base AS wasm-libs-builder - # clang / lld used by soft-float wasm +# clang / lld used by soft-float wasm RUN apt-get update && \ apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt - # pinned rust 1.84.1 +# pinned rust 1.84.1 RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.84.1 --target x86_64-unknown-linux-gnu,wasm32-unknown-unknown,wasm32-wasip1 COPY ./Makefile ./ COPY arbitrator/Cargo.* arbitrator/ @@ -85,6 +85,7 @@ COPY ./cmd/replay ./cmd/replay COPY ./daprovider ./daprovider COPY 
./daprovider/das/dasutil ./daprovider/das/dasutil COPY ./daprovider/das/dastree ./daprovider/das/dastree +COPY ./daprovider/celestia ./daprovider/celestia COPY ./precompiles ./precompiles COPY ./statetransfer ./statetransfer COPY ./util ./util @@ -104,6 +105,8 @@ COPY ./go-ethereum ./go-ethereum COPY scripts/remove_reference_types.sh scripts/ COPY --from=brotli-wasm-export / target/ COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/ +COPY --from=contracts-builder workspace/contracts/build/contracts/src/celestia/ contracts/build/contracts/src/celestia/ +COPY --from=contracts-builder workspace/contracts/build/contracts/src/celestia/ contracts/build/contracts/src/celestia/ COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/ COPY --from=contracts-builder workspace/contracts-legacy/build/contracts/src/precompiles/ contracts-legacy/build/contracts/src/precompiles/ COPY --from=contracts-builder workspace/.make/ .make/ @@ -236,9 +239,10 @@ COPY ./scripts/download-machine.sh . 
RUN ./download-machine.sh consensus-v30 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b && true RUN ./download-machine.sh consensus-v31 0x260f5fa5c3176a856893642e149cf128b5a8de9f828afec8d11184415dd8dc69 RUN ./download-machine.sh consensus-v32 0x184884e1eb9fefdc158f6c8ac912bb183bf3cf83f0090317e0bc4ac5860baa39 -#RUN ./download-machine.sh consensus-v40-rc.1 0x6dae396b0b7644a2d63b4b22e6452b767aa6a04b6778dadebdd74aa40f40a5c5 -#RUN ./download-machine.sh consensus-v40-rc.2 0xa8206be13d53e456c7ab061d94bab5b229d674ac57ffe7281216479a8820fcc0 RUN ./download-machine.sh consensus-v40 0xdb698a2576298f25448bc092e52cf13b1e24141c997135d70f217d674bbeb69a +RUN ./download-machine.sh v3.2.1-rc.1 0xe81f986823a85105c5fd91bb53b4493d38c0c26652d23f76a7405ac889908287 celestiaorg +RUN ./download-machine.sh v3.3.2 0xaf1dbdfceb871c00bfbb1675983133df04f0ed04e89647812513c091e3a982b3 celestiaorg +RUN ./download-machine.sh consensus-v40 0x597de35fc2ee60e5b2840157370d037542d6a4bc587af7f88202636c54e6bd8d celestiaorg FROM golang:1.23.1-bookworm AS node-builder WORKDIR /workspace diff --git a/Makefile b/Makefile index f897df7e90..75760750e0 100644 --- a/Makefile +++ b/Makefile @@ -601,7 +601,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(prover_bin) go run solgen/gen.go @touch $@ -.make/solidity: $(DEP_PREDICATE) safe-smart-account/contracts/*/*.sol safe-smart-account/contracts/*.sol contracts/src/*/*.sol contracts-legacy/src/*/*.sol contracts-local/src/*/*.sol contracts-local/gas-dimensions/src/*.sol .make/yarndeps $(ORDER_ONLY_PREDICATE) .make +.make/solidity: $(DEP_PREDICATE) contracts/src/*/*.sol contracts-legacy/src/*/*.sol contracts-local/src/*/*.sol contracts-local/gas-dimensions/src/*.sol .make/yarndeps $(ORDER_ONLY_PREDICATE) .make yarn --cwd safe-smart-account build yarn --cwd contracts build yarn --cwd contracts build:forge:yul diff --git a/arbitrator/prover/src/lib.rs b/arbitrator/prover/src/lib.rs index 06dd333480..a888e5a4b2 100644 --- 
a/arbitrator/prover/src/lib.rs +++ b/arbitrator/prover/src/lib.rs @@ -470,3 +470,11 @@ pub unsafe extern "C" fn arbitrator_module_root(mach: *mut Machine) -> Bytes32 { pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine, out: *mut RustBytes) { (*out).write((*mach).serialize_proof()); } + +#[no_mangle] +pub unsafe extern "C" fn arbitrator_get_opcode(mach: *mut Machine) -> u16 { + match (*mach).get_next_instruction() { + Some(instruction) => return instruction.opcode.repr(), + None => panic!("Failed to get next opcode for Machine"), + } +} diff --git a/arbitrator/prover/src/machine.rs b/arbitrator/prover/src/machine.rs index 7c18908201..bf86c043a1 100644 --- a/arbitrator/prover/src/machine.rs +++ b/arbitrator/prover/src/machine.rs @@ -3095,6 +3095,13 @@ impl Machine { { data.push(0); // inbox proof type out!(msg_data); + match inbox_identifier { + InboxIdentifier::Sequencer => { + out!(msg_idx.to_be_bytes()); + data.push(0x0); + } + InboxIdentifier::Delayed => data.push(0x1), + } } } else { unreachable!() diff --git a/arbnode/batch_poster.go b/arbnode/batch_poster.go index 92f87fd65d..d0b297cbdd 100644 --- a/arbnode/batch_poster.go +++ b/arbnode/batch_poster.go @@ -106,8 +106,8 @@ type BatchPoster struct { bridgeAddr common.Address gasRefunderAddr common.Address building *buildingBatch - dapWriter daprovider.Writer dapReaders []daprovider.Reader + dapWriter daprovider.Writer dataPoster *dataposter.DataPoster redisLock *redislock.Simple messagesPerBatch *arbmath.MovingAverage[uint64] @@ -1670,6 +1670,7 @@ func (b *BatchPoster) MaybePostSequencerBatch(ctx context.Context) (bool, error) batchPosterDAFailureCounter.Inc(1) return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce) } + // #nosec G115 sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain) if err != nil { @@ -1679,6 +1680,7 @@ func 
(b *BatchPoster) MaybePostSequencerBatch(ctx context.Context) (bool, error) batchPosterDASuccessCounter.Inc(1) batchPosterDALastSuccessfulActionGauge.Update(time.Now().Unix()) + } prevMessageCount := batchPosition.MessageCount diff --git a/arbstate/inbox.go b/arbstate/inbox.go index 80de62b136..d2ae743fc2 100644 --- a/arbstate/inbox.go +++ b/arbstate/inbox.go @@ -106,6 +106,8 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash if !foundDA { if daprovider.IsDASMessageHeaderByte(payload[0]) { log.Error("No DAS Reader configured, but sequencer message found with DAS header") + } else if daprovider.IsCelestiaMessageHeaderByte(payload[0]) { + log.Error("No Celestia Reader configured, but sequencer message found with Celestia header") } else if daprovider.IsBlobHashesHeaderByte(payload[0]) { return nil, daprovider.ErrNoBlobReader } diff --git a/audits/celestia/arbitrum_nitro_celestia_audit_report.pdf b/audits/celestia/arbitrum_nitro_celestia_audit_report.pdf new file mode 100644 index 0000000000..a2fa72183c Binary files /dev/null and b/audits/celestia/arbitrum_nitro_celestia_audit_report.pdf differ diff --git a/cmd/deploy/deploy.go b/cmd/deploy/deploy.go index 14d772c76a..ab8914c4fa 100644 --- a/cmd/deploy/deploy.go +++ b/cmd/deploy/deploy.go @@ -138,6 +138,7 @@ func main() { } loserEscrowAddress := common.HexToAddress(*loserEscrowAddressString) + if sequencerAddress != (common.Address{}) && ownerAddress != l1TransactionOpts.From { panic("cannot specify sequencer address if owner is not deployer") } diff --git a/cmd/nitro/nitro.go b/cmd/nitro/nitro.go index 22d6321cf9..6a349a60a4 100644 --- a/cmd/nitro/nitro.go +++ b/cmd/nitro/nitro.go @@ -499,12 +499,6 @@ func mainImpl() int { return 1 } - if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee != nodeConfig.Node.DataAvailability.Enable { - flag.Usage() - log.Error(fmt.Sprintf("data availability service usage for this chain is set to %v but --node.data-availability.enable is 
set to %v", l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee, nodeConfig.Node.DataAvailability.Enable)) - return 1 - } - var valNode *valnode.ValidationNode if sameProcessValidationNodeEnabled { valNode, err = valnode.CreateValidationNode( @@ -582,9 +576,9 @@ func mainImpl() int { return 1 } } - // If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox’s maxDataSize if the data availability service is not enabled. + // If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox’s maxDataSize if the data availability service and celestia DA are not enabled. // The 10kB gap is because its possible for the batch poster to exceed its MaxSize limit and produce batches of slightly larger size. - if nodeConfig.Node.BatchPoster.Enable && !nodeConfig.Node.DataAvailability.Enable { + if nodeConfig.Node.BatchPoster.Enable && (!nodeConfig.Node.DataAvailability.Enable && !nodeConfig.Node.DAProvider.Enable) { if nodeConfig.Node.BatchPoster.MaxSize > seqInboxMaxDataSize-10000 { log.Error("batchPoster's MaxSize is too large") return 1 diff --git a/cmd/replay/main.go b/cmd/replay/main.go index a89f28489f..9102b07a33 100644 --- a/cmd/replay/main.go +++ b/cmd/replay/main.go @@ -6,6 +6,7 @@ package main import ( "bytes" "context" + "encoding/binary" "encoding/hex" "encoding/json" "fmt" @@ -33,6 +34,8 @@ import ( "github.com/offchainlabs/nitro/arbutil" "github.com/offchainlabs/nitro/cmd/chaininfo" "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/celestia/tree" + celestiaTypes "github.com/offchainlabs/nitro/daprovider/celestia/types" "github.com/offchainlabs/nitro/daprovider/das/dastree" "github.com/offchainlabs/nitro/daprovider/das/dasutil" "github.com/offchainlabs/nitro/gethhook" @@ -162,6 +165,126 @@ func (r *BlobPreimageReader) Initialize(ctx context.Context) error { return nil } +type PreimageCelestiaReader struct { +} + +func (dasReader *PreimageCelestiaReader) 
Read(ctx context.Context, blobPointer *celestiaTypes.BlobPointer) ([]byte, *celestiaTypes.SquareData, error) { + oracle := func(hash common.Hash) ([]byte, error) { + return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, hash) + } + + if blobPointer.SharesLength == 0 { + return nil, nil, fmt.Errorf("Error, shares length is %v", blobPointer.SharesLength) + } + // first, walk down the merkle tree + leaves, err := tree.MerkleTreeContent(oracle, common.BytesToHash(blobPointer.DataRoot[:])) + if err != nil { + log.Warn("Error revealing contents behind data root", "err", err) + return nil, nil, err + } + + squareSize := uint64(len(leaves)) / 2 + // split leaves in half to get row roots + rowRoots := leaves[:squareSize] + // We get the original data square size, which is (size_of_the_extended_square / 2) + odsSize := squareSize / 2 + + startRow := blobPointer.Start / odsSize + + if blobPointer.Start >= odsSize*odsSize { + // check that the square isn't just our share (very niche case, should only happen on local testing) + if blobPointer.Start != odsSize*odsSize && odsSize > 1 { + return nil, nil, fmt.Errorf("Error Start Index out of ODS bounds: index=%v odsSize=%v", blobPointer.Start, odsSize) + } + } + + // adjusted_end_index = adjusted_start_index + length - 1 + if blobPointer.Start+blobPointer.SharesLength < 1 { + return nil, nil, fmt.Errorf("Error getting number of shares in first row: index+length %v > 1", blobPointer.Start+blobPointer.SharesLength) + } + endIndexOds := blobPointer.Start + blobPointer.SharesLength - 1 + if endIndexOds >= odsSize*odsSize { + // check that the square isn't just our share (very niche case, should only happen on local testing) + if endIndexOds != odsSize*odsSize && odsSize > 1 { + return nil, nil, fmt.Errorf("Error End Index out of ODS bounds: index=%v odsSize=%v", endIndexOds, odsSize) + } + } + endRow := endIndexOds / odsSize + + if endRow >= odsSize || startRow >= odsSize { + return nil, nil, fmt.Errorf("Error rows out of 
bounds: startRow=%v endRow=%v odsSize=%v", startRow, endRow, odsSize) + } + + startColumn := blobPointer.Start % odsSize + endColumn := endIndexOds % odsSize + + if startRow == endRow && startColumn > endColumn { + log.Error("start and end row are the same, and startColumn >= endColumn", "startColumn", startColumn, "endColumn ", endColumn) + return []byte{}, nil, nil + } + + // adjust the math in the CelestiaPayload function in the inbox + + // we can take ods * ods -> end index in ods + // then we check that start index is in bounds, otherwise ignore -> return empty batch + // then we check that end index is in bounds, otherwise ignore + + // get rows behind row root and shares for our blob + rows := [][][]byte{} + shares := [][]byte{} + for i := startRow; i <= endRow; i++ { + row, err := tree.NmtContent(oracle, rowRoots[i]) + if err != nil { + return nil, nil, err + } + rows = append(rows, row) + + odsRow := row[:odsSize] + + // TODO explain the logic behind this branching + if startRow == endRow { + shares = append(shares, odsRow[startColumn:endColumn+1]...) + break + } else if i == startRow { + shares = append(shares, odsRow[startColumn:]...) + } else if i == endRow { + shares = append(shares, odsRow[:endColumn+1]...) + } else { + shares = append(shares, odsRow...) + } + } + + data := []byte{} + if tree.NamespaceSize*2+1 > uint64(len(shares[0])) || tree.NamespaceSize*2+5 > uint64(len(shares[0])) { + return nil, nil, fmt.Errorf("Error getting sequence length on share of size %v", len(shares[0])) + } + sequenceLength := binary.BigEndian.Uint32(shares[0][tree.NamespaceSize*2+1 : tree.NamespaceSize*2+5]) + for i, share := range shares { + // trim extra namespace + share := share[tree.NamespaceSize:] + if i == 0 { + data = append(data, share[tree.NamespaceSize+5:]...) + continue + } + data = append(data, share[tree.NamespaceSize+1:]...) 
+ } + + data = data[:sequenceLength] + squareData := celestiaTypes.SquareData{ + RowRoots: rowRoots, + ColumnRoots: leaves[squareSize:], + Rows: rows, + SquareSize: squareSize, + StartRow: startRow, + EndRow: endRow, + } + return data, &squareData, nil +} + +func (dasReader *PreimageCelestiaReader) GetProof(ctx context.Context, msg []byte) ([]byte, error) { + return nil, nil +} + // To generate: // key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001") // sig, _ := crypto.Sign(make([]byte, 32), key) @@ -219,27 +342,19 @@ func main() { } return wavmio.ReadInboxMessage(batchNum), nil } - readMessage := func(dasEnabled bool) *arbostypes.MessageWithMetadata { + readMessage := func() *arbostypes.MessageWithMetadata { var delayedMessagesRead uint64 if lastBlockHeader != nil { delayedMessagesRead = lastBlockHeader.Nonce.Uint64() } - var dasReader dasutil.DASReader - var dasKeysetFetcher dasutil.DASKeysetFetcher - if dasEnabled { - // DAS batch and keysets are all together in the same preimage binary. 
- dasReader = &PreimageDASReader{} - dasKeysetFetcher = &PreimageDASReader{} - } backend := WavmInbox{} var keysetValidationMode = daprovider.KeysetPanicIfInvalid if backend.GetPositionWithinMessage() > 0 { keysetValidationMode = daprovider.KeysetDontValidate } var dapReaders []daprovider.Reader - if dasReader != nil { - dapReaders = append(dapReaders, dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher)) - } + dapReaders = append(dapReaders, dasutil.NewReaderForDAS(&PreimageDASReader{}, &PreimageDASReader{})) + dapReaders = append(dapReaders, celestiaTypes.NewReaderForCelestia(&PreimageCelestiaReader{})) dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{})) inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode) ctx := context.Background() @@ -297,7 +412,10 @@ func main() { } } - message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee) + // need to add Celestia or just "ExternalDA" as an option to the ArbitrumChainParams + // for now we hard code this to true and hardcode Celestia in `readMessage` + // to test the integration + message := readMessage() chainContext := WavmChainContext{chainConfig: chainConfig} newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, false, core.MessageReplayMode) @@ -307,7 +425,7 @@ func main() { } else { // Initialize ArbOS with this init message and create the genesis block. 
- message := readMessage(false) + message := readMessage() initMessage, err := message.Message.ParseInitMessage() if err != nil { diff --git a/contracts b/contracts index bdb8f8c68b..8b17454ca8 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit bdb8f8c68b2229fe9309fe9c03b37017abd1a2cd +Subproject commit 8b17454ca87e4aa0c66d70844e6fbdcf57f7c43c diff --git a/daprovider/celestia/celestiaDasRpcClient.go b/daprovider/celestia/celestiaDasRpcClient.go new file mode 100644 index 0000000000..4b0778c600 --- /dev/null +++ b/daprovider/celestia/celestiaDasRpcClient.go @@ -0,0 +1,93 @@ +package celestia + +import ( + "context" + "fmt" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + "github.com/spf13/pflag" + + "github.com/ethereum/go-ethereum/rpc" + celestiaTypes "github.com/offchainlabs/nitro/daprovider/celestia/types" + "github.com/offchainlabs/nitro/util/pretty" +) + +type CelestiaConfig struct { + Enable bool `koanf:"enable"` + URL string `koanf:"url"` +} + +type CelestiaDASClient struct { + clnt *rpc.Client + url string +} + +func CelestiaDAConfigAddOptions(prefix string, f *pflag.FlagSet) { + f.Bool(prefix+".enable", false, "Enable Celestia DA") + f.String(prefix+".url", "http://localhost:9876", "address to use against Celestia DA RPC service") +} + +func NewCelestiaDASRPCClient(target string) (*CelestiaDASClient, error) { + clnt, err := rpc.Dial(target) + if err != nil { + log.Error("Could not dial to Celestia DAS", "err", err) + return nil, err + } + return &CelestiaDASClient{ + clnt: clnt, + url: target, + }, nil +} + +func (c *CelestiaDASClient) Store(ctx context.Context, message []byte) ([]byte, error) { + log.Trace("celestia.CelestiaDASClient.Store(...)", "message", pretty.FirstFewBytes(message)) + ret := []byte{} + if err := c.clnt.CallContext(ctx, &ret, "celestia_store", hexutil.Bytes(message)); err != nil { + return nil, err + } + log.Info("Got result from Celestia DAS", "result", ret) + return 
ret, nil +} + +func (c *CelestiaDASClient) String() string { + return fmt.Sprintf("CelestiaDASClient{url:%s}", c.url) +} + +type ReadResult struct { + Message []byte `json:"message"` + RowRoots [][]byte `json:"row_roots"` + ColumnRoots [][]byte `json:"column_roots"` + Rows [][][]byte `json:"rows"` + SquareSize uint64 `json:"square_size"` // Refers to original data square size + StartRow uint64 `json:"start_row"` + EndRow uint64 `json:"end_row"` +} + +func (c *CelestiaDASClient) Read(ctx context.Context, blobPointer *celestiaTypes.BlobPointer) ([]byte, *celestiaTypes.SquareData, error) { + log.Trace("celestia.CelestiaDASClient.Read(...)", "blobPointer", blobPointer) + var ret ReadResult + if err := c.clnt.CallContext(ctx, &ret, "celestia_read", blobPointer); err != nil { + return nil, nil, err + } + + squareData := celestiaTypes.SquareData{ + RowRoots: ret.RowRoots, + ColumnRoots: ret.ColumnRoots, + Rows: ret.Rows, + SquareSize: ret.SquareSize, + StartRow: ret.StartRow, + EndRow: ret.EndRow, + } + + return ret.Message, &squareData, nil +} + +func (c *CelestiaDASClient) GetProof(ctx context.Context, msg []byte) ([]byte, error) { + res := []byte{} + err := c.clnt.CallContext(ctx, &res, "celestia_getProof", msg) + if err != nil { + return nil, err + } + return res, nil +} diff --git a/daprovider/celestia/tree/hash.go b/daprovider/celestia/tree/hash.go new file mode 100644 index 0000000000..bef9fcd7de --- /dev/null +++ b/daprovider/celestia/tree/hash.go @@ -0,0 +1,37 @@ +package tree + +import ( + "github.com/offchainlabs/nitro/arbutil" + "github.com/tendermint/tendermint/crypto/tmhash" + + "github.com/ethereum/go-ethereum/common" +) + +// TODO: make these have a large predefined capacity +var ( + leafPrefix = []byte{0} + innerPrefix = []byte{1} +) + +// returns tmhash() +func emptyHash() []byte { + return tmhash.Sum([]byte{}) +} + +// returns tmhash(0x00 || leaf) +func leafHash(record func(bytes32, []byte, arbutil.PreimageType), leaf []byte) []byte { + preimage := 
append(leafPrefix, leaf...) + hash := tmhash.Sum(preimage) + + record(common.BytesToHash(hash), preimage, arbutil.Sha2_256PreimageType) + return hash +} + +// returns tmhash(0x01 || left || right) +func innerHash(record func(bytes32, []byte, arbutil.PreimageType), left []byte, right []byte) []byte { + preimage := append(innerPrefix, append(left, right...)...) + hash := tmhash.Sum(preimage) + + record(common.BytesToHash(hash), preimage, arbutil.Sha2_256PreimageType) + return tmhash.Sum(append(innerPrefix, append(left, right...)...)) +} diff --git a/daprovider/celestia/tree/merkle_tree.go b/daprovider/celestia/tree/merkle_tree.go new file mode 100644 index 0000000000..3fb8c19faf --- /dev/null +++ b/daprovider/celestia/tree/merkle_tree.go @@ -0,0 +1,78 @@ +package tree + +import ( + "math/bits" + + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbutil" +) + +type bytes32 = common.Hash + +// HashFromByteSlices computes a Merkle tree where the leaves are the byte slice, +// in the provided order. It follows RFC-6962. +func HashFromByteSlices(record func(bytes32, []byte, arbutil.PreimageType), items [][]byte) []byte { + switch len(items) { + case 0: + emptyHash := emptyHash() + record(common.BytesToHash(emptyHash), []byte{}, arbutil.Sha2_256PreimageType) + return emptyHash + case 1: + return leafHash(record, items[0]) + default: + k := getSplitPoint(int64(len(items))) + left := HashFromByteSlices(record, items[:k]) + right := HashFromByteSlices(record, items[k:]) + return innerHash(record, left, right) + } +} + +// getSplitPoint returns the largest power of 2 less than length +func getSplitPoint(length int64) int64 { + if length < 1 { + panic("Trying to split a tree with size < 1") + } + uLength := uint(length) + bitlen := bits.Len(uLength) + k := int64(1 << uint(bitlen-1)) + if k == length { + k >>= 1 + } + return k +} + +// getChildrenHashes splits the preimage into the hashes of the left and right children. 
+func getChildrenHashes(preimage []byte) (leftChild, rightChild common.Hash, err error) { + leftChild = common.BytesToHash(preimage[:32]) + rightChild = common.BytesToHash(preimage[32:]) + return leftChild, rightChild, nil +} + +// MerkleTreeContent recursively walks down the Merkle tree and collects leaf node data. +func MerkleTreeContent(oracle func(bytes32) ([]byte, error), rootHash common.Hash) ([][]byte, error) { + stack := []common.Hash{rootHash} + var data [][]byte + + for len(stack) > 0 { + currentHash := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + preimage, err := oracle(currentHash) + if err != nil { + return nil, err + } + + if preimage[0] == leafPrefix[0] { + data = append(data, preimage[1:]) + } else { + leftChildHash, rightChildHash, err := getChildrenHashes(preimage[1:]) + if err != nil { + return nil, err + } + stack = append(stack, rightChildHash) + stack = append(stack, leftChildHash) + } + } + + return data, nil +} diff --git a/daprovider/celestia/tree/nmt.go b/daprovider/celestia/tree/nmt.go new file mode 100644 index 0000000000..f0d2a7b953 --- /dev/null +++ b/daprovider/celestia/tree/nmt.go @@ -0,0 +1,74 @@ +package tree + +import ( + "errors" + + "github.com/celestiaorg/rsmt2d" + "github.com/ethereum/go-ethereum/common" +) + +// need to pass square size and axis index +func ComputeNmtRoot(createTreeFn rsmt2d.TreeConstructorFn, index uint, shares [][]byte) ([]byte, error) { + // create NMT with custom Hasher + // use create tree function, pass it to the ComputeNmtRoot function + tree := createTreeFn(rsmt2d.Row, index) + if !isComplete(shares) { + return nil, errors.New("can not compute root of incomplete row") + } + for _, d := range shares { + err := tree.Push(d) + if err != nil { + return nil, err + } + } + + return tree.Root() +} + +// isComplete returns true if all the shares are non-nil. 
+func isComplete(shares [][]byte) bool { + for _, share := range shares { + if share == nil { + return false + } + } + return true +} + +// getNmtChildrenHashes splits the preimage into the hashes of the left and right children of the NMT +// note that a leaf has the format minNID || maxNID || hash, here hash is the hash of the left and right +// (NodePrefix) || (leftMinNID || leftMaxNID || leftHash) || (rightMinNID || rightMaxNID || rightHash) +func getNmtChildrenHashes(hash []byte) (leftChild, rightChild []byte) { + hash = hash[1:] + flagLen := int(NamespaceSize * 2) + sha256Len := 32 + leftChild = hash[:flagLen+sha256Len] + rightChild = hash[flagLen+sha256Len:] + return leftChild, rightChild +} + +// walkMerkleTree recursively walks down the Merkle tree and collects leaf node data. +func NmtContent(oracle func(bytes32) ([]byte, error), rootHash []byte) ([][]byte, error) { + stack := [][]byte{rootHash} + var data [][]byte + + for len(stack) > 0 { + currentHash := stack[len(stack)-1] + stack = stack[:len(stack)-1] + + preimage, err := oracle(common.BytesToHash(currentHash[NamespaceSize*2:])) + if err != nil { + return nil, err + } + + if preimage[0] == leafPrefix[0] { + data = append(data, preimage[1:]) + } else { + leftChildHash, rightChildHash := getNmtChildrenHashes(preimage) + stack = append(stack, rightChildHash) + stack = append(stack, leftChildHash) + } + } + + return data, nil +} diff --git a/daprovider/celestia/tree/nmt_hasher.go b/daprovider/celestia/tree/nmt_hasher.go new file mode 100644 index 0000000000..c7c5a23cd8 --- /dev/null +++ b/daprovider/celestia/tree/nmt_hasher.go @@ -0,0 +1,43 @@ +package tree + +import ( + "crypto/sha256" + "hash" + + "github.com/ethereum/go-ethereum/common" + "github.com/offchainlabs/nitro/arbutil" +) + +// customHasher embeds hash.Hash and includes a map for the hash-to-preimage mapping +type NmtPreimageHasher struct { + hash.Hash + record func(bytes32, []byte, arbutil.PreimageType) + data []byte +} + +// Need to make 
sure this is writting relevant data into the tree +// Override the Sum method to capture the preimage +func (h *NmtPreimageHasher) Sum(b []byte) []byte { + hashed := h.Hash.Sum(nil) + hashKey := common.BytesToHash(hashed) + h.record(hashKey, append([]byte(nil), h.data...), arbutil.Sha2_256PreimageType) + return h.Hash.Sum(b) +} + +func (h *NmtPreimageHasher) Write(p []byte) (n int, err error) { + h.data = append(h.data, p...) + return h.Hash.Write(p) +} + +// Override the Reset method to clean the hash state and the data slice +func (h *NmtPreimageHasher) Reset() { + h.Hash.Reset() + h.data = h.data[:0] // Reset the data slice to be empty, but keep the underlying array +} + +func newNmtPreimageHasher(record func(bytes32, []byte, arbutil.PreimageType)) hash.Hash { + return &NmtPreimageHasher{ + Hash: sha256.New(), + record: record, + } +} diff --git a/daprovider/celestia/tree/nmt_wrapper.go b/daprovider/celestia/tree/nmt_wrapper.go new file mode 100644 index 0000000000..2ab8abd6a7 --- /dev/null +++ b/daprovider/celestia/tree/nmt_wrapper.go @@ -0,0 +1,175 @@ +package tree + +import ( + "bytes" + "fmt" + "math" + + "github.com/celestiaorg/nmt" + "github.com/celestiaorg/nmt/namespace" + "github.com/celestiaorg/rsmt2d" + "github.com/offchainlabs/nitro/arbutil" +) + +// NMT Wrapper from celestia-app with support for populating a mapping of preimages + +const ( + NamespaceSize uint64 = 29 + NamespaceIDSize = 28 + NamespaceVersionMax = math.MaxUint8 +) + +// Fulfills the rsmt2d.Tree interface and rsmt2d.TreeConstructorFn function +var ( + _ rsmt2d.Tree = &ErasuredNamespacedMerkleTree{} + ParitySharesNamespace = secondaryReservedNamespace(0xFF) +) + +func secondaryReservedNamespace(lastByte byte) Namespace { + return Namespace{ + Version: NamespaceVersionMax, + ID: append(bytes.Repeat([]byte{0xFF}, NamespaceIDSize-1), lastByte), + } +} + +type Namespace struct { + Version uint8 + ID []byte +} + +// Bytes returns this namespace as a byte slice. 
+func (n Namespace) Bytes() []byte { + return append([]byte{n.Version}, n.ID...) +} + +// ErasuredNamespacedMerkleTree wraps NamespaceMerkleTree to conform to the +// rsmt2d.Tree interface while also providing the correct namespaces to the +// underlying NamespaceMerkleTree. It does this by adding the already included +// namespace to the first half of the tree, and then uses the parity namespace +// ID for each share pushed to the second half of the tree. This allows for the +// namespaces to be included in the erasure data, while also keeping the nmt +// library sufficiently general +type ErasuredNamespacedMerkleTree struct { + squareSize uint64 // note: this refers to the width of the original square before erasure-coded + options []nmt.Option + tree Tree + // axisIndex is the index of the axis (row or column) that this tree is on. This is passed + // by rsmt2d and used to help determine which quadrant each leaf belongs to. + axisIndex uint64 + // shareIndex is the index of the share in a row or column that is being + // pushed to the tree. It is expected to be in the range: 0 <= shareIndex < + // 2*squareSize. shareIndex is used to help determine which quadrant each + // leaf belongs to, along with keeping track of how many leaves have been + // added to the tree so far. + shareIndex uint64 +} + +// Tree is an interface that wraps the methods of the underlying +// NamespaceMerkleTree that are used by ErasuredNamespacedMerkleTree. This +// interface is mainly used for testing. It is not recommended to use this +// interface by implementing a different implementation. +type Tree interface { + Root() ([]byte, error) + Push(namespacedData namespace.PrefixedData) error + ProveRange(start, end int) (nmt.Proof, error) +} + +// NewErasuredNamespacedMerkleTree creates a new ErasuredNamespacedMerkleTree +// with an underlying NMT of namespace size `29` and with +// `ignoreMaxNamespace=true`. axisIndex is the index of the row or column that +// this tree is committing to. 
squareSize must be greater than zero. +func NewErasuredNamespacedMerkleTree(record func(bytes32, []byte, arbutil.PreimageType), squareSize uint64, axisIndex uint, options ...nmt.Option) ErasuredNamespacedMerkleTree { + if squareSize == 0 { + panic("cannot create a ErasuredNamespacedMerkleTree of squareSize == 0") + } + options = append(options, nmt.NamespaceIDSize(29)) + options = append(options, nmt.IgnoreMaxNamespace(true)) + tree := nmt.New(newNmtPreimageHasher(record), options...) + return ErasuredNamespacedMerkleTree{squareSize: squareSize, options: options, tree: tree, axisIndex: uint64(axisIndex), shareIndex: 0} +} + +type constructor struct { + record func(bytes32, []byte, arbutil.PreimageType) + squareSize uint64 + opts []nmt.Option +} + +// NewConstructor creates a tree constructor function as required by rsmt2d to +// calculate the data root. It creates that tree using the +// wrapper.ErasuredNamespacedMerkleTree. +func NewConstructor(record func(bytes32, []byte, arbutil.PreimageType), squareSize uint64, opts ...nmt.Option) rsmt2d.TreeConstructorFn { + return constructor{ + record: record, + squareSize: squareSize, + opts: opts, + }.NewTree +} + +// NewTree creates a new rsmt2d.Tree using the +// wrapper.ErasuredNamespacedMerkleTree with predefined square size and +// nmt.Options +func (c constructor) NewTree(_ rsmt2d.Axis, axisIndex uint) rsmt2d.Tree { + newTree := NewErasuredNamespacedMerkleTree(c.record, c.squareSize, axisIndex, c.opts...) + return &newTree +} + +// Push adds the provided data to the underlying NamespaceMerkleTree, and +// automatically uses the first DefaultNamespaceIDLen number of bytes as the +// namespace unless the data pushed to the second half of the tree. Fulfills the +// rsmt.Tree interface. NOTE: panics if an error is encountered while pushing or +// if the tree size is exceeded. 
+func (w *ErasuredNamespacedMerkleTree) Push(data []byte) error { + if w.axisIndex+1 > 2*w.squareSize || w.shareIndex+1 > 2*w.squareSize { + return fmt.Errorf("pushed past predetermined square size: boundary at %d index at %d %d", 2*w.squareSize, w.axisIndex, w.shareIndex) + } + // + if len(data) < int(NamespaceSize) { + return fmt.Errorf("data is too short to contain namespace ID") + } + nidAndData := make([]byte, int(NamespaceSize)+len(data)) + copy(nidAndData[NamespaceSize:], data) + // use the parity namespace if the cell is not in Q0 of the extended data square + if w.isQuadrantZero() { + copy(nidAndData[:NamespaceSize], data[:NamespaceSize]) + } else { + copy(nidAndData[:NamespaceSize], ParitySharesNamespace.Bytes()) + } + err := w.tree.Push(nidAndData) + if err != nil { + return err + } + w.incrementShareIndex() + return nil +} + +// Root fulfills the rsmt.Tree interface by generating and returning the +// underlying NamespaceMerkleTree Root. +func (w *ErasuredNamespacedMerkleTree) Root() ([]byte, error) { + root, err := w.tree.Root() + if err != nil { + return nil, err + } + return root, nil +} + +// ProveRange returns a Merkle range proof for the leaf range [start, end] where `end` is non-inclusive. +func (w *ErasuredNamespacedMerkleTree) ProveRange(start, end int) (nmt.Proof, error) { + return w.tree.ProveRange(start, end) +} + +// incrementShareIndex increments the share index by one. +func (w *ErasuredNamespacedMerkleTree) incrementShareIndex() { + w.shareIndex++ +} + +// isQuadrantZero returns true if the current share index and axis index are both +// in the original data square. +func (w *ErasuredNamespacedMerkleTree) isQuadrantZero() bool { + return w.shareIndex < w.squareSize && w.axisIndex < w.squareSize +} + +// SetTree sets the underlying tree to the provided tree. This is used for +// testing purposes only. 
+func (w *ErasuredNamespacedMerkleTree) SetTree(tree Tree) { + w.tree = tree +} diff --git a/daprovider/celestia/types/blob.go b/daprovider/celestia/types/blob.go new file mode 100644 index 0000000000..f711cc0117 --- /dev/null +++ b/daprovider/celestia/types/blob.go @@ -0,0 +1,77 @@ +package types + +import ( + "bytes" + "encoding/binary" +) + + +// BlobPointer contains the reference to the data blob on Celestia +type BlobPointer struct { + BlockHeight uint64 `json:"block_height"` + Start uint64 `json:"start"` + SharesLength uint64 `json:"shares_length"` + TxCommitment [32]byte `json:"tx_commitment"` + DataRoot [32]byte `json:"data_root"` +} + +// MarshalBinary encodes the BlobPointer to binary +// serialization format: height + start + end + commitment + data root +func (b *BlobPointer) MarshalBinary() ([]byte, error) { + buf := new(bytes.Buffer) + + // Writing fixed-size values + if err := binary.Write(buf, binary.BigEndian, b.BlockHeight); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, b.Start); err != nil { + return nil, err + } + if err := binary.Write(buf, binary.BigEndian, b.SharesLength); err != nil { + return nil, err + } + + // Writing fixed-size byte arrays directly + if _, err := buf.Write(b.TxCommitment[:]); err != nil { + return nil, err + } + if _, err := buf.Write(b.DataRoot[:]); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary decodes the binary to BlobPointer +// serialization format: height + start + end + commitment + data root +func (b *BlobPointer) UnmarshalBinary(data []byte) error { + buf := bytes.NewReader(data) + // Reading fixed-size values + if err := binary.Read(buf, binary.BigEndian, &b.BlockHeight); err != nil { + return err + } + if err := binary.Read(buf, binary.BigEndian, &b.Start); err != nil { + return err + } + if err := binary.Read(buf, binary.BigEndian, &b.SharesLength); err != nil { + return err + } + + // Reading fixed-size byte arrays directly + if 
err := readFixedBytes(buf, b.TxCommitment[:]); err != nil { + return err + } + if err := readFixedBytes(buf, b.DataRoot[:]); err != nil { + return err + } + + return nil +} + +// readFixedBytes reads a fixed number of bytes into a byte slice +func readFixedBytes(buf *bytes.Reader, data []byte) error { + if _, err := buf.Read(data); err != nil { + return err + } + return nil +} diff --git a/daprovider/celestia/types/da_interface.go b/daprovider/celestia/types/da_interface.go new file mode 100644 index 0000000000..e6483d740a --- /dev/null +++ b/daprovider/celestia/types/da_interface.go @@ -0,0 +1,23 @@ +package types + +import ( + "context" +) + +type CelestiaWriter interface { + Store(context.Context, []byte) ([]byte, error) +} + +type SquareData struct { + RowRoots [][]byte `json:"row_roots"` + ColumnRoots [][]byte `json:"column_roots"` + Rows [][][]byte `json:"rows"` + SquareSize uint64 `json:"square_size"` // Refers to original data square size + StartRow uint64 `json:"start_row"` + EndRow uint64 `json:"end_row"` +} + +type CelestiaReader interface { + Read(context.Context, *BlobPointer) ([]byte, *SquareData, error) + GetProof(ctx context.Context, msg []byte) ([]byte, error) +} diff --git a/daprovider/celestia/types/reader.go b/daprovider/celestia/types/reader.go new file mode 100644 index 0000000000..d18b430481 --- /dev/null +++ b/daprovider/celestia/types/reader.go @@ -0,0 +1,136 @@ +package types + +import ( + "bytes" + "context" + "errors" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/offchainlabs/nitro/daprovider" + "github.com/offchainlabs/nitro/daprovider/celestia/tree" +) + +func NewReaderForCelestia(celestiaReader CelestiaReader) *readerForCelestia { + return &readerForCelestia{celestiaReader: celestiaReader} +} + +type readerForCelestia struct { + celestiaReader CelestiaReader +} + +func (c *readerForCelestia) IsValidHeaderByte(ctx context.Context, headerByte byte) bool { + return 
IsCelestiaMessageHeaderByte(headerByte) +} + +// CelestiaMessageHeaderFlag indicates that this data is a Blob Pointer +// which will be used to retrieve data from Celestia +const CelestiaMessageHeaderFlag byte = 0x63 + +func hasBits(checking byte, bits byte) bool { + return (checking & bits) == bits +} + +func IsCelestiaMessageHeaderByte(header byte) bool { + return hasBits(header, CelestiaMessageHeaderFlag) +} + +func (c *readerForCelestia) GetProof(ctx context.Context, msg []byte) ([]byte, error) { + return c.celestiaReader.GetProof(ctx, msg) +} + +func (c *readerForCelestia) RecoverPayloadFromBatch( + ctx context.Context, + batchNum uint64, + batchBlockHash common.Hash, + sequencerMsg []byte, + preimages daprovider.PreimagesMap, + validateSeqMsg bool, +) ([]byte, daprovider.PreimagesMap, error) { + return RecoverPayloadFromCelestiaBatch(ctx, batchNum, sequencerMsg, c.celestiaReader, preimages, validateSeqMsg) +} + +func RecoverPayloadFromCelestiaBatch( + ctx context.Context, + batchNum uint64, + sequencerMsg []byte, + celestiaReader CelestiaReader, + preimages daprovider.PreimagesMap, + validateSeqMsg bool, +) ([]byte, daprovider.PreimagesMap, error) { + var preimageRecorder daprovider.PreimageRecorder + if preimages != nil { + preimageRecorder = daprovider.RecordPreimagesTo(preimages) + } + buf := bytes.NewBuffer(sequencerMsg[40:]) + + header, err := buf.ReadByte() + if err != nil { + log.Error("Couldn't deserialize Celestia header byte", "err", err) + return nil, nil, nil + } + if !IsCelestiaMessageHeaderByte(header) { + log.Error("Couldn't deserialize Celestia header byte", "err", errors.New("tried to deserialize a message that doesn't have the Celestia header")) + return nil, nil, nil + } + + blobPointer := BlobPointer{} + blobBytes := buf.Bytes() + err = blobPointer.UnmarshalBinary(blobBytes) + if err != nil { + log.Error("Couldn't unmarshal Celestia blob pointer", "err", err) + return nil, nil, nil + } + + payload, squareData, err := 
celestiaReader.Read(ctx, &blobPointer) + if err != nil { + log.Error("Failed to resolve blob pointer from celestia", "err", err) + return nil, nil, err + } + + // we read a batch that is to be discarded, so we return the empty batch + if len(payload) == 0 { + return payload, nil, nil + } + + if preimageRecorder != nil { + if squareData == nil { + log.Error("squareData is nil, read from replay binary, but preimages are empty") + return nil, nil, err + } + + odsSize := squareData.SquareSize / 2 + rowIndex := squareData.StartRow + for _, row := range squareData.Rows { + treeConstructor := tree.NewConstructor(preimageRecorder, odsSize) + root, err := tree.ComputeNmtRoot(treeConstructor, uint(rowIndex), row) + if err != nil { + log.Error("Failed to compute row root", "err", err) + return nil, nil, err + } + + rowRootMatches := bytes.Equal(squareData.RowRoots[rowIndex], root) + if !rowRootMatches { + log.Error("Row roots do not match", "eds row root", squareData.RowRoots[rowIndex], "calculated", root) + log.Error("Row roots", "row_roots", squareData.RowRoots) + return nil, nil, err + } + rowIndex += 1 + } + + rowsCount := len(squareData.RowRoots) + slices := make([][]byte, rowsCount+rowsCount) + copy(slices[0:rowsCount], squareData.RowRoots) + copy(slices[rowsCount:], squareData.ColumnRoots) + + dataRoot := tree.HashFromByteSlices(preimageRecorder, slices) + + dataRootMatches := bytes.Equal(dataRoot, blobPointer.DataRoot[:]) + if !dataRootMatches { + log.Error("Data Root do not match", "blobPointer data root", blobPointer.DataRoot, "calculated", dataRoot) + return nil, nil, nil + } + } + + return payload, preimages, nil +} diff --git a/daprovider/celestia/types/writer.go b/daprovider/celestia/types/writer.go new file mode 100644 index 0000000000..bdf547fc83 --- /dev/null +++ b/daprovider/celestia/types/writer.go @@ -0,0 +1,30 @@ +package types + +import ( + "context" + "errors" +) + +func NewWriterForCelestia(celestiaWriter CelestiaWriter) *writerForCelestia { + return 
&writerForCelestia{celestiaWriter: celestiaWriter} +} + +type writerForCelestia struct { + celestiaWriter CelestiaWriter +} + +func (c *writerForCelestia) Store(ctx context.Context, message []byte, timeout uint64, disableFallbackStoreDataOnChain bool) ([]byte, error) { + msg, err := c.celestiaWriter.Store(ctx, message) + if err != nil { + if disableFallbackStoreDataOnChain { + return nil, errors.New("unable to batch to Celestia and fallback storing data on chain is disabled") + } + return nil, err + } + message = msg + return message, nil +} + +func (d *writerForCelestia) Type() string { + return "celestia" +} diff --git a/daprovider/das/dasserver/dasserver.go b/daprovider/das/dasserver/dasserver.go index c161db96c2..cc24927c9d 100644 --- a/daprovider/das/dasserver/dasserver.go +++ b/daprovider/das/dasserver/dasserver.go @@ -59,7 +59,7 @@ func ServerConfigAddOptions(prefix string, f *flag.FlagSet) { f.Uint64(prefix+".port", DefaultServerConfig.Port, "JSON rpc server listening port") f.String(prefix+".jwtsecret", DefaultServerConfig.JWTSecret, "path to file with jwtsecret for validation") f.Bool(prefix+".enable-da-writer", DefaultServerConfig.EnableDAWriter, "implies if the das server supports daprovider's writer interface") - f.Int("rpc-server-body-limit", DefaultServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit") + f.Int(prefix+".rpc-server-body-limit", DefaultServerConfig.RPCServerBodyLimit, "HTTP-RPC server maximum request body size in bytes; the default (0) uses geth's 5MB limit") das.DataAvailabilityConfigAddNodeOptions(prefix+".data-availability", f) genericconf.HTTPServerTimeoutConfigAddOptions(prefix+".server-timeouts", f) } diff --git a/daprovider/util.go b/daprovider/util.go index 7244498cfc..8a9ec7edd9 100644 --- a/daprovider/util.go +++ b/daprovider/util.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto/kzg4844" - 
"github.com/offchainlabs/nitro/arbutil" ) @@ -72,11 +71,15 @@ const ZeroheavyMessageHeaderFlag byte = 0x20 // BlobHashesHeaderFlag indicates that this message contains EIP 4844 versioned hashes of the commitments calculated over the blob data for the batch data. const BlobHashesHeaderFlag byte = L1AuthenticatedMessageHeaderFlag | 0x10 // 0x50 +// CelestiaMessageHeaderFlag indicates that this data is a Blob Pointer +// which will be used to retrieve data from Celestia +const CelestiaMessageHeaderFlag byte = 0x63 + // BrotliMessageHeaderByte indicates that the message is brotli-compressed. const BrotliMessageHeaderByte byte = 0 // KnownHeaderBits is all header bits with known meaning to this nitro version -const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte +const KnownHeaderBits byte = DASMessageHeaderFlag | TreeDASMessageHeaderFlag | CelestiaMessageHeaderFlag | L1AuthenticatedMessageHeaderFlag | ZeroheavyMessageHeaderFlag | BlobHashesHeaderFlag | BrotliMessageHeaderByte var DefaultDASRetentionPeriod time.Duration = time.Hour * 24 * 15 @@ -105,6 +108,10 @@ func IsBlobHashesHeaderByte(header byte) bool { return hasBits(header, BlobHashesHeaderFlag) } +func IsCelestiaMessageHeaderByte(header byte) bool { + return hasBits(header, CelestiaMessageHeaderFlag) +} + func IsBrotliMessageHeaderByte(b uint8) bool { return b == BrotliMessageHeaderByte } diff --git a/docs/celestia/docs.md b/docs/celestia/docs.md new file mode 100644 index 0000000000..eec2127375 --- /dev/null +++ b/docs/celestia/docs.md @@ -0,0 +1,60 @@ +# Orbit with Celestia Underneath ✨ +![image](https://github.com/celestiaorg/nitro/assets/31937514/dfe451b5-21ee-446b-8140-869ea4e2a7eb) + + +## Overview + +The integration of Celestia with Arbitrum Orbit and the Nitro tech stack marks the first external contribution to the Arbitrum Orbit protocol layer, offering developers an 
additional option for selecting a data availability layer alongside Arbitrum AnyTrust. The integration allows developers to deploy an Orbit Chain that uses Celestia for data availability and settles on Arbitrum One, Ethereum, or other EVM chains. + +## Key Components + +The integration of Celestia with Arbitrum orbit is possible thanks to 3 components: +- DA Provider Implementation +- Preimage Oracle +- Blobstream + +# DA Provider Implementation + +The Arbitrum Nitro code has a `DataAvailabilityProvider` interface that is used across the codebase to store and retrieve data from a specific provider (eip4844 blobs, Anytrust, and now Celestia). + +This integration implements the [`DataAvailabilityProvider` interface for Celestia DA](https://github.com/celestiaorg/nitro/blob/966e631f1a03b49d49f25bea67a92b275d3bacb9/arbstate/inbox.go#L366-L477) + +Additionally, this integration comes with the necessary code for a Nitro chain node to post and retrieve data from Celestia, which can be found [here.](https://github.com/celestiaorg/nitro/tree/celestia-v2.3.1/das/celestia) + +The core logic behind posting and retrieving data happens in [celestia.go](https://github.com/celestiaorg/nitro/blob/celestia-v2.3.1/das/celestia/celestia.go) where data is stored on Celestia and serialized into a small batch of data that gets published once the necessary range of headers (data roots) has been relayed to the [BlobstreamX contract](https://github.com/succinctlabs/blobstreamx). +Then the `Read` logic takes care of taking the deserialized Blob Pointer struct and consuming it in order to fetch the data from Celestia and additionally inform the fetcher about the position of the data on Celestia (we'll get back to this in the next section). 
+ +The following represents a non-exhaustive list of considerations when running a Batch Poster node for a chain with Celestia underneath: +- You will need to use a consensus full node RPC endpoint, you can find a list of them for Mocha [here](https://docs.celestia.org/nodes/mocha-testnet#rpc-endpoints) +- The Batch Poster will only post a Celestia batch to the underlying chain if the height for which it posted is in a recent range in BlobstreamX and if the verification succeeds, otherwise it will discard the batch. Since it will wait until a range is relayed, it can take several minutes for a batch to be posted, but one can always make an on-chain request for the BlobstreamX contract to relay a header promptly. + +The following represents a non-exhaustive list of considerations when running a Nitro node for a chain with Celestia underneath: +- The `TendermintRpc` endpoint is only needed by the batch poster, every other node can operate without a connection to a full node. +- The message header flag for Celestia batches is `0x63`. +- You will need to know the namespace for the chain that you are trying to connect to, but don't worry if you don't find it, as the information in the BlobPointer can be used to identify where a batch of data is in the Celestia Data Square for a given height, and thus can be used to find out the namespace as well! + +# Preimage Oracle Implementation + +In order to support fraud proofs, this integration has the necessary code for a Nitro validator to populate its preimage mapping with Celestia hashes that then get "unpeeled" in order to reveal the full data for a Blob. 
You can read more about the "Hash Oracle Trick" [here.](https://docs.arbitrum.io/inside-arbitrum-nitro/#readpreimage-and-the-hash-oracle-trick) + +The data structures and hashing functions for this can be found in the [`nitro/das/celestia/tree` folder](https://github.com/celestiaorg/nitro/tree/celestia-v2.3.1/das/celestia/tree) + +You can see where the preimage oracle gets used in the fraud proof replay binary [here](https://github.com/celestiaorg/nitro/blob/966e631f1a03b49d49f25bea67a92b275d3bacb9/cmd/replay/main.go#L153-L294) + +Something important to note is that the preimage oracle only keeps track of hashes for the rows in the Celestia data square in which a blob resides, this way each Orbit chain with Celestia underneath does not need validators to recompute an entire Celestia Data Square, but instead, only has to compute the row roots for the rows in which its data lives, and the header data root, which is the binary merkle tree hash built using the row roots and column roots fetched from a Celestia node. Because only data roots that can be confirmed on Blobstream get accepted into the sequencer inbox, one can have a high degree of certainty that the canonical data root being unpeeled as well as the row roots are in fact correct. + +# DA Proof and BlobstreamX + +Finally, the integration only accepts batches of 89 bytes in length for a celestia header flag. This means that a Celestia Batch has 88 bytes of information, which are the block height, the start index of the blob, the length in shares of the blob, the transaction commitment, and the data root for the given height. + +In the case of a challenge, for a celestia batch, the OSP will require an additionally appended "da proof", which is verified against BlobstreamX. 
Here's what happens based on the result of the BlobstreamX verification: + +- **IN_BLOBSTREAM**: means the batch was verified against blobstream, the height and data root in the batch match, and the start + length do not go out of bounds. This will cause the rest of the OSP to proceed as normal. +- **COUNTERFACTUAL_COMMITMENT**: the height can be verified against blobstream, but the posted data root does not match, or the start + length go out of bounds. Or the Batch Poster tried posting a height too far into the future (1000 blocks ahead of BlobstreamX). This will cause the OSP to proceed with an empty batch. Note that Nitro nodes for a chain with Celestia DA will also discard any batches that cannot be correctly validated. +- **UNDECIDED**: the height has not been relayed yet, so we revert and wait until the latest height in blobstream is greater than the batch's height. + +You can see how BlobstreamX is integrated into the `OneStepProverHostIO.sol` contract [here](https://github.com/celestiaorg/nitro-contracts/blob/contracts-v1.2.1/src/osp/OneStepProverHostIo.sol#L301), which allows us to discard batches with otherwise faulty data roots, thus giving us a high degree of confidence that the data root can be safely unpacked in case of a challenge. 
+ + + diff --git a/go.mod b/go.mod index 331e6858f9..dd350d2fa8 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/s3 v1.64.1 github.com/cavaliergopher/grab/v3 v3.0.1 github.com/ccoveille/go-safecast v1.1.0 + github.com/celestiaorg/nmt v0.20.0 + github.com/celestiaorg/rsmt2d v0.11.0 github.com/cockroachdb/pebble v1.1.2 github.com/codeclysm/extract/v3 v3.0.2 github.com/dgraph-io/badger/v4 v4.2.0 @@ -42,7 +44,7 @@ require ( github.com/knadh/koanf v1.4.0 github.com/mailru/easygo v0.0.0-20190618140210-3c14a0dc985f github.com/mattn/go-sqlite3 v1.14.22 - github.com/mitchellh/mapstructure v1.4.1 + github.com/mitchellh/mapstructure v1.5.0 github.com/offchainlabs/bold v0.0.3-0.20250313062923-4b76649f2abc github.com/pkg/errors v0.9.1 github.com/r3labs/diff/v3 v3.0.1 @@ -51,6 +53,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.10.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + github.com/tendermint/tendermint v0.34.29 github.com/wealdtech/go-merkletree v1.0.0 go.uber.org/automaxprocs v1.5.2 golang.org/x/crypto v0.36.0 @@ -68,6 +71,7 @@ require ( cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect + github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 // indirect github.com/cockroachdb/fifo v0.0.0-20240606204812-0bbfbd93a7ce // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -166,6 +170,8 @@ require ( github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5 // indirect github.com/juju/loggo v0.0.0-20180524022052-584905176618 // indirect github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/klauspost/reedsolomon v1.11.8 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/lucasb-eyer/go-colorful 
v1.2.0 // indirect @@ -187,7 +193,7 @@ require ( github.com/rhnvrm/simples3 v0.6.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect - github.com/rs/cors v1.7.0 // indirect + github.com/rs/cors v1.8.2 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/supranational/blst v0.3.14 // indirect @@ -205,3 +211,8 @@ require ( golang.org/x/oauth2 v0.22.0 rsc.io/tmplfunc v0.0.3 // indirect ) + +replace ( + github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 + github.com/tendermint/tendermint => github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 +) diff --git a/go.sum b/go.sum index 3812f81fae..6f556dcd6c 100644 --- a/go.sum +++ b/go.sum @@ -99,6 +99,14 @@ github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIH github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= github.com/ccoveille/go-safecast v1.1.0 h1:iHKNWaZm+OznO7Eh6EljXPjGfGQsSfa6/sxPlIEKO+g= github.com/ccoveille/go-safecast v1.1.0/go.mod h1:QqwNjxQ7DAqY0C721OIO9InMk9zCwcsO7tnRuHytad8= +github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29 h1:Fd7ymPUzExPGNl2gZw4i5S74arMw+iDHLE78M/cCxl4= +github.com/celestiaorg/celestia-core v1.29.0-tm-v0.34.29/go.mod h1:xrICN0PBhp3AdTaZ8q4wS5Jvi32V02HNjaC2EsWiEKk= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4 h1:CJdIpo8n5MFP2MwK0gSRcOVlDlFdQJO1p+FqdxYzmvc= +github.com/celestiaorg/merkletree v0.0.0-20210714075610-a84dc3ddbbe4/go.mod h1:fzuHnhzj1pUygGz+1ZkB3uQbEUL4htqCGJ4Qs2LwMZA= +github.com/celestiaorg/nmt v0.20.0 h1:9i7ultZ8Wv5ytt8ZRaxKQ5KOOMo4A2K2T/aPGjIlSas= +github.com/celestiaorg/nmt v0.20.0/go.mod h1:Oz15Ub6YPez9uJV0heoU4WpFctxazuIhKyUtaYNio7E= +github.com/celestiaorg/rsmt2d v0.11.0 h1:lcto/637WyTEZR3dLRoNvyuExfnUbxvdvKi3qz/2V4k= +github.com/celestiaorg/rsmt2d v0.11.0/go.mod 
h1:6Y580I3gVr0+OVFfW6m2JTwnCCmvW3WfbwSLfuT+HCA= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= @@ -222,8 +230,6 @@ github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484 h1:XC9N1eiAyO1z github.com/gobwas/ws-examples v0.0.0-20190625122829-a9e8908d9484/go.mod h1:5nDZF4afNA1S7ZKcBXCMvDo4nuCTp1931DND7/W4aXo= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -236,6 +242,8 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -355,6 +363,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/reedsolomon v1.11.8 h1:s8RpUW5TK4hjr+djiOpbZJB4ksx+TdYbRH7vHQpwPOY= +github.com/klauspost/reedsolomon v1.11.8/go.mod h1:4bXRN+cVzMdml6ti7qLouuYi32KHJ5MGv0Qd8a47h6A= github.com/knadh/koanf v1.4.0 h1:/k0Bh49SqLyLNfte9r6cvuZWrApOQhglOmhIU3L/zDw= github.com/knadh/koanf v1.4.0/go.mod h1:1cfH5223ZeZUOs8FU2UdTmaNfHpqgtjV0+NHjRO43gs= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -399,8 +411,9 @@ github.com/mitchellh/go-testing-interface v0.0.0-20171004221916-a61a99592b77/go. 
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -426,8 +439,9 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.7.0 h1:7utD74fnzVc/cpcyy8sjrlFr5vYpypUixARcHIMIGuI= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= @@ -462,6 +476,8 @@ 
github.com/r3labs/diff/v3 v3.0.1 h1:CBKqf3XmNRHXKmdU7mZP1w7TV0pDyVCis1AUHtA4Xtg= github.com/r3labs/diff/v3 v3.0.1/go.mod h1:f1S9bourRbiM66NskseyUdo0fTmEE0qKrikYJX63dgo= github.com/redis/go-redis/v9 v9.6.3 h1:8Dr5ygF1QFXRxIH/m3Xg9MMG1rS8YCtAgosrsewT6i0= github.com/redis/go-redis/v9 v9.6.3/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= +github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= github.com/rhnvrm/simples3 v0.6.1 h1:H0DJwybR6ryQE+Odi9eqkHuzjYAeJgtGcGtuBwOhsH8= github.com/rhnvrm/simples3 v0.6.1/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rivo/tview v0.0.0-20240307173318-e804876934a1 h1:bWLHTRekAy497pE7+nXSuzXwwFHI0XauRzz6roUvY+s= @@ -474,8 +490,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -505,6 +521,12 @@ github.com/supranational/blst v0.3.14 h1:xNMoHRJOTwMn63ip6qoWJ2Ymgvj7E2b9jY2FAwY github.com/supranational/blst v0.3.14/go.mod 
h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM= +github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -526,6 +548,11 @@ github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +gitlab.com/NebulousLabs/errors v0.0.0-20171229012116-7ead97ef90b8/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975 h1:L/ENs/Ar1bFzUeKx6m3XjlmBgIUlykX9dzvp5k9NGxc= +gitlab.com/NebulousLabs/errors v0.0.0-20200929122200-06c536cf6975/go.mod h1:ZkMZ0dpQyWwlENaeZVBiQRjhMEZvk6VTXquzl3FOFP8= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40 h1:dizWJqTWjwyD8KGcMOwgrkqu1JIkofYgKkmDeNE7oAs= +gitlab.com/NebulousLabs/fastrand v0.0.0-20181126182046-603482d69e40/go.mod 
h1:rOnSnoRyxMI3fe/7KIbVcsHRGxe30OONv8dEgo+vCfA= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= @@ -547,6 +574,7 @@ go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20180214000028-650f4a345ab4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200109152110-61a87790db17/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= @@ -574,6 +602,7 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -685,6 +714,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d h1:PksQg4dV6Sem3/HkBX+Ltq8T0ke0PKIRBNBatoDTVls= google.golang.org/genproto v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:s7iA721uChleev562UJO2OYB0PPT9CMFjV+Ce7VJH5M= @@ -698,6 +728,7 @@ google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= diff --git a/nitro-testnode b/nitro-testnode index f695cb8981..b5dd956575 160000 --- a/nitro-testnode +++ b/nitro-testnode @@ -1 +1 @@ -Subproject commit f695cb8981fda310ad688fd27c12d52c073f7aec +Subproject commit b5dd95657554d3ebd3f8bd2767871de79437e636 diff --git a/scripts/download-machine.sh b/scripts/download-machine.sh index 
3022c350a0..8e93f16854 100755 --- a/scripts/download-machine.sh +++ b/scripts/download-machine.sh @@ -5,10 +5,11 @@ mkdir "$2" ln -sfT "$2" latest cd "$2" echo "$2" > module-root.txt -url_base="https://github.com/OffchainLabs/nitro/releases/download/$1" +url_org="${3:-OffchainLabs}" +url_base="https://github.com/$url_org/nitro/releases/download/$1" wget "$url_base/machine.wavm.br" status_code="$(curl -LI "$url_base/replay.wasm" -so /dev/null -w '%{http_code}')" if [ "$status_code" -ne 404 ]; then wget "$url_base/replay.wasm" -fi +fi \ No newline at end of file diff --git a/solgen/gen.go b/solgen/gen.go index 878a90ce77..6290b2665e 100644 --- a/solgen/gen.go +++ b/solgen/gen.go @@ -155,6 +155,7 @@ func main() { if err := json.Unmarshal(data, &artifact); err != nil { log.Fatal("failed to parse contract", name, err) } + fmt.Printf("Contract name: %v\n", name) yulModInfo.addArtifact(HardHatArtifact{ ContractName: name, Abi: artifact.Abi, diff --git a/staker/legacy/challenge_manager.go b/staker/legacy/challenge_manager.go index 7db7cc2653..ef3ccf07e8 100644 --- a/staker/legacy/challenge_manager.go +++ b/staker/legacy/challenge_manager.go @@ -4,6 +4,7 @@ package legacystaker import ( + "bytes" "context" "encoding/binary" "errors" @@ -21,6 +22,7 @@ import ( "github.com/ethereum/go-ethereum/rpc" "github.com/offchainlabs/nitro/arbutil" + celestiaTypes "github.com/offchainlabs/nitro/daprovider/celestia/types" "github.com/offchainlabs/nitro/solgen/go/challenge_legacy_gen" "github.com/offchainlabs/nitro/staker" "github.com/offchainlabs/nitro/validator" @@ -34,6 +36,8 @@ var initiatedChallengeID common.Hash var challengeBisectedID common.Hash var executionChallengeBegunID common.Hash +const ReadInboxMessage uint16 = 0x8021 + func init() { parsedChallengeManagerABI, err := challenge_legacy_gen.ChallengeManagerMetaData.GetAbi() if err != nil { @@ -450,6 +454,11 @@ func (m *ChallengeManager) IssueOneStepProof( if err != nil { return nil, fmt.Errorf("error getting OSP from 
challenge %v backend at step %v: %w", m.challengeIndex, position, err) } + proof, err = m.getDAProof(ctx, proof) + if err != nil { + return nil, fmt.Errorf("error getting DA Proof for OSP for challenge %v at step %v: %w", m.challengeIndex, position, err) + } + return m.challengeCore.con.OneStepProveExecution( m.challengeCore.auth, m.challengeCore.challengeIndex, @@ -584,3 +593,57 @@ func (m *ChallengeManager) Act(ctx context.Context) (*types.Transaction, error) machineStepCount, ) } + +func (m *ChallengeManager) getDAProof(ctx context.Context, proof []byte) ([]byte, error) { + // get the proof's opcode + opCodeBytes := proof[len(proof)-2:] + opCode := binary.BigEndian.Uint16(opCodeBytes) + // remove opcode bytes + proof = proof[:len(proof)-2] + if opCode == ReadInboxMessage { + messageType := proof[len(proof)-1] + // remove inbox message type byte + proof = proof[:len(proof)-1] + if messageType == 0x0 { + // Read the last 8 bytes as a uint64 to get our batch number + batchNumBytes := proof[len(proof)-8:] + batchNum := binary.BigEndian.Uint64(batchNumBytes) + batchData, _, err := m.validator.InboxReader().GetSequencerMessageBytes(ctx, batchNum) + if err != nil { + log.Error("Couldn't get sequencer message bytes", "err", err) + return nil, err + } + + buf := bytes.NewBuffer(batchData[40:]) + + header, err := buf.ReadByte() + if err != nil { + log.Error("Couldn't deserialize Celestia header byte", "err", err) + return nil, nil + } + daProof := []byte{} + if celestiaTypes.IsCelestiaMessageHeaderByte(header) { + log.Info("Fetching da proof for Celestia", "batchNum", batchNum) + blobBytes := buf.Bytes() + + var celestiaReader celestiaTypes.CelestiaReader + for _, dapReader := range m.validator.DapReaders() { + switch reader := dapReader.(type) { + case celestiaTypes.CelestiaReader: + celestiaReader = reader + } + } + daProof, err = celestiaReader.GetProof(ctx, blobBytes) + if err != nil { + return nil, err + } + } + + // remove batch number from proof + proof = 
proof[:len(proof)-8] + proof = append(proof, daProof...) + } + } + + return proof, nil +} diff --git a/staker/legacy/mock_machine_test.go b/staker/legacy/mock_machine_test.go index 37f22435b7..71a63e691f 100644 --- a/staker/legacy/mock_machine_test.go +++ b/staker/legacy/mock_machine_test.go @@ -106,6 +106,10 @@ func (m *IncorrectMachine) ProveNextStep() []byte { return m.inner.ProveNextStep() } +func (m *IncorrectMachine) GetNextOpcode() uint16 { + return m.inner.GetNextOpcode() +} + func (m *IncorrectMachine) Freeze() { m.inner.Freeze() } diff --git a/staker/stateless_block_validator.go b/staker/stateless_block_validator.go index 65f835b30f..d863356909 100644 --- a/staker/stateless_block_validator.go +++ b/staker/stateless_block_validator.go @@ -299,10 +299,18 @@ func (v *StatelessBlockValidator) ExecutionSpawners() []validator.ExecutionSpawn return v.execSpawners } +func (v *StatelessBlockValidator) InboxReader() InboxReaderInterface { + return v.inboxReader +} + func (v *StatelessBlockValidator) BOLDExecutionSpawners() []validator.BOLDExecutionSpawner { return v.boldExecSpawners } +func (v *StatelessBlockValidator) DapReaders() []daprovider.Reader { + return v.dapReaders +} + func (v *StatelessBlockValidator) readFullBatch(ctx context.Context, batchNum uint64) (bool, *FullBatchInfo, error) { batchCount, err := v.inboxTracker.GetBatchCount() if err != nil { diff --git a/system_tests/full_celestia_challenge_test.backup_go b/system_tests/full_celestia_challenge_test.backup_go new file mode 100644 index 0000000000..3ff9d62793 --- /dev/null +++ b/system_tests/full_celestia_challenge_test.backup_go @@ -0,0 +1,481 @@ +// Copyright 2021-2022, Offchain Labs, Inc. 
+// For license information, see https://github.com/nitro/blob/master/LICENSE + +// race detection makes things slow and miss timeouts +//go:build !race +// +build !race + +package arbtest + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math/big" + "net/http" + _ "net/http/pprof" + "os" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + + "github.com/offchainlabs/nitro/arbcompress" + "github.com/offchainlabs/nitro/arbnode" + "github.com/offchainlabs/nitro/arbos" + "github.com/offchainlabs/nitro/arbstate" + "github.com/offchainlabs/nitro/arbstate/daprovider" + "github.com/offchainlabs/nitro/das/celestia" + celestiaTypes "github.com/offchainlabs/nitro/das/celestia/types" + "github.com/offchainlabs/nitro/execution/gethexec" + "github.com/offchainlabs/nitro/solgen/go/mocksgen" + "github.com/offchainlabs/nitro/solgen/go/ospgen" + + "github.com/offchainlabs/nitro/staker" + "github.com/offchainlabs/nitro/validator" + "github.com/offchainlabs/nitro/validator/server_common" + "github.com/offchainlabs/nitro/validator/valnode" +) + +func init() { + go func() { + fmt.Println(http.ListenAndServe("localhost:6060", nil)) + }() +} + +func DeployOneStepProofEntryCelestia(t *testing.T, ctx context.Context, auth *bind.TransactOpts, client *ethclient.Client) common.Address { + osp0, tx, _, err := ospgen.DeployOneStepProver0(auth, client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospMem, tx, _, err := ospgen.DeployOneStepProverMemory(auth, client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospMath, tx, _, err := ospgen.DeployOneStepProverMath(auth, 
client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospHostIo, tx, _, err := mocksgen.DeployOneStepProverHostIoCelestiaMock(auth, client) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + ospEntry, tx, _, err := ospgen.DeployOneStepProofEntry(auth, client, osp0, ospMem, ospMath, ospHostIo) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, client, tx) + Require(t, err) + + return ospEntry +} + +func writeTxToCelestiaBatch(writer io.Writer, tx *types.Transaction) error { + txData, err := tx.MarshalBinary() + if err != nil { + return err + } + var segment []byte + segment = append(segment, arbstate.BatchSegmentKindL2Message) + segment = append(segment, arbos.L2MessageKind_SignedTx) + segment = append(segment, txData...) + err = rlp.Encode(writer, segment) + return err +} + +func makeCelestiaBatch(t *testing.T, l2Node *arbnode.Node, celestiaDA *celestia.CelestiaDASClient, undecided bool, counterfactual bool, mockStream *mocksgen.Mockstream, deployer *bind.TransactOpts, l2Info *BlockchainTestInfo, backend *ethclient.Client, sequencer *bind.TransactOpts, seqInbox *mocksgen.SequencerInboxStub, seqInboxAddr common.Address, modStep int64) { + ctx := context.Background() + + batchBuffer := bytes.NewBuffer([]byte{}) + for i := int64(0); i < makeBatch_MsgsPerBatch; i++ { + value := i + if i == modStep { + value++ + } + err := writeTxToCelestiaBatch(batchBuffer, l2Info.PrepareTx("Owner", "Destination", 1000000, big.NewInt(value), []byte{})) + Require(t, err) + } + compressed, err := arbcompress.CompressWell(batchBuffer.Bytes()) + Require(t, err) + message := append([]byte{0}, compressed...) 
+ message, err = celestiaDA.Store(ctx, message) + Require(t, err) + + buf := bytes.NewBuffer(message) + + header, err := buf.ReadByte() + Require(t, err) + if !celestiaTypes.IsCelestiaMessageHeaderByte(header) { + err := errors.New("tried to deserialize a message that doesn't have the Celestia header") + Require(t, err) + } + + blobPointer := celestiaTypes.BlobPointer{} + blobBytes := buf.Bytes() + err = blobPointer.UnmarshalBinary(blobBytes) + Require(t, err) + + dataCommitment, err := celestiaDA.Prover.Trpc.DataCommitment(ctx, blobPointer.BlockHeight-1, blobPointer.BlockHeight+1) + if err != nil { + t.Log("Error when fetching data commitment:", err) + } + Require(t, err) + mockStream.SubmitDataCommitment(deployer, [32]byte(dataCommitment.DataCommitment), blobPointer.BlockHeight-1, blobPointer.BlockHeight+1) + if counterfactual { + mockStream.UpdateGenesisState(deployer, (blobPointer.BlockHeight - 1100)) + } else if undecided { + t.Log("Block Height before change: ", blobPointer.BlockHeight) + mockStream.UpdateGenesisState(deployer, (blobPointer.BlockHeight - 100)) + } + seqNum := new(big.Int).Lsh(common.Big1, 256) + seqNum.Sub(seqNum, common.Big1) + tx, err := seqInbox.AddSequencerL2BatchFromOrigin8f111f3c(sequencer, seqNum, message, big.NewInt(1), common.Address{}, big.NewInt(0), big.NewInt(0)) + Require(t, err) + receipt, err := EnsureTxSucceeded(ctx, backend, tx) + Require(t, err) + + nodeSeqInbox, err := arbnode.NewSequencerInbox(backend, seqInboxAddr, 0) + Require(t, err) + batches, err := nodeSeqInbox.LookupBatchesInRange(ctx, receipt.BlockNumber, receipt.BlockNumber) + Require(t, err) + if len(batches) == 0 { + Fatal(t, "batch not found after AddSequencerL2BatchFromOrigin") + } + err = l2Node.InboxTracker.AddSequencerBatches(ctx, backend, batches) + Require(t, err) + _, err = l2Node.InboxTracker.GetBatchMetadata(0) + Require(t, err, "failed to get batch metadata after adding batch:") +} + +func RunCelestiaChallengeTest(t *testing.T, asserterIsCorrect bool, 
useStubs bool, challengeMsgIdx int64, undecided bool, counterFactual bool) { + + glogger := log.NewGlogHandler( + log.NewTerminalHandler(io.Writer(os.Stderr), false)) + glogger.Verbosity(log.LvlInfo) + log.SetDefault(log.NewLogger(glogger)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + initialBalance := new(big.Int).Lsh(big.NewInt(1), 200) + l1Info := NewL1TestInfo(t) + l1Info.GenerateGenesisAccount("deployer", initialBalance) + l1Info.GenerateGenesisAccount("asserter", initialBalance) + l1Info.GenerateGenesisAccount("challenger", initialBalance) + l1Info.GenerateGenesisAccount("sequencer", initialBalance) + + chainConfig := params.ArbitrumDevTestChainConfig() + l1Info, l1Backend, _, _ := createTestL1BlockChain(t, l1Info) + conf := arbnode.ConfigDefaultL1Test() + conf.BlockValidator.Enable = false + conf.BatchPoster.Enable = false + conf.InboxReader.CheckDelay = time.Second + + deployerTxOpts := l1Info.GetDefaultTransactOpts("deployer", ctx) + blobstream, tx, mockStreamWrapper, err := mocksgen.DeployMockstream(&deployerTxOpts, l1Backend) + Require(t, err) + _, err = EnsureTxSucceeded(ctx, l1Backend, tx) + Require(t, err) + + conf.Celestia = celestia.DAConfig{ + Enable: true, + GasPrice: 0.1, + Rpc: "http://localhost:26658", + NamespaceId: "000008e5f679bf7116cb", + AuthToken: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJwdWJsaWMiLCJyZWFkIiwid3JpdGUiLCJhZG1pbiJdfQ.8iCpZJaiui7QPTCj4m5f2M7JyHkJtr6Xha0bmE5Vv7Y", + ValidatorConfig: &celestia.ValidatorConfig{ + TendermintRPC: "http://localhost:26657", + BlobstreamAddr: blobstream.Hex(), + }, + } + + t.Log("Blobstream Address: ", blobstream.Hex()) + + celestiaDa, err := celestia.NewCelestiaDA(&conf.Celestia, l1Backend) + Require(t, err) + // Initialize Mockstream before the tests + header, err := celestiaDa.Client.Header.NetworkHead(ctx) + Require(t, err) + mockStreamWrapper.Initialize(&deployerTxOpts, header.Height()) + + var valStack *node.Node + var mockSpawn *mockSpawner + if 
useStubs { + mockSpawn, valStack = createMockValidationNode(t, ctx, &valnode.TestValidationConfig.Arbitrator) + } else { + _, valStack = createTestValidationNode(t, ctx, &valnode.TestValidationConfig) + } + configByValidationNode(conf, valStack) + + fatalErrChan := make(chan error, 10) + asserterRollupAddresses, initMessage := DeployOnTestL1(t, ctx, l1Info, l1Backend, chainConfig) + + sequencerTxOpts := l1Info.GetDefaultTransactOpts("sequencer", ctx) + asserterTxOpts := l1Info.GetDefaultTransactOpts("asserter", ctx) + challengerTxOpts := l1Info.GetDefaultTransactOpts("challenger", ctx) + + asserterBridgeAddr, asserterSeqInbox, asserterSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + challengerBridgeAddr, challengerSeqInbox, challengerSeqInboxAddr := setupSequencerInboxStub(ctx, t, l1Info, l1Backend, chainConfig) + + asserterL2Info, asserterL2Stack, asserterL2ChainDb, asserterL2ArbDb, asserterL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) + asserterRollupAddresses.Bridge = asserterBridgeAddr + asserterRollupAddresses.SequencerInbox = asserterSeqInboxAddr + asserterExec, err := gethexec.CreateExecutionNode(ctx, asserterL2Stack, asserterL2ChainDb, asserterL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) + Require(t, err) + parentChainID := big.NewInt(1337) + asserterL2, err := arbnode.CreateNode(ctx, asserterL2Stack, asserterExec, asserterL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, asserterRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) + Require(t, err) + err = asserterL2.Start(ctx) + Require(t, err) + + challengerL2Info, challengerL2Stack, challengerL2ChainDb, challengerL2ArbDb, challengerL2Blockchain := createL2BlockChainWithStackConfig(t, nil, "", chainConfig, initMessage, nil, nil) + challengerRollupAddresses := *asserterRollupAddresses + challengerRollupAddresses.Bridge = challengerBridgeAddr + challengerRollupAddresses.SequencerInbox = 
challengerSeqInboxAddr + challengerExec, err := gethexec.CreateExecutionNode(ctx, challengerL2Stack, challengerL2ChainDb, challengerL2Blockchain, l1Backend, gethexec.ConfigDefaultTest) + Require(t, err) + challengerL2, err := arbnode.CreateNode(ctx, challengerL2Stack, challengerExec, challengerL2ArbDb, NewFetcherFromConfig(conf), chainConfig, l1Backend, &challengerRollupAddresses, nil, nil, nil, fatalErrChan, parentChainID, nil) + Require(t, err) + err = challengerL2.Start(ctx) + Require(t, err) + + asserterL2Info.GenerateAccount("Destination") + challengerL2Info.SetFullAccountInfo("Destination", asserterL2Info.GetInfoWithPrivKey("Destination")) + + if challengeMsgIdx < 1 || challengeMsgIdx > 3*makeBatch_MsgsPerBatch { + Fatal(t, "challengeMsgIdx illegal") + } + + // seqNum := common.Big2 + makeCelestiaBatch(t, asserterL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, asserterL2Info, l1Backend, &sequencerTxOpts, asserterSeqInbox, asserterSeqInboxAddr, -1) + makeCelestiaBatch(t, challengerL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, challengerL2Info, l1Backend, &sequencerTxOpts, challengerSeqInbox, challengerSeqInboxAddr, challengeMsgIdx-1) + + // seqNum.Add(seqNum, common.Big1) + makeCelestiaBatch(t, asserterL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, asserterL2Info, l1Backend, &sequencerTxOpts, asserterSeqInbox, asserterSeqInboxAddr, -1) + makeCelestiaBatch(t, challengerL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, challengerL2Info, l1Backend, &sequencerTxOpts, challengerSeqInbox, challengerSeqInboxAddr, challengeMsgIdx-makeBatch_MsgsPerBatch-1) + + // seqNum.Add(seqNum, common.Big1) + makeCelestiaBatch(t, asserterL2, celestiaDa, undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, asserterL2Info, l1Backend, &sequencerTxOpts, asserterSeqInbox, asserterSeqInboxAddr, -1) + makeCelestiaBatch(t, challengerL2, celestiaDa, 
undecided, counterFactual, mockStreamWrapper, &deployerTxOpts, challengerL2Info, l1Backend, &sequencerTxOpts, challengerSeqInbox, challengerSeqInboxAddr, challengeMsgIdx-makeBatch_MsgsPerBatch*2-1) + + trueSeqInboxAddr := challengerSeqInboxAddr + trueDelayedBridge := challengerBridgeAddr + expectedWinner := l1Info.GetAddress("challenger") + if asserterIsCorrect { + trueSeqInboxAddr = asserterSeqInboxAddr + trueDelayedBridge = asserterBridgeAddr + expectedWinner = l1Info.GetAddress("asserter") + } + ospEntry := DeployOneStepProofEntryCelestia(t, ctx, &deployerTxOpts, l1Backend) + + locator, err := server_common.NewMachineLocator("") + if err != nil { + Fatal(t, err) + } + var wasmModuleRoot common.Hash + if useStubs { + wasmModuleRoot = mockWasmModuleRoots[0] + } else { + wasmModuleRoot = locator.LatestWasmModuleRoot() + if (wasmModuleRoot == common.Hash{}) { + Fatal(t, "latest machine not found") + } + } + + asserterGenesis := asserterExec.ArbInterface.BlockChain().Genesis() + challengerGenesis := challengerExec.ArbInterface.BlockChain().Genesis() + if asserterGenesis.Hash() != challengerGenesis.Hash() { + Fatal(t, "asserter and challenger have different genesis hashes") + } + asserterLatestBlock := asserterExec.ArbInterface.BlockChain().CurrentBlock() + challengerLatestBlock := challengerExec.ArbInterface.BlockChain().CurrentBlock() + if asserterLatestBlock.Hash() == challengerLatestBlock.Hash() { + Fatal(t, "asserter and challenger have the same end block") + } + + asserterStartGlobalState := validator.GoGlobalState{ + BlockHash: asserterGenesis.Hash(), + Batch: 1, + PosInBatch: 0, + } + asserterEndGlobalState := validator.GoGlobalState{ + BlockHash: asserterLatestBlock.Hash(), + Batch: 4, + PosInBatch: 0, + } + numBlocks := asserterLatestBlock.Number.Uint64() - asserterGenesis.NumberU64() + + resultReceiver, challengeManagerAddr := CreateChallenge( + t, + ctx, + &deployerTxOpts, + l1Backend, + ospEntry, + trueSeqInboxAddr, + trueDelayedBridge, + wasmModuleRoot, 
+ asserterStartGlobalState, + asserterEndGlobalState, + numBlocks, + l1Info.GetAddress("asserter"), + l1Info.GetAddress("challenger"), + ) + + confirmLatestBlock(ctx, t, l1Info, l1Backend) + + // Add the L1 backend to Celestia DA + celestiaDa.Prover.EthClient = l1Backend + + celestiaReader := celestiaTypes.NewReaderForCelestia(celestiaDa) + + asserterValidator, err := staker.NewStatelessBlockValidator(asserterL2.InboxReader, asserterL2.InboxTracker, asserterL2.TxStreamer, asserterExec.Recorder, asserterL2ArbDb, []daprovider.Reader{celestiaReader}, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + if err != nil { + Fatal(t, err) + } + if useStubs { + asserterRecorder := newMockRecorder(asserterValidator, asserterL2.TxStreamer) + asserterValidator.OverrideRecorder(t, asserterRecorder) + } + err = asserterValidator.Start(ctx) + if err != nil { + Fatal(t, err) + } + defer asserterValidator.Stop() + asserterManager, err := staker.NewChallengeManager(ctx, l1Backend, &asserterTxOpts, asserterTxOpts.From, challengeManagerAddr, 1, asserterValidator, 0, 0) + if err != nil { + Fatal(t, err) + } + challengerValidator, err := staker.NewStatelessBlockValidator(challengerL2.InboxReader, challengerL2.InboxTracker, challengerL2.TxStreamer, challengerExec.Recorder, challengerL2ArbDb, []daprovider.Reader{celestiaReader}, StaticFetcherFrom(t, &conf.BlockValidator), valStack) + if err != nil { + Fatal(t, err) + } + if useStubs { + challengerRecorder := newMockRecorder(challengerValidator, challengerL2.TxStreamer) + challengerValidator.OverrideRecorder(t, challengerRecorder) + } + err = challengerValidator.Start(ctx) + if err != nil { + Fatal(t, err) + } + defer challengerValidator.Stop() + challengerManager, err := staker.NewChallengeManager(ctx, l1Backend, &challengerTxOpts, challengerTxOpts.From, challengeManagerAddr, 1, challengerValidator, 0, 0) + if err != nil { + Fatal(t, err) + } + + confirmLatestBlock(ctx, t, l1Info, l1Backend) + + for i := 0; i < 100; i++ { + var tx 
*types.Transaction + var currentCorrect bool + // Gas cost is slightly reduced if done in the same timestamp or block as previous call. + // This might make gas estimation undersestimate next move. + // Invoke a new L1 block, with a new timestamp, before estimating. + time.Sleep(time.Second) + SendWaitTestTransactions(t, ctx, l1Backend, []*types.Transaction{ + l1Info.PrepareTx("Faucet", "User", 30000, big.NewInt(1e12), nil), + }) + + if i%2 == 0 { + currentCorrect = !asserterIsCorrect + tx, err = challengerManager.Act(ctx) + } else { + currentCorrect = asserterIsCorrect + tx, err = asserterManager.Act(ctx) + } + if err != nil { + if !currentCorrect && (strings.Contains(err.Error(), "lost challenge") || + strings.Contains(err.Error(), "SAME_OSP_END") || + strings.Contains(err.Error(), "BAD_SEQINBOX_MESSAGE")) || + strings.Contains(err.Error(), "BLOBSTREAM_UNDECIDED") { + t.Log("challenge completed! asserter hit expected error:", err) + return + } else if (currentCorrect && counterFactual) && strings.Contains(err.Error(), "BAD_SEQINBOX_MESSAGE") { + t.Log("counterfactual challenge challenge completed! asserter hit expected error:", err) + return + } + Fatal(t, "challenge step", i, "hit error:", err) + } + if tx == nil { + Fatal(t, "no move") + } + + if useStubs { + if len(mockSpawn.ExecSpawned) != 0 { + if len(mockSpawn.ExecSpawned) != 1 { + Fatal(t, "bad number of spawned execRuns: ", len(mockSpawn.ExecSpawned)) + } + if mockSpawn.ExecSpawned[0] != uint64(challengeMsgIdx) { + Fatal(t, "wrong spawned execRuns: ", mockSpawn.ExecSpawned[0], " expected: ", challengeMsgIdx) + } + return + } + } + + _, err = EnsureTxSucceeded(ctx, l1Backend, tx) + if err != nil { + if !currentCorrect && strings.Contains(err.Error(), "BAD_SEQINBOX_MESSAGE") { + t.Log("challenge complete! 
Tx failed as expected:", err) + return + } + Fatal(t, err) + } + + confirmLatestBlock(ctx, t, l1Info, l1Backend) + + winner, err := resultReceiver.Winner(&bind.CallOpts{}) + if err != nil { + Fatal(t, err) + } + if winner == (common.Address{}) { + continue + } + if winner != expectedWinner { + Fatal(t, "wrong party won challenge") + } + } + + Fatal(t, "challenge timed out without winner") +} + +func TestCelestiaChallengeManagerFullAsserterIncorrect(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, false, false) +} + +func TestCelestiaChallengeManagerFullAsserterCorrect(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, false, false) +} + +func TestCelestiaChallengeManagerFullAsserterIncorrectUndecided(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, true, false) +} + +func TestCelestiaChallengeManagerFullAsserterCorrectUndecided(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, true, false) +} + +func TestCelestiaChallengeManagerFullAsserterIncorrectCounterfactual(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, false, false, makeBatch_MsgsPerBatch+1, false, true) +} + +func TestCelestiaChallengeManagerFullAsserterCorrectCounterfactual(t *testing.T) { + t.Parallel() + RunCelestiaChallengeTest(t, true, false, makeBatch_MsgsPerBatch+2, false, true) +} diff --git a/validator/server_arb/bold_machine.go b/validator/server_arb/bold_machine.go index 6ca48ba228..b76b0321e8 100644 --- a/validator/server_arb/bold_machine.go +++ b/validator/server_arb/bold_machine.go @@ -143,3 +143,10 @@ func (m *BoldMachine) ProveNextStep() []byte { } return m.inner.ProveNextStep() } + +func (m *BoldMachine) GetNextOpcode() uint16 { + if !m.hasStepped { + return m.zeroMachine.GetNextOpcode() + } + return m.inner.GetNextOpcode() +} diff --git a/validator/server_arb/execution_run.go 
b/validator/server_arb/execution_run.go index 615015001b..b27af29a75 100644 --- a/validator/server_arb/execution_run.go +++ b/validator/server_arb/execution_run.go @@ -5,6 +5,7 @@ package server_arb import ( "context" + "encoding/binary" "fmt" "sync" "time" @@ -175,7 +176,21 @@ func (e *executionRun) GetProofAt(position uint64) containers.PromiseInterface[[ if err != nil { return nil, err } - return machine.ProveNextStep(), nil + + opcodeBytes := make([]byte, 2) + if machine.IsRunning() { + opcode := machine.GetNextOpcode() + + binary.BigEndian.PutUint16(opcodeBytes, opcode) + } else { + // append dummy opcode if the machine is halted + binary.BigEndian.PutUint16(opcodeBytes, 0xFFFF) + } + + proof := machine.ProveNextStep() + + proof = append(proof, opcodeBytes...) + return proof, nil }) } diff --git a/validator/server_arb/execution_run_test.go b/validator/server_arb/execution_run_test.go index 381cfa63a8..f6d9cd8aee 100644 --- a/validator/server_arb/execution_run_test.go +++ b/validator/server_arb/execution_run_test.go @@ -60,6 +60,9 @@ func (m *mockMachine) Status() uint8 { func (m *mockMachine) ProveNextStep() []byte { return nil } +func (m *mockMachine) GetNextOpcode() uint16 { + return 0 +} func (m *mockMachine) Freeze() {} func (m *mockMachine) Destroy() {} diff --git a/validator/server_arb/machine.go b/validator/server_arb/machine.go index 719a223369..4b38ce3ddf 100644 --- a/validator/server_arb/machine.go +++ b/validator/server_arb/machine.go @@ -46,6 +46,7 @@ type MachineInterface interface { Hash() common.Hash GetGlobalState() validator.GoGlobalState ProveNextStep() []byte + GetNextOpcode() uint16 Freeze() Destroy() } @@ -315,6 +316,14 @@ func (m *ArbitratorMachine) ProveNextStep() []byte { return proofBytes } +func (m *ArbitratorMachine) GetNextOpcode() uint16 { + defer runtime.KeepAlive(m) + m.mutex.Lock() + defer m.mutex.Unlock() + + return uint16(C.arbitrator_get_opcode(m.ptr)) +} + func (m *ArbitratorMachine) SerializeState(path string) error { defer 
runtime.KeepAlive(m) m.mutex.Lock()