2 changes: 1 addition & 1 deletion .github/workflows/nightly-ci.yml
@@ -16,7 +16,7 @@ jobs:
# Only run on schedule AND main branch
tests-scheduled:
name: Scheduled tests
runs-on: ubuntu-8
runs-on: ubuntu-latest

services:
redis:
6 changes: 3 additions & 3 deletions .gitmodules
@@ -12,8 +12,8 @@
url = https://github.com/google/brotli.git
[submodule "contracts"]
path = contracts
url = https://github.com/OffchainLabs/nitro-contracts.git
branch = develop
url = https://github.com/celestiaorg/nitro-contracts.git
branch = contracts-v1.2.1
[submodule "arbitrator/wasm-testsuite/testsuite"]
path = arbitrator/wasm-testsuite/testsuite
url = https://github.com/WebAssembly/testsuite.git
@@ -22,7 +22,7 @@
url = https://github.com/OffchainLabs/wasmer.git
[submodule "nitro-testnode"]
path = nitro-testnode
url = https://github.com/OffchainLabs/nitro-testnode.git
url = https://github.com/celestiaorg/nitro-testnode.git
[submodule "bold"]
path = bold
url = https://github.com/OffchainLabs/bold.git
12 changes: 8 additions & 4 deletions Dockerfile
@@ -46,10 +46,10 @@ WORKDIR /workspace
RUN apt-get update && apt-get install -y curl build-essential=12.9

FROM wasm-base AS wasm-libs-builder
# clang / lld used by soft-float wasm
RUN apt-get update && \
apt-get install -y clang=1:14.0-55.7~deb12u1 lld=1:14.0-55.7~deb12u1 wabt
# pinned rust 1.84.1
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.84.1 --target x86_64-unknown-linux-gnu,wasm32-unknown-unknown,wasm32-wasip1
COPY ./Makefile ./
COPY arbitrator/Cargo.* arbitrator/
@@ -85,6 +85,7 @@ COPY ./cmd/replay ./cmd/replay
COPY ./daprovider ./daprovider
COPY ./daprovider/das/dasutil ./daprovider/das/dasutil
COPY ./daprovider/das/dastree ./daprovider/das/dastree
COPY ./daprovider/celestia ./daprovider/celestia
COPY ./precompiles ./precompiles
COPY ./statetransfer ./statetransfer
COPY ./util ./util
@@ -104,6 +105,8 @@ COPY ./go-ethereum ./go-ethereum
COPY scripts/remove_reference_types.sh scripts/
COPY --from=brotli-wasm-export / target/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/precompiles/ contracts/build/contracts/src/precompiles/
COPY --from=contracts-builder workspace/contracts/build/contracts/src/celestia/ contracts/build/contracts/src/celestia/
COPY --from=contracts-builder workspace/contracts/node_modules/@offchainlabs/upgrade-executor/build/contracts/src/UpgradeExecutor.sol/UpgradeExecutor.json contracts/
COPY --from=contracts-builder workspace/contracts-legacy/build/contracts/src/precompiles/ contracts-legacy/build/contracts/src/precompiles/
COPY --from=contracts-builder workspace/.make/ .make/
@@ -236,9 +239,10 @@ COPY ./scripts/download-machine.sh .
RUN ./download-machine.sh consensus-v30 0xb0de9cb89e4d944ae6023a3b62276e54804c242fd8c4c2d8e6cc4450f5fa8b1b && true
RUN ./download-machine.sh consensus-v31 0x260f5fa5c3176a856893642e149cf128b5a8de9f828afec8d11184415dd8dc69
RUN ./download-machine.sh consensus-v32 0x184884e1eb9fefdc158f6c8ac912bb183bf3cf83f0090317e0bc4ac5860baa39
#RUN ./download-machine.sh consensus-v40-rc.1 0x6dae396b0b7644a2d63b4b22e6452b767aa6a04b6778dadebdd74aa40f40a5c5
#RUN ./download-machine.sh consensus-v40-rc.2 0xa8206be13d53e456c7ab061d94bab5b229d674ac57ffe7281216479a8820fcc0
RUN ./download-machine.sh consensus-v40 0xdb698a2576298f25448bc092e52cf13b1e24141c997135d70f217d674bbeb69a
RUN ./download-machine.sh v3.2.1-rc.1 0xe81f986823a85105c5fd91bb53b4493d38c0c26652d23f76a7405ac889908287 celestiaorg
RUN ./download-machine.sh v3.3.2 0xaf1dbdfceb871c00bfbb1675983133df04f0ed04e89647812513c091e3a982b3 celestiaorg
RUN ./download-machine.sh consensus-v40 0x597de35fc2ee60e5b2840157370d037542d6a4bc587af7f88202636c54e6bd8d celestiaorg

FROM golang:1.23.1-bookworm AS node-builder
WORKDIR /workspace
2 changes: 1 addition & 1 deletion Makefile
@@ -601,7 +601,7 @@ contracts/test/prover/proofs/%.json: $(arbitrator_cases)/%.wasm $(prover_bin)
go run solgen/gen.go
@touch $@

.make/solidity: $(DEP_PREDICATE) safe-smart-account/contracts/*/*.sol safe-smart-account/contracts/*.sol contracts/src/*/*.sol contracts-legacy/src/*/*.sol contracts-local/src/*/*.sol contracts-local/gas-dimensions/src/*.sol .make/yarndeps $(ORDER_ONLY_PREDICATE) .make
.make/solidity: $(DEP_PREDICATE) contracts/src/*/*.sol contracts-legacy/src/*/*.sol contracts-local/src/*/*.sol contracts-local/gas-dimensions/src/*.sol .make/yarndeps $(ORDER_ONLY_PREDICATE) .make
yarn --cwd safe-smart-account build
yarn --cwd contracts build
yarn --cwd contracts build:forge:yul
8 changes: 8 additions & 0 deletions arbitrator/prover/src/lib.rs
@@ -470,3 +470,11 @@ pub unsafe extern "C" fn arbitrator_module_root(mach: *mut Machine) -> Bytes32 {
pub unsafe extern "C" fn arbitrator_gen_proof(mach: *mut Machine, out: *mut RustBytes) {
(*out).write((*mach).serialize_proof());
}

#[no_mangle]
pub unsafe extern "C" fn arbitrator_get_opcode(mach: *mut Machine) -> u16 {
match (*mach).get_next_instruction() {
Some(instruction) => instruction.opcode.repr(),
None => panic!("Failed to get next opcode for Machine"),
}
}
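
The new `arbitrator_get_opcode` export follows the same C ABI as the neighboring `arbitrator_*` entry points, returning the numeric opcode of the machine's next instruction. A minimal sketch of how host code might bind it through cgo — the package name, header declaration, and wrapper below are assumptions for illustration, not the repository's actual bindings:

```go
package validator

/*
#include <stdint.h>

// Assumed C-ABI declaration matching the Rust export above; a real build
// would link against the prover library and its generated header.
typedef struct Machine Machine;
extern uint16_t arbitrator_get_opcode(Machine *mach);
*/
import "C"

// NextOpcode returns the opcode of the machine's next instruction. The Rust
// side panics if there is no next instruction, so callers should only invoke
// this on a machine that has not halted.
func NextOpcode(mach *C.Machine) uint16 {
	return uint16(C.arbitrator_get_opcode(mach))
}
```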
7 changes: 7 additions & 0 deletions arbitrator/prover/src/machine.rs
@@ -3095,6 +3095,13 @@ impl Machine {
{
data.push(0); // inbox proof type
out!(msg_data);
match inbox_identifier {
InboxIdentifier::Sequencer => {
out!(msg_idx.to_be_bytes());
data.push(0x0);
}
InboxIdentifier::Delayed => data.push(0x1),
}
}
} else {
unreachable!()
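This hunk extends the inbox proof layout: after the message data, a sequencer-inbox proof now appends the 8-byte big-endian message index followed by a 0x00 tag byte, while a delayed-inbox proof appends a single 0x01. A sketch of the same suffix encoding in Go — the function name and signature are illustrative, not nitro's API:

```go
package proofs

import "encoding/binary"

// appendInboxIdentifier mirrors the proof suffix emitted above: sequencer
// proofs carry the big-endian message index plus a 0x00 tag byte; delayed
// proofs carry only a 0x01 tag byte.
func appendInboxIdentifier(proof []byte, sequencer bool, msgIdx uint64) []byte {
	if sequencer {
		proof = binary.BigEndian.AppendUint64(proof, msgIdx)
		return append(proof, 0x00)
	}
	return append(proof, 0x01)
}
```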
4 changes: 3 additions & 1 deletion arbnode/batch_poster.go
@@ -106,8 +106,8 @@ type BatchPoster struct {
bridgeAddr common.Address
gasRefunderAddr common.Address
building *buildingBatch
dapWriter daprovider.Writer
dapReaders []daprovider.Reader
dapWriter daprovider.Writer
dataPoster *dataposter.DataPoster
redisLock *redislock.Simple
messagesPerBatch *arbmath.MovingAverage[uint64]
@@ -1670,6 +1670,7 @@ func (b *BatchPoster) MaybePostSequencerBatch(ctx context.Context) (bool, error)
batchPosterDAFailureCounter.Inc(1)
return false, fmt.Errorf("%w: nonce changed from %d to %d while creating batch", storage.ErrStorageRace, nonce, gotNonce)
}

// #nosec G115
sequencerMsg, err = b.dapWriter.Store(ctx, sequencerMsg, uint64(time.Now().Add(config.DASRetentionPeriod).Unix()), config.DisableDapFallbackStoreDataOnChain)
if err != nil {
@@ -1679,6 +1680,7 @@ func (b *BatchPoster) MaybePostSequencerBatch(ctx context.Context) (bool, error)

batchPosterDASuccessCounter.Inc(1)
batchPosterDALastSuccessfulActionGauge.Update(time.Now().Unix())

}

prevMessageCount := batchPosition.MessageCount
2 changes: 2 additions & 0 deletions arbstate/inbox.go
@@ -106,6 +106,8 @@ func parseSequencerMessage(ctx context.Context, batchNum uint64, batchBlockHash
if !foundDA {
if daprovider.IsDASMessageHeaderByte(payload[0]) {
log.Error("No DAS Reader configured, but sequencer message found with DAS header")
} else if daprovider.IsCelestiaMessageHeaderByte(payload[0]) {
log.Error("No Celestia Reader configured, but sequencer message found with Celestia header")
} else if daprovider.IsBlobHashesHeaderByte(payload[0]) {
return nil, daprovider.ErrNoBlobReader
}
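The parser now recognizes a Celestia header byte alongside the existing DAS and blob headers. A sketch of the dispatch this enables, using the predicate helpers named in the hunk; the wrapper function itself is hypothetical:

```go
package daexample

import "github.com/offchainlabs/nitro/daprovider"

// describeHeader mirrors parseSequencerMessage's header-byte dispatch: the
// first payload byte determines which DA reader must be configured before
// the batch payload can be recovered.
func describeHeader(payload []byte) string {
	switch {
	case daprovider.IsDASMessageHeaderByte(payload[0]):
		return "DAS batch: requires a DAS reader"
	case daprovider.IsCelestiaMessageHeaderByte(payload[0]):
		return "Celestia batch: requires a Celestia reader"
	case daprovider.IsBlobHashesHeaderByte(payload[0]):
		return "EIP-4844 blob batch: requires a blob reader"
	default:
		return "plain calldata batch"
	}
}
```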
Binary file not shown.
1 change: 1 addition & 0 deletions cmd/deploy/deploy.go
@@ -138,6 +138,7 @@ func main() {
}

loserEscrowAddress := common.HexToAddress(*loserEscrowAddressString)

if sequencerAddress != (common.Address{}) && ownerAddress != l1TransactionOpts.From {
panic("cannot specify sequencer address if owner is not deployer")
}
10 changes: 2 additions & 8 deletions cmd/nitro/nitro.go
@@ -499,12 +499,6 @@ func mainImpl() int {
return 1
}

if l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee != nodeConfig.Node.DataAvailability.Enable {
flag.Usage()
log.Error(fmt.Sprintf("data availability service usage for this chain is set to %v but --node.data-availability.enable is set to %v", l2BlockChain.Config().ArbitrumChainParams.DataAvailabilityCommittee, nodeConfig.Node.DataAvailability.Enable))
return 1
}

var valNode *valnode.ValidationNode
if sameProcessValidationNodeEnabled {
valNode, err = valnode.CreateValidationNode(
@@ -582,9 +576,9 @@ func mainImpl() int {
return 1
}
}
// If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox’s maxDataSize if the data availability service is not enabled.
// If batchPoster is enabled, validate MaxSize to be at least 10kB below the sequencer inbox's maxDataSize if the data availability service and Celestia DA are not enabled.
// The 10kB gap is because it's possible for the batch poster to exceed its MaxSize limit and produce batches of slightly larger size.
if nodeConfig.Node.BatchPoster.Enable && !nodeConfig.Node.DataAvailability.Enable {
if nodeConfig.Node.BatchPoster.Enable && (!nodeConfig.Node.DataAvailability.Enable && !nodeConfig.Node.DAProvider.Enable) {
if nodeConfig.Node.BatchPoster.MaxSize > seqInboxMaxDataSize-10000 {
log.Error("batchPoster's MaxSize is too large")
return 1
144 changes: 131 additions & 13 deletions cmd/replay/main.go
@@ -6,6 +6,7 @@ package main
import (
"bytes"
"context"
"encoding/binary"
"encoding/hex"
"encoding/json"
"fmt"
@@ -33,6 +34,8 @@ import (
"github.com/offchainlabs/nitro/arbutil"
"github.com/offchainlabs/nitro/cmd/chaininfo"
"github.com/offchainlabs/nitro/daprovider"
"github.com/offchainlabs/nitro/daprovider/celestia/tree"
celestiaTypes "github.com/offchainlabs/nitro/daprovider/celestia/types"
"github.com/offchainlabs/nitro/daprovider/das/dastree"
"github.com/offchainlabs/nitro/daprovider/das/dasutil"
"github.com/offchainlabs/nitro/gethhook"
@@ -162,6 +165,126 @@ func (r *BlobPreimageReader) Initialize(ctx context.Context) error {
return nil
}

type PreimageCelestiaReader struct{}

func (dasReader *PreimageCelestiaReader) Read(ctx context.Context, blobPointer *celestiaTypes.BlobPointer) ([]byte, *celestiaTypes.SquareData, error) {
oracle := func(hash common.Hash) ([]byte, error) {
return wavmio.ResolveTypedPreimage(arbutil.Sha2_256PreimageType, hash)
}

if blobPointer.SharesLength == 0 {
return nil, nil, fmt.Errorf("Error, shares length is %v", blobPointer.SharesLength)
}
// first, walk down the merkle tree
leaves, err := tree.MerkleTreeContent(oracle, common.BytesToHash(blobPointer.DataRoot[:]))
if err != nil {
log.Warn("Error revealing contents behind data root", "err", err)
return nil, nil, err
}

squareSize := uint64(len(leaves)) / 2
// split leaves in half to get row roots
rowRoots := leaves[:squareSize]
// We get the original data square size, which is (size_of_the_extended_square / 2)
odsSize := squareSize / 2

startRow := blobPointer.Start / odsSize

if blobPointer.Start >= odsSize*odsSize {
// check that the square isn't just our share (very niche case, should only happen in local testing)
if blobPointer.Start != odsSize*odsSize && odsSize > 1 {
return nil, nil, fmt.Errorf("Error Start Index out of ODS bounds: index=%v odsSize=%v", blobPointer.Start, odsSize)
}
}

// adjusted_end_index = adjusted_start_index + length - 1
if blobPointer.Start+blobPointer.SharesLength < 1 {
return nil, nil, fmt.Errorf("Error getting number of shares in first row: index+length %v < 1", blobPointer.Start+blobPointer.SharesLength)
}
endIndexOds := blobPointer.Start + blobPointer.SharesLength - 1
if endIndexOds >= odsSize*odsSize {
// check that the square isn't just our share (very niche case, should only happen in local testing)
if endIndexOds != odsSize*odsSize && odsSize > 1 {
return nil, nil, fmt.Errorf("Error End Index out of ODS bounds: index=%v odsSize=%v", endIndexOds, odsSize)
}
}
endRow := endIndexOds / odsSize

if endRow >= odsSize || startRow >= odsSize {
return nil, nil, fmt.Errorf("Error rows out of bounds: startRow=%v endRow=%v odsSize=%v", startRow, endRow, odsSize)
}

startColumn := blobPointer.Start % odsSize
endColumn := endIndexOds % odsSize

if startRow == endRow && startColumn > endColumn {
log.Error("start and end row are the same, and startColumn > endColumn", "startColumn", startColumn, "endColumn", endColumn)
return []byte{}, nil, nil
}

// TODO: adjust the math in the CelestiaPayload function in the inbox.
// We take ods * ods as the end index into the ODS, then check that the
// start index is in bounds (otherwise ignore and return an empty batch),
// and that the end index is in bounds (otherwise ignore).

// get rows behind row root and shares for our blob
rows := [][][]byte{}
shares := [][]byte{}
for i := startRow; i <= endRow; i++ {
row, err := tree.NmtContent(oracle, rowRoots[i])
if err != nil {
return nil, nil, err
}
rows = append(rows, row)

odsRow := row[:odsSize]

// Copy only this blob's columns from each row: a single-row blob spans
// [startColumn, endColumn]; otherwise take the first row from startColumn
// onward, the last row up to endColumn, and all of every row in between.
if startRow == endRow {
shares = append(shares, odsRow[startColumn:endColumn+1]...)
break
} else if i == startRow {
shares = append(shares, odsRow[startColumn:]...)
} else if i == endRow {
shares = append(shares, odsRow[:endColumn+1]...)
} else {
shares = append(shares, odsRow...)
}
}

data := []byte{}
if tree.NamespaceSize*2+1 > uint64(len(shares[0])) || tree.NamespaceSize*2+5 > uint64(len(shares[0])) {
return nil, nil, fmt.Errorf("Error getting sequence length on share of size %v", len(shares[0]))
}
sequenceLength := binary.BigEndian.Uint32(shares[0][tree.NamespaceSize*2+1 : tree.NamespaceSize*2+5])
for i, share := range shares {
// trim extra namespace
share := share[tree.NamespaceSize:]
if i == 0 {
data = append(data, share[tree.NamespaceSize+5:]...)
continue
}
data = append(data, share[tree.NamespaceSize+1:]...)
}

data = data[:sequenceLength]
squareData := celestiaTypes.SquareData{
RowRoots: rowRoots,
ColumnRoots: leaves[squareSize:],
Rows: rows,
SquareSize: squareSize,
StartRow: startRow,
EndRow: endRow,
}
return data, &squareData, nil
}
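
The index arithmetic in `Read` maps a blob's flat ODS share index onto (row, column) coordinates of the original data square. A worked example with illustrative numbers; the helper is hypothetical:

```go
package odsmath

// For an extended square of 8x8 leaves (squareSize = 8), the ODS is 4x4
// (odsSize = 4). A blob starting at flat index 6 with 5 shares spans:
//   startRow    = 6 / 4     = 1
//   endIndexOds = 6 + 5 - 1 = 10
//   endRow      = 10 / 4    = 2
//   startColumn = 6 % 4     = 2
//   endColumn   = 10 % 4    = 2
// i.e. columns 2..3 of row 1 plus columns 0..2 of row 2.
func odsCoords(start, length, odsSize uint64) (startRow, startCol, endRow, endCol uint64) {
	end := start + length - 1
	return start / odsSize, start % odsSize, end / odsSize, end % odsSize
}
```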

func (dasReader *PreimageCelestiaReader) GetProof(ctx context.Context, msg []byte) ([]byte, error) {
return nil, nil
}
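
The trimming offsets in the share loop follow from Celestia's v0 share layout (namespace, one info byte, and a 4-byte big-endian sequence length on the first share of a sequence), with each NMT leaf additionally prefixed by its namespace. A sketch of the same trimming under that assumed layout; the helper is hypothetical:

```go
package shares

// shareData strips the framing from one NMT leaf: the leaf-level namespace
// prefix, the share's own namespace, its info byte, and (for the first
// share of a sequence only) the 4-byte sequence length.
func shareData(leaf []byte, nsSize int, first bool) []byte {
	share := leaf[nsSize:] // drop the NMT leaf's namespace prefix
	if first {
		return share[nsSize+5:] // namespace + info byte + sequence length
	}
	return share[nsSize+1:] // namespace + info byte
}
```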

// To generate:
// key, _ := crypto.HexToECDSA("0000000000000000000000000000000000000000000000000000000000000001")
// sig, _ := crypto.Sign(make([]byte, 32), key)
@@ -219,27 +342,19 @@ func main() {
}
return wavmio.ReadInboxMessage(batchNum), nil
}
readMessage := func(dasEnabled bool) *arbostypes.MessageWithMetadata {
readMessage := func() *arbostypes.MessageWithMetadata {
var delayedMessagesRead uint64
if lastBlockHeader != nil {
delayedMessagesRead = lastBlockHeader.Nonce.Uint64()
}
var dasReader dasutil.DASReader
var dasKeysetFetcher dasutil.DASKeysetFetcher
if dasEnabled {
// DAS batch and keysets are all together in the same preimage binary.
dasReader = &PreimageDASReader{}
dasKeysetFetcher = &PreimageDASReader{}
}
backend := WavmInbox{}
var keysetValidationMode = daprovider.KeysetPanicIfInvalid
if backend.GetPositionWithinMessage() > 0 {
keysetValidationMode = daprovider.KeysetDontValidate
}
var dapReaders []daprovider.Reader
if dasReader != nil {
dapReaders = append(dapReaders, dasutil.NewReaderForDAS(dasReader, dasKeysetFetcher))
}
dapReaders = append(dapReaders, dasutil.NewReaderForDAS(&PreimageDASReader{}, &PreimageDASReader{}))
dapReaders = append(dapReaders, celestiaTypes.NewReaderForCelestia(&PreimageCelestiaReader{}))
dapReaders = append(dapReaders, daprovider.NewReaderForBlobReader(&BlobPreimageReader{}))
inboxMultiplexer := arbstate.NewInboxMultiplexer(backend, delayedMessagesRead, dapReaders, keysetValidationMode)
ctx := context.Background()
@@ -297,7 +412,10 @@ func main() {
}
}

message := readMessage(chainConfig.ArbitrumChainParams.DataAvailabilityCommittee)
// TODO: add Celestia (or a generic "ExternalDA" flag) as an option to the ArbitrumChainParams;
// for now we hardcode this to true and hardcode Celestia in `readMessage`
// to test the integration
message := readMessage()

chainContext := WavmChainContext{chainConfig: chainConfig}
newBlock, _, err = arbos.ProduceBlock(message.Message, message.DelayedMessagesRead, lastBlockHeader, statedb, chainContext, false, core.MessageReplayMode)
@@ -307,7 +425,7 @@
} else {
// Initialize ArbOS with this init message and create the genesis block.

message := readMessage(false)
message := readMessage()

initMessage, err := message.Message.ParseInitMessage()
if err != nil {
2 changes: 1 addition & 1 deletion contracts
Submodule contracts updated 48 files
+5 −4 foundry.toml
+18 −2 hardhat.config.ts
+5 −0 remappings.txt
+102 −0 scripts/deploymentCelestiaReuseExisting.ts
+2 −0 scripts/deploymentUtils.ts
+6 −0 src/bridge/ISequencerInbox.sol
+989 −879 src/bridge/SequencerInbox.sol
+167 −0 src/celestia/BlobstreamVerifier.sol
+349 −0 src/celestia/DAVerifier.sol
+44 −0 src/celestia/IBlobstreamX.sol
+8 −0 src/celestia/lib/Constants.sol
+15 −0 src/celestia/lib/DataRootTuple.sol
+19 −0 src/celestia/lib/IDAOracle.sol
+27 −0 src/celestia/lib/tree/Constants.sol
+40 −0 src/celestia/lib/tree/Types.sol
+86 −0 src/celestia/lib/tree/Utils.sol
+12 −0 src/celestia/lib/tree/binary/BinaryMerkleProof.sol
+172 −0 src/celestia/lib/tree/binary/BinaryMerkleTree.sol
+23 −0 src/celestia/lib/tree/binary/TreeHasher.sol
+14 −0 src/celestia/lib/tree/namespace/NamespaceMerkleMultiproof.sol
+14 −0 src/celestia/lib/tree/namespace/NamespaceMerkleProof.sol
+409 −0 src/celestia/lib/tree/namespace/NamespaceMerkleTree.sol
+29 −0 src/celestia/lib/tree/namespace/NamespaceNode.sol
+83 −0 src/celestia/lib/tree/namespace/TreeHasher.sol
+547 −505 src/challengeV2/EdgeChallengeManager.sol
+45 −45 src/challengeV2/libraries/ArrayUtilsLib.sol
+277 −244 src/challengeV2/libraries/ChallengeEdgeLib.sol
+16 −4 src/challengeV2/libraries/ChallengeErrors.sol
+877 −793 src/challengeV2/libraries/EdgeChallengeManagerLib.sol
+5 −5 src/challengeV2/libraries/Enums.sol
+280 −271 src/challengeV2/libraries/MerkleTreeAccumulatorLib.sol
+55 −59 src/challengeV2/libraries/UintUtilsLib.sol
+13 −10 src/libraries/Error.sol
+57 −56 src/mocks/MerkleTreeAccess.sol
+92 −0 src/mocks/MockBlobstream.sol
+548 −0 src/mocks/OneStepProverHostIoCelestiaMock.sol
+53 −46 src/mocks/SimpleOneStepProofEntry.sol
+711 −565 src/osp/OneStepProverHostIo.sol
+161 −148 src/stylus/StylusDeployer.sol
+176 −177 test/MockAssertionChain.sol
+129 −129 test/challengeV2/ArrayUtilsLib.t.sol
+538 −406 test/challengeV2/ChallengeEdgeLib.t.sol
+2,919 −2,321 test/challengeV2/EdgeChallengeManager.t.sol
+2,826 −2,302 test/challengeV2/EdgeChallengeManagerLib.t.sol
+997 −704 test/challengeV2/MerkleTreeAccumulatorLib.t.sol
+36 −27 test/challengeV2/StateTools.sol
+72 −60 test/challengeV2/UintUtilsLib.t.sol
+161 −147 test/challengeV2/Utils.sol