diff --git a/.gitignore b/.gitignore
index e4a5ac9cce8..b245732d3fb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -72,8 +72,6 @@ __pycache__
libmdbx/build/*
tests/testdata/*
-go.work*
-
docker-compose.*.yml
.env
coverage-test.out
diff --git a/.gitmodules b/.gitmodules
index 7e2c136f083..227a38d189d 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -4,3 +4,6 @@
[submodule "eest-fixtures"]
path = execution/tests/execution-spec-tests
url = https://github.com/erigontech/eest-fixtures
+[submodule "execution/tests/arb-execution-spec-tests"]
+ path = execution/tests/arb-execution-spec-tests
+ url = https://github.com/erigontech/arbitrum-eest-fixtures
\ No newline at end of file
diff --git a/Dockerfile.rpc b/Dockerfile.rpc
new file mode 100644
index 00000000000..2db918cbce0
--- /dev/null
+++ b/Dockerfile.rpc
@@ -0,0 +1,68 @@
+FROM docker.io/library/golang:1.24.3-alpine3.22 AS builder
+
+RUN apk --no-cache add build-base linux-headers git bash ca-certificates libstdc++
+
+WORKDIR /app
+ADD go.mod go.mod
+ADD go.sum go.sum
+ADD erigon-lib/go.mod erigon-lib/go.mod
+ADD erigon-lib/go.sum erigon-lib/go.sum
+ADD p2p/go.mod p2p/go.mod
+ADD p2p/go.sum p2p/go.sum
+
+RUN go mod download
+ADD . .
+
+RUN --mount=type=cache,target=/root/.cache \
+ --mount=type=cache,target=/tmp/go-build \
+ --mount=type=cache,target=/go/pkg/mod \
+ make BUILD_TAGS=nosqlite,noboltdb,nosilkworm rpcdaemon
+
+
+FROM docker.io/library/alpine:3.22
+
+# install required runtime libs, along with some helpers for debugging
+RUN apk add --no-cache ca-certificates libstdc++ tzdata
+RUN apk add --no-cache curl jq bind-tools
+
+# Setup user and group
+#
+# from the perspective of the container, uid=1000, gid=1000 is a sensible choice
+# (mimicking Ubuntu Server), but if the caller creates a .env file (example in repo root),
+# these defaults will be overridden when make calls docker-compose
+ARG UID=1000
+ARG GID=1000
+RUN adduser -D -u $UID -g $GID erigon
+USER erigon
+RUN mkdir -p ~/.local/share/erigon
+
+RUN mkdir /erigon-data/
+ADD /erigon-data/arb-sep/ /erigon-data/
+
+## then give each binary its own layer
+COPY --from=builder /app/build/bin/rpcdaemon /usr/local/bin/rpcdaemon
+
+EXPOSE 8545 \
+ 8551 \
+ 8546 \
+ 42069 \
+ 42069/udp \
+ 8080 \
+ 9090 \
+ 6060
+
+# https://github.com/opencontainers/image-spec/blob/main/annotations.md
+ARG BUILD_DATE
+ARG VCS_REF
+ARG VERSION
+LABEL org.label-schema.build-date=$BUILD_DATE \
+ org.label-schema.description="Erigon Ethereum RPC Client" \
+ org.label-schema.name="Erigon-RPC" \
+ org.label-schema.schema-version="1.0" \
+ org.label-schema.url="https://erigon.tech" \
+ org.label-schema.vcs-ref=$VCS_REF \
+ org.label-schema.vcs-url="https://github.com/erigontech/erigon.git" \
+ org.label-schema.vendor="Erigon" \
+ org.label-schema.version=$VERSION
+
+ENTRYPOINT ["erigon"]
diff --git a/arb/blocks/header.go b/arb/blocks/header.go
new file mode 100644
index 00000000000..360c27f82e8
--- /dev/null
+++ b/arb/blocks/header.go
@@ -0,0 +1,55 @@
+package arbBlocks
+
+import (
+ "encoding/binary"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/types"
+)
+
+type HeaderInfo struct {
+ SendRoot common.Hash
+ SendCount uint64
+ L1BlockNumber uint64
+ ArbOSFormatVersion uint64
+}
+
+func (info HeaderInfo) extra() []byte {
+ return info.SendRoot[:]
+}
+
+func (info HeaderInfo) mixDigest() [32]byte {
+ mixDigest := common.Hash{}
+ binary.BigEndian.PutUint64(mixDigest[:8], info.SendCount)
+ binary.BigEndian.PutUint64(mixDigest[8:16], info.L1BlockNumber)
+ binary.BigEndian.PutUint64(mixDigest[16:24], info.ArbOSFormatVersion)
+ return mixDigest
+}
+
+func (info HeaderInfo) UpdateHeaderWithInfo(header *types.Header) {
+ header.MixDigest = info.mixDigest()
+ header.Extra = info.extra()
+}
+
+func DeserializeHeaderExtraInformation(header *types.Header) HeaderInfo {
+ if header == nil || header.BaseFee == nil || header.BaseFee.Sign() == 0 || len(header.Extra) != 32 || header.Difficulty.Cmp(common.Big1) != 0 {
+ // imported blocks have no base fee
+ // The genesis block doesn't have an ArbOS encoded extra field
+ return HeaderInfo{}
+ }
+ extra := HeaderInfo{}
+ copy(extra.SendRoot[:], header.Extra)
+ extra.SendCount = binary.BigEndian.Uint64(header.MixDigest[:8])
+ extra.L1BlockNumber = binary.BigEndian.Uint64(header.MixDigest[8:16])
+ extra.ArbOSFormatVersion = binary.BigEndian.Uint64(header.MixDigest[16:24])
+ return extra
+}
+
+func GetArbOSVersion(header *types.Header, chain *chain.Config) uint64 {
+ if !chain.IsArbitrum() {
+ return 0
+ }
+ extraInfo := DeserializeHeaderExtraInformation(header)
+ return extraInfo.ArbOSFormatVersion
+}
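Editor's note: HeaderInfo packs the three Arbitrum counters big-endian into the first 24 bytes of MixDigest and stores SendRoot as the 32-byte Extra field; DeserializeHeaderExtraInformation only decodes headers that carry a non-zero base fee and difficulty 1. Below is a minimal round-trip sketch, illustrative only and not part of the diff; the arbBlocks import path is assumed from the new arb/blocks directory and all values are made up.

package main

import (
	"fmt"
	"math/big"

	arbBlocks "github.com/erigontech/erigon/arb/blocks" // assumed path for the new package
	"github.com/erigontech/erigon/common"
	"github.com/erigontech/erigon/execution/types"
)

func main() {
	info := arbBlocks.HeaderInfo{
		SendRoot:           common.HexToHash("0x01"),
		SendCount:          42,
		L1BlockNumber:      9_000_000,
		ArbOSFormatVersion: 32,
	}
	header := &types.Header{
		BaseFee:    big.NewInt(100_000_000), // decoding requires a non-zero base fee
		Difficulty: big.NewInt(1),           // and difficulty == 1
	}
	info.UpdateHeaderWithInfo(header) // SendRoot -> Extra, counters -> MixDigest

	decoded := arbBlocks.DeserializeHeaderExtraInformation(header)
	fmt.Println(decoded.SendCount, decoded.L1BlockNumber, decoded.ArbOSFormatVersion) // 42 9000000 32
}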
diff --git a/arb/bloombits/bitutil.go b/arb/bloombits/bitutil.go
new file mode 100644
index 00000000000..b34ed1e77f2
--- /dev/null
+++ b/arb/bloombits/bitutil.go
@@ -0,0 +1,181 @@
+package bloombits
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
+
+// XORBytes xors the bytes in a and b. The destination is assumed to have enough
+// space. Returns the number of bytes xor'd.
+func XORBytes(dst, a, b []byte) int {
+ if supportsUnaligned {
+ return fastXORBytes(dst, a, b)
+ }
+ return safeXORBytes(dst, a, b)
+}
+
+// fastXORBytes xors in bulk. It only works on architectures that support
+// unaligned read/writes.
+func fastXORBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ w := n / wordSize
+ if w > 0 {
+ dw := *(*[]uintptr)(unsafe.Pointer(&dst))
+ aw := *(*[]uintptr)(unsafe.Pointer(&a))
+ bw := *(*[]uintptr)(unsafe.Pointer(&b))
+ for i := 0; i < w; i++ {
+ dw[i] = aw[i] ^ bw[i]
+ }
+ }
+ for i := n - n%wordSize; i < n; i++ {
+ dst[i] = a[i] ^ b[i]
+ }
+ return n
+}
+
+// safeXORBytes xors one byte at a time. It works on all architectures,
+// regardless of whether they support unaligned reads/writes.
+func safeXORBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ for i := 0; i < n; i++ {
+ dst[i] = a[i] ^ b[i]
+ }
+ return n
+}
+
+// ANDBytes ands the bytes in a and b. The destination is assumed to have enough
+// space. Returns the number of bytes and'd.
+func ANDBytes(dst, a, b []byte) int {
+ if supportsUnaligned {
+ return fastANDBytes(dst, a, b)
+ }
+ return safeANDBytes(dst, a, b)
+}
+
+// fastANDBytes ands in bulk. It only works on architectures that support
+// unaligned read/writes.
+func fastANDBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ w := n / wordSize
+ if w > 0 {
+ dw := *(*[]uintptr)(unsafe.Pointer(&dst))
+ aw := *(*[]uintptr)(unsafe.Pointer(&a))
+ bw := *(*[]uintptr)(unsafe.Pointer(&b))
+ for i := 0; i < w; i++ {
+ dw[i] = aw[i] & bw[i]
+ }
+ }
+ for i := n - n%wordSize; i < n; i++ {
+ dst[i] = a[i] & b[i]
+ }
+ return n
+}
+
+// safeANDBytes ands one byte at a time. It works on all architectures,
+// regardless of whether they support unaligned reads/writes.
+func safeANDBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ for i := 0; i < n; i++ {
+ dst[i] = a[i] & b[i]
+ }
+ return n
+}
+
+// ORBytes ors the bytes in a and b. The destination is assumed to have enough
+// space. Returns the number of bytes or'd.
+func ORBytes(dst, a, b []byte) int {
+ if supportsUnaligned {
+ return fastORBytes(dst, a, b)
+ }
+ return safeORBytes(dst, a, b)
+}
+
+// fastORBytes ors in bulk. It only works on architectures that support
+// unaligned read/writes.
+func fastORBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ w := n / wordSize
+ if w > 0 {
+ dw := *(*[]uintptr)(unsafe.Pointer(&dst))
+ aw := *(*[]uintptr)(unsafe.Pointer(&a))
+ bw := *(*[]uintptr)(unsafe.Pointer(&b))
+ for i := 0; i < w; i++ {
+ dw[i] = aw[i] | bw[i]
+ }
+ }
+ for i := n - n%wordSize; i < n; i++ {
+ dst[i] = a[i] | b[i]
+ }
+ return n
+}
+
+// safeORBytes ors one byte at a time. It works on all architectures,
+// regardless of whether they support unaligned reads/writes.
+func safeORBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ for i := 0; i < n; i++ {
+ dst[i] = a[i] | b[i]
+ }
+ return n
+}
+
+// TestBytes tests whether any bit is set in the input byte slice.
+func TestBytes(p []byte) bool {
+ if supportsUnaligned {
+ return fastTestBytes(p)
+ }
+ return safeTestBytes(p)
+}
+
+// fastTestBytes tests for set bits in bulk. It only works on architectures that
+// support unaligned read/writes.
+func fastTestBytes(p []byte) bool {
+ n := len(p)
+ w := n / wordSize
+ if w > 0 {
+ pw := *(*[]uintptr)(unsafe.Pointer(&p))
+ for i := 0; i < w; i++ {
+ if pw[i] != 0 {
+ return true
+ }
+ }
+ }
+ for i := n - n%wordSize; i < n; i++ {
+ if p[i] != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// safeTestBytes tests for set bits one byte at a time. It works on all
+// architectures, regardless of whether they support unaligned reads/writes.
+func safeTestBytes(p []byte) bool {
+ for i := 0; i < len(p); i++ {
+ if p[i] != 0 {
+ return true
+ }
+ }
+ return false
+}
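Editor's note: these helpers are byte-slice combinators used by the matcher pipeline; the fast paths write a machine word at a time where unaligned access is safe, and the safe paths fall back to per-byte loops. A small usage sketch follows, illustrative only and not part of the diff; the bloombits import path is assumed.

package main

import (
	"fmt"

	"github.com/erigontech/erigon/arb/bloombits" // assumed path for the new package
)

func main() {
	a := []byte{0b1010_1010, 0b1111_0000}
	b := []byte{0b1100_1100, 0b0000_1111}

	and := make([]byte, 2)
	bloombits.ANDBytes(and, a, b) // and = {0b1000_1000, 0b0000_0000}

	or := make([]byte, 2)
	bloombits.ORBytes(or, a, b) // or = {0b1110_1110, 0b1111_1111}

	fmt.Println(bloombits.TestBytes(and), bloombits.TestBytes(or)) // true true
}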
diff --git a/arb/bloombits/doc.go b/arb/bloombits/doc.go
new file mode 100644
index 00000000000..3d159e74f77
--- /dev/null
+++ b/arb/bloombits/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package bloombits implements bloom filtering on batches of data.
+package bloombits
diff --git a/arb/bloombits/generator.go b/arb/bloombits/generator.go
new file mode 100644
index 00000000000..acf6fa79f64
--- /dev/null
+++ b/arb/bloombits/generator.go
@@ -0,0 +1,98 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "errors"
+
+ "github.com/erigontech/erigon/execution/types"
+)
+
+var (
+ // errSectionOutOfBounds is returned if the user tried to add more bloom filters
+ // to the batch than available space, or tried to retrieve beyond the capacity.
+ errSectionOutOfBounds = errors.New("section out of bounds")
+
+ // errBloomBitOutOfBounds is returned if the user tried to retrieve a bloom
+ // bit beyond the capacity.
+ errBloomBitOutOfBounds = errors.New("bloom bit out of bounds")
+)
+
+// Generator takes a number of bloom filters and generates the rotated bloom bits
+// to be used for batched filtering.
+type Generator struct {
+ blooms [types.BloomBitLength][]byte // Rotated blooms for per-bit matching
+ sections uint // Number of sections to batch together
+ nextSec uint // Next section to set when adding a bloom
+}
+
+// NewGenerator creates a rotated bloom generator that can iteratively fill a
+// batched bloom filter's bits.
+func NewGenerator(sections uint) (*Generator, error) {
+ if sections%8 != 0 {
+ return nil, errors.New("section count not multiple of 8")
+ }
+ b := &Generator{sections: sections}
+ for i := 0; i < types.BloomBitLength; i++ {
+ b.blooms[i] = make([]byte, sections/8)
+ }
+ return b, nil
+}
+
+// AddBloom takes a single bloom filter and sets the corresponding bit column
+// in memory accordingly.
+func (b *Generator) AddBloom(index uint, bloom types.Bloom) error {
+ // Make sure we're not adding more bloom filters than our capacity
+ if b.nextSec >= b.sections {
+ return errSectionOutOfBounds
+ }
+ if b.nextSec != index {
+ return errors.New("bloom filter with unexpected index")
+ }
+ // Rotate the bloom and insert into our collection
+ byteIndex := b.nextSec / 8
+ bitIndex := byte(7 - b.nextSec%8)
+ for byt := 0; byt < types.BloomByteLength; byt++ {
+ bloomByte := bloom[types.BloomByteLength-1-byt]
+ if bloomByte == 0 {
+ continue
+ }
+ base := 8 * byt
+ b.blooms[base+7][byteIndex] |= ((bloomByte >> 7) & 1) << bitIndex
+ b.blooms[base+6][byteIndex] |= ((bloomByte >> 6) & 1) << bitIndex
+ b.blooms[base+5][byteIndex] |= ((bloomByte >> 5) & 1) << bitIndex
+ b.blooms[base+4][byteIndex] |= ((bloomByte >> 4) & 1) << bitIndex
+ b.blooms[base+3][byteIndex] |= ((bloomByte >> 3) & 1) << bitIndex
+ b.blooms[base+2][byteIndex] |= ((bloomByte >> 2) & 1) << bitIndex
+ b.blooms[base+1][byteIndex] |= ((bloomByte >> 1) & 1) << bitIndex
+ b.blooms[base][byteIndex] |= (bloomByte & 1) << bitIndex
+ }
+ b.nextSec++
+ return nil
+}
+
+// Bitset returns the bit vector belonging to the given bit index after all
+// blooms have been added.
+func (b *Generator) Bitset(idx uint) ([]byte, error) {
+ if b.nextSec != b.sections {
+ return nil, errors.New("bloom not fully generated yet")
+ }
+ if idx >= types.BloomBitLength {
+ return nil, errBloomBitOutOfBounds
+ }
+ return b.blooms[idx], nil
+}
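Editor's note: the Generator rotates a section's worth of per-block bloom filters into 2048 column vectors, one per bloom bit, each sections/8 bytes long; bit i of vector b records whether block i of the section had bloom bit b set. A minimal sketch follows, illustrative only and not part of the diff; the import path is assumed.

package main

import (
	"fmt"

	"github.com/erigontech/erigon/arb/bloombits" // assumed path for the new package
	"github.com/erigontech/erigon/execution/types"
)

func main() {
	const sectionSize = 4096 // must be a multiple of 8

	gen, err := bloombits.NewGenerator(sectionSize)
	if err != nil {
		panic(err)
	}
	for i := uint(0); i < sectionSize; i++ {
		var bloom types.Bloom // in practice: the i-th header's logs bloom in the section
		if err := gen.AddBloom(i, bloom); err != nil {
			panic(err)
		}
	}
	// One 4096-bit (512-byte) vector per bloom bit index.
	vector, err := gen.Bitset(0)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(vector)) // 512
}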
diff --git a/arb/bloombits/generator_test.go b/arb/bloombits/generator_test.go
new file mode 100644
index 00000000000..b4d710ec635
--- /dev/null
+++ b/arb/bloombits/generator_test.go
@@ -0,0 +1,100 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "bytes"
+ crand "crypto/rand"
+ "math/rand"
+ "testing"
+
+ "github.com/erigontech/erigon/execution/types"
+)
+
+// Tests that batched bloom bits are correctly rotated from the input bloom
+// filters.
+func TestGenerator(t *testing.T) {
+ // Generate the input and the rotated output
+ var input, output [types.BloomBitLength][types.BloomByteLength]byte
+
+ for i := 0; i < types.BloomBitLength; i++ {
+ for j := 0; j < types.BloomBitLength; j++ {
+ bit := byte(rand.Int() % 2)
+
+ input[i][j/8] |= bit << byte(7-j%8)
+ output[types.BloomBitLength-1-j][i/8] |= bit << byte(7-i%8)
+ }
+ }
+ // Crunch the input through the generator and verify the result
+ gen, err := NewGenerator(types.BloomBitLength)
+ if err != nil {
+ t.Fatalf("failed to create bloombit generator: %v", err)
+ }
+ for i, bloom := range input {
+ if err := gen.AddBloom(uint(i), bloom); err != nil {
+ t.Fatalf("bloom %d: failed to add: %v", i, err)
+ }
+ }
+ for i, want := range output {
+ have, err := gen.Bitset(uint(i))
+ if err != nil {
+ t.Fatalf("output %d: failed to retrieve bits: %v", i, err)
+ }
+ if !bytes.Equal(have, want[:]) {
+ t.Errorf("output %d: bit vector mismatch have %x, want %x", i, have, want)
+ }
+ }
+}
+
+func BenchmarkGenerator(b *testing.B) {
+ var input [types.BloomBitLength][types.BloomByteLength]byte
+ b.Run("empty", func(b *testing.B) {
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ // Crunch the input through the generator and verify the result
+ gen, err := NewGenerator(types.BloomBitLength)
+ if err != nil {
+ b.Fatalf("failed to create bloombit generator: %v", err)
+ }
+ for j, bloom := range &input {
+ if err := gen.AddBloom(uint(j), bloom); err != nil {
+ b.Fatalf("bloom %d: failed to add: %v", i, err)
+ }
+ }
+ }
+ })
+ for i := 0; i < types.BloomBitLength; i++ {
+ crand.Read(input[i][:])
+ }
+ b.Run("random", func(b *testing.B) {
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ // Crunch the input through the generator and verify the result
+ gen, err := NewGenerator(types.BloomBitLength)
+ if err != nil {
+ b.Fatalf("failed to create bloombit generator: %v", err)
+ }
+ for j, bloom := range &input {
+ if err := gen.AddBloom(uint(j), bloom); err != nil {
+ b.Fatalf("bloom %d: failed to add: %v", i, err)
+ }
+ }
+ }
+ })
+}
diff --git a/arb/bloombits/matcher.go b/arb/bloombits/matcher.go
new file mode 100644
index 00000000000..08aa873d345
--- /dev/null
+++ b/arb/bloombits/matcher.go
@@ -0,0 +1,644 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "math"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/erigontech/erigon/common/crypto"
+)
+
+// bloomIndexes represents the bit indexes inside the bloom filter that belong
+// to some key.
+type bloomIndexes [3]uint
+
+// calcBloomIndexes returns the bloom filter bit indexes belonging to the given key.
+func calcBloomIndexes(b []byte) bloomIndexes {
+ b = crypto.Keccak256(b)
+
+ var idxs bloomIndexes
+ for i := 0; i < len(idxs); i++ {
+ idxs[i] = (uint(b[2*i])<<8)&2047 + uint(b[2*i+1])
+ }
+ return idxs
+}
+
+// partialMatches with a non-nil vector represents a section in which some sub-
+// matchers have already found potential matches. Subsequent sub-matchers will
+// binary AND their matches with this vector. If vector is nil, it represents a
+// section to be processed by the first sub-matcher.
+type partialMatches struct {
+ section uint64
+ bitset []byte
+}
+
+// Retrieval represents a request for retrieval task assignments for a given
+// bit with the given number of fetch elements, or a response for such a request.
+// It can also have the actual results set to be used as a delivery data struct.
+//
+// The context and error fields are used by the light client to terminate matching
+// early if an error is encountered on some path of the pipeline.
+type Retrieval struct {
+ Bit uint
+ Sections []uint64
+ Bitsets [][]byte
+
+ Context context.Context
+ Error error
+}
+
+// Matcher is a pipelined system of schedulers and logic matchers which perform
+// binary AND/OR operations on the bit-streams, creating a stream of potential
+// blocks to inspect for data content.
+type Matcher struct {
+ sectionSize uint64 // Size of the data batches to filter on
+
+ filters [][]bloomIndexes // Filter the system is matching for
+ schedulers map[uint]*scheduler // Retrieval schedulers for loading bloom bits
+
+ retrievers chan chan uint // Retriever processes waiting for bit allocations
+ counters chan chan uint // Retriever processes waiting for task count reports
+ retrievals chan chan *Retrieval // Retriever processes waiting for task allocations
+ deliveries chan *Retrieval // Retriever processes waiting for task response deliveries
+
+ running atomic.Bool // Atomic flag whether a session is live or not
+}
+
+// NewMatcher creates a new pipeline for retrieving bloom bit streams and doing
+// address and topic filtering on them. Setting a filter component to `nil` is
+// allowed and will result in that filter rule being skipped (OR 0x11...1).
+func NewMatcher(sectionSize uint64, filters [][][]byte) *Matcher {
+ // Create the matcher instance
+ m := &Matcher{
+ sectionSize: sectionSize,
+ schedulers: make(map[uint]*scheduler),
+ retrievers: make(chan chan uint),
+ counters: make(chan chan uint),
+ retrievals: make(chan chan *Retrieval),
+ deliveries: make(chan *Retrieval),
+ }
+ // Calculate the bloom bit indexes for the groups we're interested in
+ m.filters = nil
+
+ for _, filter := range filters {
+ // Gather the bit indexes of the filter rule, special casing the nil filter
+ if len(filter) == 0 {
+ continue
+ }
+ bloomBits := make([]bloomIndexes, len(filter))
+ for i, clause := range filter {
+ if clause == nil {
+ bloomBits = nil
+ break
+ }
+ bloomBits[i] = calcBloomIndexes(clause)
+ }
+ // Accumulate the filter rules if no nil rule was within
+ if bloomBits != nil {
+ m.filters = append(m.filters, bloomBits)
+ }
+ }
+ // For every bit, create a scheduler to load/download the bit vectors
+ for _, bloomIndexLists := range m.filters {
+ for _, bloomIndexList := range bloomIndexLists {
+ for _, bloomIndex := range bloomIndexList {
+ m.addScheduler(bloomIndex)
+ }
+ }
+ }
+ return m
+}
+
+// addScheduler adds a bit stream retrieval scheduler for the given bit index if
+// it has not existed before. If the bit is already selected for filtering, the
+// existing scheduler can be used.
+func (m *Matcher) addScheduler(idx uint) {
+ if _, ok := m.schedulers[idx]; ok {
+ return
+ }
+ m.schedulers[idx] = newScheduler(idx)
+}
+
+// Start starts the matching process and returns a stream of bloom matches in
+// a given range of blocks. If there are no more matches in the range, the result
+// channel is closed.
+func (m *Matcher) Start(ctx context.Context, begin, end uint64, results chan uint64) (*MatcherSession, error) {
+ // Make sure we're not creating concurrent sessions
+ if m.running.Swap(true) {
+ return nil, errors.New("matcher already running")
+ }
+ defer m.running.Store(false)
+
+ // Initiate a new matching round
+ session := &MatcherSession{
+ matcher: m,
+ quit: make(chan struct{}),
+ ctx: ctx,
+ }
+ for _, scheduler := range m.schedulers {
+ scheduler.reset()
+ }
+ sink := m.run(begin, end, cap(results), session)
+
+ // Read the output from the result sink and deliver to the user
+ session.pend.Add(1)
+ go func() {
+ defer session.pend.Done()
+ defer close(results)
+
+ for {
+ select {
+ case <-session.quit:
+ return
+
+ case res, ok := <-sink:
+ // New match result found
+ if !ok {
+ return
+ }
+ // Calculate the first and last blocks of the section
+ sectionStart := res.section * m.sectionSize
+
+ first := sectionStart
+ if begin > first {
+ first = begin
+ }
+ last := sectionStart + m.sectionSize - 1
+ if end < last {
+ last = end
+ }
+ // Iterate over all the blocks in the section and return the matching ones
+ for i := first; i <= last; i++ {
+ // Skip the entire byte if no matches are found inside (and we're processing an entire byte!)
+ next := res.bitset[(i-sectionStart)/8]
+ if next == 0 {
+ if i%8 == 0 {
+ i += 7
+ }
+ continue
+ }
+ // Some bit is set, do the actual submatching
+ if bit := 7 - i%8; next&(1<<bit) != 0 {
+ index := sort.Search(len(queue), func(i int) bool { return queue[i] >= req.section })
+ requests[req.bit] = append(queue[:index], append([]uint64{req.section}, queue[index:]...)...)
+
+ // If it's a new bit and we have waiting fetchers, allocate to them
+ if len(queue) == 0 {
+ assign(req.bit)
+ }
+
+ case fetcher := <-retrievers:
+ // New retriever arrived, find the lowest section-ed bit to assign
+ bit, best := uint(0), uint64(math.MaxUint64)
+ for idx := range unallocs {
+ if requests[idx][0] < best {
+ bit, best = idx, requests[idx][0]
+ }
+ }
+ // Stop tracking this bit (and alloc notifications if no more work is available)
+ delete(unallocs, bit)
+ if len(unallocs) == 0 {
+ retrievers = nil
+ }
+ allocs++
+ fetcher <- bit
+
+ case fetcher := <-m.counters:
+ // New task count request arrives, return number of items
+ fetcher <- uint(len(requests[<-fetcher]))
+
+ case fetcher := <-m.retrievals:
+ // New fetcher waiting for tasks to retrieve, assign
+ task := <-fetcher
+ if want := len(task.Sections); want >= len(requests[task.Bit]) {
+ task.Sections = requests[task.Bit]
+ delete(requests, task.Bit)
+ } else {
+ task.Sections = append(task.Sections[:0], requests[task.Bit][:want]...)
+ requests[task.Bit] = append(requests[task.Bit][:0], requests[task.Bit][want:]...)
+ }
+ fetcher <- task
+
+ // If anything was left unallocated, try to assign to someone else
+ if len(requests[task.Bit]) > 0 {
+ assign(task.Bit)
+ }
+
+ case result := <-m.deliveries:
+ // New retrieval task response from fetcher, split out missing sections and
+ // deliver complete ones
+ var (
+ sections = make([]uint64, 0, len(result.Sections))
+ bitsets = make([][]byte, 0, len(result.Bitsets))
+ missing = make([]uint64, 0, len(result.Sections))
+ )
+ for i, bitset := range result.Bitsets {
+ if len(bitset) == 0 {
+ missing = append(missing, result.Sections[i])
+ continue
+ }
+ sections = append(sections, result.Sections[i])
+ bitsets = append(bitsets, bitset)
+ }
+ m.schedulers[result.Bit].deliver(sections, bitsets)
+ allocs--
+
+ // Reschedule missing sections and allocate bit if newly available
+ if len(missing) > 0 {
+ queue := requests[result.Bit]
+ for _, section := range missing {
+ index := sort.Search(len(queue), func(i int) bool { return queue[i] >= section })
+ queue = append(queue[:index], append([]uint64{section}, queue[index:]...)...)
+ }
+ requests[result.Bit] = queue
+
+ if len(queue) == len(missing) {
+ assign(result.Bit)
+ }
+ }
+
+ // End the session when all pending deliveries have arrived.
+ if shutdown == nil && allocs == 0 {
+ return
+ }
+ }
+ }
+}
+
+// MatcherSession is returned by a started matcher to be used as a terminator
+// for the actively running matching operation.
+type MatcherSession struct {
+ matcher *Matcher
+
+ closer sync.Once // Sync object to ensure we only ever close once
+ quit chan struct{} // Quit channel to request pipeline termination
+
+ ctx context.Context // Context used by the light client to abort filtering
+ err error // Global error to track retrieval failures deep in the chain
+ errLock sync.Mutex
+
+ pend sync.WaitGroup
+}
+
+// Close stops the matching process and waits for all subprocesses to terminate
+// before returning. The timeout may be used for graceful shutdown, allowing the
+// currently running retrievals to complete before this time.
+func (s *MatcherSession) Close() {
+ s.closer.Do(func() {
+ // Signal termination and wait for all goroutines to tear down
+ close(s.quit)
+ s.pend.Wait()
+ })
+}
+
+// Error returns any failure encountered during the matching session.
+func (s *MatcherSession) Error() error {
+ s.errLock.Lock()
+ defer s.errLock.Unlock()
+
+ return s.err
+}
+
+// allocateRetrieval assigns a bloom bit index to a client process that can either
+// immediately request and fetch the section contents assigned to this bit or wait
+// a little while for more sections to be requested.
+func (s *MatcherSession) allocateRetrieval() (uint, bool) {
+ fetcher := make(chan uint)
+
+ select {
+ case <-s.quit:
+ return 0, false
+ case s.matcher.retrievers <- fetcher:
+ bit, ok := <-fetcher
+ return bit, ok
+ }
+}
+
+// pendingSections returns the number of pending section retrievals belonging to
+// the given bloom bit index.
+func (s *MatcherSession) pendingSections(bit uint) int {
+ fetcher := make(chan uint)
+
+ select {
+ case <-s.quit:
+ return 0
+ case s.matcher.counters <- fetcher:
+ fetcher <- bit
+ return int(<-fetcher)
+ }
+}
+
+// allocateSections assigns all or part of an already allocated bit-task queue
+// to the requesting process.
+func (s *MatcherSession) allocateSections(bit uint, count int) []uint64 {
+ fetcher := make(chan *Retrieval)
+
+ select {
+ case <-s.quit:
+ return nil
+ case s.matcher.retrievals <- fetcher:
+ task := &Retrieval{
+ Bit: bit,
+ Sections: make([]uint64, count),
+ }
+ fetcher <- task
+ return (<-fetcher).Sections
+ }
+}
+
+// deliverSections delivers a batch of section bit-vectors for a specific bloom
+// bit index to be injected into the processing pipeline.
+func (s *MatcherSession) deliverSections(bit uint, sections []uint64, bitsets [][]byte) {
+ s.matcher.deliveries <- &Retrieval{Bit: bit, Sections: sections, Bitsets: bitsets}
+}
+
+// Multiplex polls the matcher session for retrieval tasks and multiplexes it into
+// the requested retrieval queue to be serviced together with other sessions.
+//
+// This method will block for the lifetime of the session. Even after termination
+// of the session, any in-flight requests still need to be responded to; empty
+// responses are fine in that case.
+func (s *MatcherSession) Multiplex(batch int, wait time.Duration, mux chan chan *Retrieval) {
+ for {
+ // Allocate a new bloom bit index to retrieve data for, stopping when done
+ bit, ok := s.allocateRetrieval()
+ if !ok {
+ return
+ }
+ // Bit allocated, throttle a bit if we're below our batch limit
+ if s.pendingSections(bit) < batch {
+ select {
+ case <-s.quit:
+ // Session terminating, we can't meaningfully service, abort
+ s.allocateSections(bit, 0)
+ s.deliverSections(bit, []uint64{}, [][]byte{})
+ return
+
+ case <-time.After(wait):
+ // Throttling up, fetch whatever is available
+ }
+ }
+ // Allocate as much as we can handle and request servicing
+ sections := s.allocateSections(bit, batch)
+ request := make(chan *Retrieval)
+
+ select {
+ case <-s.quit:
+ // Session terminating, we can't meaningfully service, abort
+ s.deliverSections(bit, sections, make([][]byte, len(sections)))
+ return
+
+ case mux <- request:
+ // Retrieval accepted, something must arrive before we're aborting
+ request <- &Retrieval{Bit: bit, Sections: sections, Context: s.ctx}
+
+ result := <-request
+
+ // Deliver a result before s.Close() to avoid a deadlock
+ s.deliverSections(result.Bit, result.Sections, result.Bitsets)
+
+ if result.Error != nil {
+ s.errLock.Lock()
+ s.err = result.Error
+ s.errLock.Unlock()
+ s.Close()
+ }
+ }
+ }
+}
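Editor's note: putting the pieces together, NewMatcher converts the address/topic filter groups into bloom bit indexes, Start drives the section pipeline, and one or more Multiplex loops hand retrieval tasks to whatever serves the stored bit vectors. The wiring sketch below is illustrative only and not part of the diff; the import path is assumed and the bitset source is stubbed with zero vectors, so no matches are produced.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/erigontech/erigon/arb/bloombits" // assumed path for the new package
	"github.com/erigontech/erigon/common"
)

func main() {
	const sectionSize = 4096

	// One address clause and one topic clause; nil entries would act as wildcards.
	filters := [][][]byte{
		{common.Address{0x01}.Bytes()},
		{common.Hash{0x02}.Bytes()},
	}
	matcher := bloombits.NewMatcher(sectionSize, filters)

	matches := make(chan uint64, 64)
	session, err := matcher.Start(context.Background(), 0, 100_000, matches)
	if err != nil {
		panic(err)
	}
	defer session.Close()

	// Retrieval service: the session hands out (bit, sections) tasks to fill in.
	requests := make(chan chan *bloombits.Retrieval)
	go session.Multiplex(16, 10*time.Millisecond, requests)
	go func() {
		for request := range requests {
			task := <-request
			task.Bitsets = make([][]byte, len(task.Sections))
			for i := range task.Sections {
				task.Bitsets[i] = make([]byte, sectionSize/8) // stub: zero vectors; read from storage in practice
			}
			request <- task
		}
	}()

	for block := range matches {
		fmt.Println("potential match at block", block)
	}
}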
diff --git a/arb/bloombits/matcher_test.go b/arb/bloombits/matcher_test.go
new file mode 100644
index 00000000000..dce36427687
--- /dev/null
+++ b/arb/bloombits/matcher_test.go
@@ -0,0 +1,292 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "context"
+ "math/rand"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/erigontech/erigon/common"
+)
+
+const testSectionSize = 4096
+
+// Tests that wildcard filter rules (nil) can be specified and are handled well.
+func TestMatcherWildcards(t *testing.T) {
+ t.Parallel()
+ matcher := NewMatcher(testSectionSize, [][][]byte{
+ {common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard
+ {common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard
+ {common.Hash{0x01}.Bytes()}, // Plain rule, sanity check
+ {common.Hash{0x01}.Bytes(), nil}, // Wildcard suffix, drop rule
+ {nil, common.Hash{0x01}.Bytes()}, // Wildcard prefix, drop rule
+ {nil, nil}, // Wildcard combo, drop rule
+ {}, // Inited wildcard rule, drop rule
+ nil, // Proper wildcard rule, drop rule
+ })
+ if len(matcher.filters) != 3 {
+ t.Fatalf("filter system size mismatch: have %d, want %d", len(matcher.filters), 3)
+ }
+ if len(matcher.filters[0]) != 2 {
+ t.Fatalf("address clause size mismatch: have %d, want %d", len(matcher.filters[0]), 2)
+ }
+ if len(matcher.filters[1]) != 2 {
+ t.Fatalf("combo topic clause size mismatch: have %d, want %d", len(matcher.filters[1]), 2)
+ }
+ if len(matcher.filters[2]) != 1 {
+ t.Fatalf("singletone topic clause size mismatch: have %d, want %d", len(matcher.filters[2]), 1)
+ }
+}
+
+// Tests the matcher pipeline on a single continuous workflow without interrupts.
+func TestMatcherContinuous(t *testing.T) {
+ t.Parallel()
+ testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, false, 75)
+ testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, false, 81)
+ testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, false, 36)
+}
+
+// Tests the matcher pipeline on a constantly interrupted and resumed work pattern
+// with the aim of ensuring data items are requested only once.
+func TestMatcherIntermittent(t *testing.T) {
+ t.Parallel()
+ testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, true, 75)
+ testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, true, 81)
+ testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, true, 36)
+}
+
+// Tests the matcher pipeline on random input to hopefully catch anomalies.
+func TestMatcherRandom(t *testing.T) {
+ t.Parallel()
+ for i := 0; i < 10; i++ {
+ testMatcherBothModes(t, makeRandomIndexes([]int{1}, 50), 0, 10000, 0)
+ testMatcherBothModes(t, makeRandomIndexes([]int{3}, 50), 0, 10000, 0)
+ testMatcherBothModes(t, makeRandomIndexes([]int{2, 2, 2}, 20), 0, 10000, 0)
+ testMatcherBothModes(t, makeRandomIndexes([]int{5, 5, 5}, 50), 0, 10000, 0)
+ testMatcherBothModes(t, makeRandomIndexes([]int{4, 4, 4}, 20), 0, 10000, 0)
+ }
+}
+
+// Tests that the matcher can properly find matches if the starting block is
+// shifted from a multiple of 8. This is needed to cover an optimisation with
+// bitset matching https://github.com/ethereum/go-ethereum/issues/15309.
+func TestMatcherShifted(t *testing.T) {
+ t.Parallel()
+ // Block 0 always matches in the tests, skip ahead of first 8 blocks with the
+ // start to get a potential zero byte in the matcher bitset.
+
+ // To keep the second bitset byte zero, the filter must only match for the first
+ // time in block 16, so doing an all-16 bit filter should suffice.
+
+ // To keep the starting block non divisible by 8, block number 9 is the first
+ // that would introduce a shift and not match block 0.
+ testMatcherBothModes(t, [][]bloomIndexes{{{16, 16, 16}}}, 9, 64, 0)
+}
+
+// Tests that matching on everything doesn't crash (special case internally).
+func TestWildcardMatcher(t *testing.T) {
+ t.Parallel()
+ testMatcherBothModes(t, nil, 0, 10000, 0)
+}
+
+// makeRandomIndexes generates a random filter system, composed of multiple filter
+// criteria, each having one bloom list component for the address and arbitrarily
+// many topic bloom list components.
+func makeRandomIndexes(lengths []int, max int) [][]bloomIndexes {
+ res := make([][]bloomIndexes, len(lengths))
+ for i, topics := range lengths {
+ res[i] = make([]bloomIndexes, topics)
+ for j := 0; j < topics; j++ {
+ for k := 0; k < len(res[i][j]); k++ {
+ res[i][j][k] = uint(rand.Intn(max-1) + 2)
+ }
+ }
+ }
+ return res
+}
+
+// testMatcherDiffBatches runs the given matches test in single-delivery and also
+// in batches delivery mode, verifying that all kinds of deliveries are handled
+// correctly within.
+func testMatcherDiffBatches(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32) {
+ singleton := testMatcher(t, filter, start, blocks, intermittent, retrievals, 1)
+ batched := testMatcher(t, filter, start, blocks, intermittent, retrievals, 16)
+
+ if singleton != batched {
+ t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, %v in singleton vs. %v in batched mode", filter, blocks, intermittent, singleton, batched)
+ }
+}
+
+// testMatcherBothModes runs the given matcher test in both continuous as well as
+// in intermittent mode, verifying that the request counts match each other.
+func testMatcherBothModes(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, retrievals uint32) {
+ continuous := testMatcher(t, filter, start, blocks, false, retrievals, 16)
+ intermittent := testMatcher(t, filter, start, blocks, true, retrievals, 16)
+
+ if continuous != intermittent {
+ t.Errorf("filter = %v blocks = %v: request count mismatch, %v in continuous vs. %v in intermittent mode", filter, blocks, continuous, intermittent)
+ }
+}
+
+// testMatcher is a generic tester to run the given matcher test and return the
+// number of requests made for cross validation between different modes.
+func testMatcher(t *testing.T, filter [][]bloomIndexes, start, blocks uint64, intermittent bool, retrievals uint32, maxReqCount int) uint32 {
+ // Create a new matcher and simulate our explicit random bitsets
+ matcher := NewMatcher(testSectionSize, nil)
+ matcher.filters = filter
+
+ for _, rule := range filter {
+ for _, topic := range rule {
+ for _, bit := range topic {
+ matcher.addScheduler(bit)
+ }
+ }
+ }
+ // Track the number of retrieval requests made
+ var requested atomic.Uint32
+
+ // Start the matching session for the filter and the retriever goroutines
+ quit := make(chan struct{})
+ matches := make(chan uint64, 16)
+
+ session, err := matcher.Start(context.Background(), start, blocks-1, matches)
+ if err != nil {
+ t.Fatalf("failed to stat matcher session: %v", err)
+ }
+ startRetrievers(session, quit, &requested, maxReqCount)
+
+ // Iterate over all the blocks and verify that the pipeline produces the correct matches
+ for i := start; i < blocks; i++ {
+ if expMatch3(filter, i) {
+ match, ok := <-matches
+ if !ok {
+ t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, results channel closed", filter, blocks, intermittent, i)
+ return 0
+ }
+ if match != i {
+ t.Errorf("filter = %v blocks = %v intermittent = %v: expected #%v, got #%v", filter, blocks, intermittent, i, match)
+ }
+ // If we're testing intermittent mode, abort and restart the pipeline
+ if intermittent {
+ session.Close()
+ close(quit)
+
+ quit = make(chan struct{})
+ matches = make(chan uint64, 16)
+
+ session, err = matcher.Start(context.Background(), i+1, blocks-1, matches)
+ if err != nil {
+ t.Fatalf("failed to stat matcher session: %v", err)
+ }
+ startRetrievers(session, quit, &requested, maxReqCount)
+ }
+ }
+ }
+ // Ensure the result channel is torn down after the last block
+ match, ok := <-matches
+ if ok {
+ t.Errorf("filter = %v blocks = %v intermittent = %v: expected closed channel, got #%v", filter, blocks, intermittent, match)
+ }
+ // Clean up the session and ensure we match the expected retrieval count
+ session.Close()
+ close(quit)
+
+ if retrievals != 0 && requested.Load() != retrievals {
+ t.Errorf("filter = %v blocks = %v intermittent = %v: request count mismatch, have #%v, want #%v", filter, blocks, intermittent, requested.Load(), retrievals)
+ }
+ return requested.Load()
+}
+
+// startRetrievers starts a batch of goroutines listening for section requests
+// and serving them.
+func startRetrievers(session *MatcherSession, quit chan struct{}, retrievals *atomic.Uint32, batch int) {
+ requests := make(chan chan *Retrieval)
+
+ for i := 0; i < 10; i++ {
+ // Start a multiplexer to test multiple threaded execution
+ go session.Multiplex(batch, 100*time.Microsecond, requests)
+
+ // Start a service to match the above multiplexer
+ go func() {
+ for {
+ // Wait for a service request or a shutdown
+ select {
+ case <-quit:
+ return
+
+ case request := <-requests:
+ task := <-request
+
+ task.Bitsets = make([][]byte, len(task.Sections))
+ for i, section := range task.Sections {
+ if rand.Int()%4 != 0 { // Handle occasional missing deliveries
+ task.Bitsets[i] = generateBitset(task.Bit, section)
+ retrievals.Add(1)
+ }
+ }
+ request <- task
+ }
+ }
+ }()
+ }
+}
+
+// generateBitset generates the rotated bitset for the given bloom bit and section
+// numbers.
+func generateBitset(bit uint, section uint64) []byte {
+ bitset := make([]byte, testSectionSize/8)
+ for i := 0; i < len(bitset); i++ {
+ for b := 0; b < 8; b++ {
+ blockIdx := section*testSectionSize + uint64(i*8+b)
+ bitset[i] += bitset[i]
+ if (blockIdx % uint64(bit)) == 0 {
+ bitset[i]++
+ }
+ }
+ }
+ return bitset
+}
+
+func expMatch1(filter bloomIndexes, i uint64) bool {
+ for _, ii := range filter {
+ if (i % uint64(ii)) != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func expMatch2(filter []bloomIndexes, i uint64) bool {
+ for _, ii := range filter {
+ if expMatch1(ii, i) {
+ return true
+ }
+ }
+ return false
+}
+
+func expMatch3(filter [][]bloomIndexes, i uint64) bool {
+ for _, ii := range filter {
+ if !expMatch2(ii, i) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/arb/bloombits/scheduler.go b/arb/bloombits/scheduler.go
new file mode 100644
index 00000000000..6449c7465a1
--- /dev/null
+++ b/arb/bloombits/scheduler.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "sync"
+)
+
+// request represents a bloom retrieval task to prioritize and pull from the local
+// database or remotely from the network.
+type request struct {
+ section uint64 // Section index to retrieve the bit-vector from
+ bit uint // Bit index within the section to retrieve the vector of
+}
+
+// response represents the state of a requested bit-vector through a scheduler.
+type response struct {
+ cached []byte // Cached bits to dedup multiple requests
+ done chan struct{} // Channel to allow waiting for completion
+}
+
+// scheduler handles the scheduling of bloom-filter retrieval operations for
+// entire section-batches belonging to a single bloom bit. Beside scheduling the
+// retrieval operations, this struct also deduplicates the requests and caches
+// the results to minimize network/database overhead even in complex filtering
+// scenarios.
+type scheduler struct {
+ bit uint // Index of the bit in the bloom filter this scheduler is responsible for
+ responses map[uint64]*response // Currently pending retrieval requests or already cached responses
+ lock sync.Mutex // Lock protecting the responses from concurrent access
+}
+
+// newScheduler creates a new bloom-filter retrieval scheduler for a specific
+// bit index.
+func newScheduler(idx uint) *scheduler {
+ return &scheduler{
+ bit: idx,
+ responses: make(map[uint64]*response),
+ }
+}
+
+// run creates a retrieval pipeline, receiving section indexes from sections and
+// returning the results in the same order through the done channel. Concurrent
+// runs of the same scheduler are allowed, leading to retrieval task deduplication.
+func (s *scheduler) run(sections chan uint64, dist chan *request, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
+ // Create a forwarder channel between requests and responses of the same size as
+ // the distribution channel (since that will block the pipeline anyway).
+ pend := make(chan uint64, cap(dist))
+
+ // Start the pipeline schedulers to forward between user -> distributor -> user
+ wg.Add(2)
+ go s.scheduleRequests(sections, dist, pend, quit, wg)
+ go s.scheduleDeliveries(pend, done, quit, wg)
+}
+
+// reset cleans up any leftovers from previous runs. This is required before a
+// restart to ensure that no previously requested but never delivered state will
+// cause a lockup.
+func (s *scheduler) reset() {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ for section, res := range s.responses {
+ if res.cached == nil {
+ delete(s.responses, section)
+ }
+ }
+}
+
+// scheduleRequests reads section retrieval requests from the input channel,
+// deduplicates the stream and pushes unique retrieval tasks into the distribution
+// channel for a database or network layer to honour.
+func (s *scheduler) scheduleRequests(reqs chan uint64, dist chan *request, pend chan uint64, quit chan struct{}, wg *sync.WaitGroup) {
+ // Clean up the goroutine and pipeline when done
+ defer wg.Done()
+ defer close(pend)
+
+ // Keep reading and scheduling section requests
+ for {
+ select {
+ case <-quit:
+ return
+
+ case section, ok := <-reqs:
+ // New section retrieval requested
+ if !ok {
+ return
+ }
+ // Deduplicate retrieval requests
+ unique := false
+
+ s.lock.Lock()
+ if s.responses[section] == nil {
+ s.responses[section] = &response{
+ done: make(chan struct{}),
+ }
+ unique = true
+ }
+ s.lock.Unlock()
+
+ // Schedule the section for retrieval and notify the deliverer to expect this section
+ if unique {
+ select {
+ case <-quit:
+ return
+ case dist <- &request{bit: s.bit, section: section}:
+ }
+ }
+ select {
+ case <-quit:
+ return
+ case pend <- section:
+ }
+ }
+ }
+}
+
+// scheduleDeliveries reads section acceptance notifications and waits for them
+// to be delivered, pushing them into the output data buffer.
+func (s *scheduler) scheduleDeliveries(pend chan uint64, done chan []byte, quit chan struct{}, wg *sync.WaitGroup) {
+ // Clean up the goroutine and pipeline when done
+ defer wg.Done()
+ defer close(done)
+
+ // Keep reading notifications and scheduling deliveries
+ for {
+ select {
+ case <-quit:
+ return
+
+ case idx, ok := <-pend:
+ // New section retrieval pending
+ if !ok {
+ return
+ }
+ // Wait until the request is honoured
+ s.lock.Lock()
+ res := s.responses[idx]
+ s.lock.Unlock()
+
+ select {
+ case <-quit:
+ return
+ case <-res.done:
+ }
+ // Deliver the result
+ select {
+ case <-quit:
+ return
+ case done <- res.cached:
+ }
+ }
+ }
+}
+
+// deliver is called by the request distributor when a reply to a request arrives.
+func (s *scheduler) deliver(sections []uint64, data [][]byte) {
+ s.lock.Lock()
+ defer s.lock.Unlock()
+
+ for i, section := range sections {
+ if res := s.responses[section]; res != nil && res.cached == nil { // Avoid non-requests and double deliveries
+ res.cached = data[i]
+ close(res.done)
+ }
+ }
+}
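Editor's note: because scheduler, request, and response are unexported, they are only reachable from inside the package, so the sketch below is written as package-internal code. It is illustrative only and not part of the diff; it shows the dedup behaviour: two requests for the same section produce a single fetch but two in-order deliveries.

package bloombits

import (
	"fmt"
	"sync"
)

// exampleSchedulerUsage is an in-package sketch, not part of the diff.
func exampleSchedulerUsage() {
	s := newScheduler(0) // scheduler for bloom bit 0

	var wg sync.WaitGroup
	quit := make(chan struct{})
	sections := make(chan uint64, 8) // section indexes the caller wants
	dist := make(chan *request, 8)   // deduplicated fetch tasks come out here
	done := make(chan []byte, 8)     // vectors come back in request order

	s.run(sections, dist, done, quit, &wg)

	// Fetcher side: answer each unique request the scheduler emits.
	go func() {
		for req := range dist {
			s.deliver([]uint64{req.section}, [][]byte{{byte(req.section)}})
		}
	}()

	// Caller side: ask for the same section twice; only one request reaches
	// dist, but both reads below get the cached vector.
	sections <- 7
	sections <- 7
	close(sections)

	fmt.Println(<-done, <-done) // [7] [7]

	close(quit)
	wg.Wait()
	close(dist) // lets the fetcher goroutine exit
}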
diff --git a/arb/bloombits/scheduler_test.go b/arb/bloombits/scheduler_test.go
new file mode 100644
index 00000000000..dcaaa915258
--- /dev/null
+++ b/arb/bloombits/scheduler_test.go
@@ -0,0 +1,103 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package bloombits
+
+import (
+ "bytes"
+ "math/big"
+ "sync"
+ "sync/atomic"
+ "testing"
+)
+
+// Tests that the scheduler can deduplicate and forward retrieval requests to
+// underlying fetchers and serve responses back, regardless of the concurrency
+// of the requesting clients or serving data fetchers.
+func TestSchedulerSingleClientSingleFetcher(t *testing.T) { testScheduler(t, 1, 1, 5000) }
+func TestSchedulerSingleClientMultiFetcher(t *testing.T) { testScheduler(t, 1, 10, 5000) }
+func TestSchedulerMultiClientSingleFetcher(t *testing.T) { testScheduler(t, 10, 1, 5000) }
+func TestSchedulerMultiClientMultiFetcher(t *testing.T) { testScheduler(t, 10, 10, 5000) }
+
+func testScheduler(t *testing.T, clients int, fetchers int, requests int) {
+ t.Parallel()
+ f := newScheduler(0)
+
+ // Create a batch of handler goroutines that respond to bloom bit requests and
+ // deliver them to the scheduler.
+ var fetchPend sync.WaitGroup
+ fetchPend.Add(fetchers)
+ defer fetchPend.Wait()
+
+ fetch := make(chan *request, 16)
+ defer close(fetch)
+
+ var delivered atomic.Uint32
+ for i := 0; i < fetchers; i++ {
+ go func() {
+ defer fetchPend.Done()
+
+ for req := range fetch {
+ delivered.Add(1)
+
+ f.deliver([]uint64{
+ req.section + uint64(requests), // Non-requested data (ensure it doesn't go out of bounds)
+ req.section, // Requested data
+ req.section, // Duplicated data (ensure it doesn't double close anything)
+ }, [][]byte{
+ {},
+ new(big.Int).SetUint64(req.section).Bytes(),
+ new(big.Int).SetUint64(req.section).Bytes(),
+ })
+ }
+ }()
+ }
+ // Start a batch of goroutines to concurrently run scheduling tasks
+ quit := make(chan struct{})
+
+ var pend sync.WaitGroup
+ pend.Add(clients)
+
+ for i := 0; i < clients; i++ {
+ go func() {
+ defer pend.Done()
+
+ in := make(chan uint64, 16)
+ out := make(chan []byte, 16)
+
+ f.run(in, fetch, out, quit, &pend)
+
+ go func() {
+ for j := 0; j < requests; j++ {
+ in <- uint64(j)
+ }
+ close(in)
+ }()
+ b := new(big.Int)
+ for j := 0; j < requests; j++ {
+ bits := <-out
+ if want := b.SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) {
+ t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want)
+ }
+ }
+ }()
+ }
+ pend.Wait()
+
+ if have := delivered.Load(); int(have) != requests {
+ t.Errorf("request count mismatch: have %v, want %v", have, requests)
+ }
+}
diff --git a/arb/chain/allocs/arb_sepolia.json b/arb/chain/allocs/arb_sepolia.json
new file mode 100644
index 00000000000..cd0c076ceb1
--- /dev/null
+++ b/arb/chain/allocs/arb_sepolia.json
@@ -0,0 +1,109 @@
+{
+ "00000000000000000000000000000000000a4b05" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "a4b05fffffffffffffffffffffffffffffffffff" :{
+ "balance": "0x0",
+ "nonce": "0x1",
+ "storage": {
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b09770c": "7b22706572696f64223a302c2265706f6368223a307d2c22617262697472756d",
+ "4a24a7a70af0b886a2eb2bdeed720306dde1594c34b10e15cb3790f88cf6dc72": "01",
+ "a9f6f085d78d1d37c5819e5c16c9e03198bd14e08cd1f6f8191bc6207b9e9700": "71b61c2e250afa05dfc36304d6c91501be0965d8",
+ "a9f6f085d78d1d37c5819e5c16c9e03198bd14e08cd1f6f8191bc6207b9e9707": "5a777fe3",
+ "e54de2a4cdacc0a0059d2b6e16348103df8c4aff409c31e40ec73d11926c8200": "6acfc0",
+ "e54de2a4cdacc0a0059d2b6e16348103df8c4aff409c31e40ec73d11926c8203": "05f5e100",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097703": "6b537570706f7274223a747275652c22656970313530426c6f636b223a302c22",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097704": "65697031353048617368223a2230783030303030303030303030303030303030",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097708": "223a302c22636f6e7374616e74696e6f706c65426c6f636b223a302c22706574",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097709": "65727362757267426c6f636b223a302c22697374616e62756c426c6f636b223a",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097713": "223a307d7d",
+ "19cc27d300234c0bd290398e6aaf8b7680a1006d08dc01e871b6d473bc9d6000": "01",
+ "41e0d7d38ffe0727248ee6ed6ea1250b08279ad004e3ab07b7ffe78352d8c400": "01",
+ "15fed0451499512d95f3ec5a41c878b9de55f21878b5b4e190d4667ec709b404": "066eee",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097700": "0245",
+ "26438022d7bc3dacb366c03344760cb0b1045ad014826fa28bcc2ae55cc91601": "a4b000000000000000000073657175656e636572",
+ "9e9ffd355c04cc0ffaba550b5b46d79f750513bcaf322e22daca18080c857a00": "02",
+ "9e9ffd355c04cc0ffaba550b5b46d79f750513bcaf322e22daca18080c857a01": "02",
+ "15fed0451499512d95f3ec5a41c878b9de55f21878b5b4e190d4667ec709b403": "71b61c2e250afa05dfc36304d6c91501be0965d8",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097702": "6b223a302c2264616f466f726b426c6f636b223a6e756c6c2c2264616f466f72",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097705": "3030303030303030303030303030303030303030303030303030303030303030",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097707": "2c22656970313538426c6f636b223a302c2262797a616e7469756d426c6f636b",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b09770d": "223a7b22456e61626c654172624f53223a747275652c22416c6c6f7744656275",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b09770f": "696c697479436f6d6d6974746565223a66616c73652c22496e697469616c4172",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097710": "624f5356657273696f6e223a31302c22496e697469616c436861696e4f776e65",
+ "e54de2a4cdacc0a0059d2b6e16348103df8c4aff409c31e40ec73d11926c8202": "05f5e100",
+ "41e0d7d38ffe0727248ee6ed6ea1250b08279ad004e3ab07b7ffe78352d8c401": "71b61c2e250afa05dfc36304d6c91501be0965d8",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b09770e": "67507265636f6d70696c6573223a66616c73652c2244617461417661696c6162",
+ "a9f6f085d78d1d37c5819e5c16c9e03198bd14e08cd1f6f8191bc6207b9e9701": "09896800",
+ "a9f6f085d78d1d37c5819e5c16c9e03198bd14e08cd1f6f8191bc6207b9e9703": "0a",
+ "e54de2a4cdacc0a0059d2b6e16348103df8c4aff409c31e40ec73d11926c8205": "66",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b09770a": "302c226d756972476c6163696572426c6f636b223a302c226265726c696e426c",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b09770b": "6f636b223a302c226c6f6e646f6e426c6f636b223a302c22636c69717565223a",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097711": "72223a2230783731423631633245323530414661303564466333363330344436",
+ "ff922cd4c96a7d831e52b53a42855d4ff5e131718cca2d7063af2a0269f6a3d8": "01",
+ "a9f6f085d78d1d37c5819e5c16c9e03198bd14e08cd1f6f8191bc6207b9e970a": "ffffffffffffffff",
+ "a9f6f085d78d1d37c5819e5c16c9e03198bd14e08cd1f6f8191bc6207b9e9709": "0186a0",
+ "15fed0451499512d95f3ec5a41c878b9de55f21878b5b4e190d4667ec709b400": "0a",
+ "e54de2a4cdacc0a0059d2b6e16348103df8c4aff409c31e40ec73d11926c8201": "01e84800",
+ "e54de2a4cdacc0a0059d2b6e16348103df8c4aff409c31e40ec73d11926c8206": "0a",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097701": "7b22636861696e4964223a3432313631342c22686f6d657374656164426c6f63",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097706": "303030303030303030303030303030222c22656970313535426c6f636b223a30",
+ "696e3678057072f0c8dba2395c9474f6a52565714cff46262e4548533b097712": "6339313530316245303936354438222c2247656e65736973426c6f636b4e756d",
+ "19cc27d300234c0bd290398e6aaf8b7680a1006d08dc01e871b6d473bc9d6001": "a4b000000000000000000073657175656e636572",
+ "a9f6f085d78d1d37c5819e5c16c9e03198bd14e08cd1f6f8191bc6207b9e9702": "0a"
+ }
+ },
+ "0000000000000000000000000000000000000064" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "0000000000000000000000000000000000000065" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "0000000000000000000000000000000000000066" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "0000000000000000000000000000000000000067" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "0000000000000000000000000000000000000068" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "0000000000000000000000000000000000000069" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "000000000000000000000000000000000000006f" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "000000000000000000000000000000000000006b" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "00000000000000000000000000000000000000ff" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "000000000000000000000000000000000000006c" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "000000000000000000000000000000000000006d" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "000000000000000000000000000000000000006e" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ },
+ "0000000000000000000000000000000000000070" :{
+ "balance": "0x0",
+ "code": "0xfe"
+ }
+}
\ No newline at end of file
diff --git a/arb/chain/chainspecs/arb-sepolia.json b/arb/chain/chainspecs/arb-sepolia.json
new file mode 100644
index 00000000000..5b35413672a
--- /dev/null
+++ b/arb/chain/chainspecs/arb-sepolia.json
@@ -0,0 +1,42 @@
+{
+ "chainName": "arb-sepolia",
+ "chainId": 421614,
+ "eip150Block": 0,
+ "eip155Block": 0,
+ "eip158Block": 0,
+ "homesteadBlock": 0,
+ "byzantiumBlock": 0,
+ "constantinopleBlock": 0,
+ "petersburgBlock": 0,
+ "istanbulBlock": 0,
+ "muirGlacierBlock": 0,
+ "berlinBlock": 0,
+ "londonBlock": 0,
+ "clique": {
+ "period": 0,
+ "epoch": 0
+ },
+ "arbitrum": {
+ "parent-chain-id": 11155111,
+ "parent-chain-is-arbitrum": false,
+ "chain-name": "sepolia-rollup",
+ "sequencer-url": "https://sepolia-rollup-sequencer.arbitrum.io/rpc",
+ "feed-url": "wss://sepolia-rollup.arbitrum.io/feed",
+ "EnableArbOS": true,
+ "AllowDebugPrecompiles": false,
+ "DataAvailabilityCommittee": false,
+ "InitialArbOSVersion": 10,
+ "InitialChainOwner": "0x71B61c2E250AFa05dFc36304D6c91501bE0965D8",
+ "GenesisBlockNum": 0,
+ "rollup": {
+ "bridge": "0x38f918D0E9F1b721EDaA41302E399fa1B79333a9",
+ "inbox": "0xaAe29B0366299461418F5324a79Afc425BE5ae21",
+ "sequencer-inbox": "0x6c97864CE4bEf387dE0b3310A44230f7E3F1be0D",
+ "rollup": "0xd80810638dbDF9081b72C1B33c65375e807281C8",
+ "validator-utils": "0x1f6860C3cac255fFFa72B7410b1183c3a0D261e0",
+ "validator-wallet-creator": "0x894fC71fA0A666352824EC954B401573C861D664",
+ "stake-token": "0xefb383126640fe4a760010c6e59c397d2b6c7141",
+ "deployed-at": 4139226
+ }
+ }
+}
diff --git a/arb/chain/config.go b/arb/chain/config.go
new file mode 100644
index 00000000000..b72c034fcde
--- /dev/null
+++ b/arb/chain/config.go
@@ -0,0 +1,29 @@
+package chain
+
+import (
+ "embed"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/chain/networkname"
+ chainspec "github.com/erigontech/erigon/execution/chain/spec"
+)
+
+//go:embed chainspecs
+var chainspecs embed.FS
+
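+// Arbitrum Sepolia chain definition: genesis hash, embedded chain config, and the spec registered on package init.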
+var (
+ ArbSepoliaGenesisHash = common.HexToHash("0x77194da4010e549a7028a9c3c51c3e277823be6ac7d138d0bb8a70197b5c004c")
+
+ ArbSepoliaChainConfig = chainspec.ReadChainConfig(chainspecs, "chainspecs/arb-sepolia.json")
+
+ ArbSepolia = chainspec.Spec{
+ Name: networkname.ArbiturmSepolia,
+ GenesisHash: ArbSepoliaGenesisHash,
+ Config: ArbSepoliaChainConfig,
+ Genesis: ArbSepoliaRollupGenesisBlock(),
+ }
+)
+
+func init() {
+ chainspec.RegisterChainSpec(networkname.ArbiturmSepolia, ArbSepolia)
+}
diff --git a/arb/chain/genesis.go b/arb/chain/genesis.go
new file mode 100644
index 00000000000..2e2b8f50bdd
--- /dev/null
+++ b/arb/chain/genesis.go
@@ -0,0 +1,31 @@
+package chain
+
+import (
+ "embed"
+ "math/big"
+
+ "github.com/erigontech/erigon/common"
+ chainspec "github.com/erigontech/erigon/execution/chain/spec"
+ "github.com/erigontech/erigon/execution/types"
+)
+
+//go:embed allocs
+var allocs embed.FS
+
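+// ArbSepoliaRollupGenesisBlock returns the Arbitrum Sepolia rollup genesis block, with its preallocated state loaded from the embedded allocs/arb_sepolia.json.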
+func ArbSepoliaRollupGenesisBlock() *types.Genesis {
+ return &types.Genesis{
+ Config: ArbSepoliaChainConfig,
+ Nonce: 0x0000000000000001,
+ Timestamp: 0x0,
+ ExtraData: common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ GasLimit: 0x4000000000000, // 2^50
+ Difficulty: big.NewInt(1), // "0x1"
+ Mixhash: common.HexToHash("0x00000000000000000000000000000000000000000000000a0000000000000000"),
+ Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"),
+ Number: 0x0, // block number 0
+ GasUsed: 0x0,
+ ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
+ BaseFee: big.NewInt(0x5f5e100),
+ Alloc: chainspec.ReadPrealloc(allocs, "allocs/arb_sepolia.json"),
+ }
+}
diff --git a/arb/chain/params/config_arbitrum.go b/arb/chain/params/config_arbitrum.go
new file mode 100644
index 00000000000..c37959bf685
--- /dev/null
+++ b/arb/chain/params/config_arbitrum.go
@@ -0,0 +1,253 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package params
+
+import (
+ "github.com/erigontech/erigon/arb/chain/types"
+ "github.com/erigontech/erigon/arb/osver"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/chain"
+
+ "math/big"
+)
+
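+// ArbitrumOneParams returns the ArbOS parameters for Arbitrum One (chain ID 42161).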
+func ArbitrumOneParams() types.ArbitrumChainParams {
+ return types.ArbitrumChainParams{
+ EnableArbOS: true,
+ AllowDebugPrecompiles: false,
+ DataAvailabilityCommittee: false,
+ InitialArbOSVersion: osver.ArbosVersion_6,
+ InitialChainOwner: common.HexToAddress("0xd345e41ae2cb00311956aa7109fc801ae8c81a52"),
+ }
+}
+
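+// ArbitrumNovaParams returns the ArbOS parameters for Arbitrum Nova (chain ID 42170), which runs with a data availability committee.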
+func ArbitrumNovaParams() types.ArbitrumChainParams {
+ return types.ArbitrumChainParams{
+ EnableArbOS: true,
+ AllowDebugPrecompiles: false,
+ DataAvailabilityCommittee: true,
+ InitialArbOSVersion: osver.ArbosVersion_1,
+ InitialChainOwner: common.HexToAddress("0x9C040726F2A657226Ed95712245DeE84b650A1b5"),
+ }
+}
+
+func ArbitrumRollupGoerliTestnetParams() types.ArbitrumChainParams {
+ return types.ArbitrumChainParams{
+ EnableArbOS: true,
+ AllowDebugPrecompiles: false,
+ DataAvailabilityCommittee: false,
+ InitialArbOSVersion: osver.ArbosVersion_2,
+ InitialChainOwner: common.HexToAddress("0x186B56023d42B2B4E7616589a5C62EEf5FCa21DD"),
+ }
+}
+
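+// ArbitrumDevTestParams returns ArbOS parameters for the local dev-test chain (chain ID 412346), with debug precompiles enabled.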
+func ArbitrumDevTestParams() types.ArbitrumChainParams {
+ return types.ArbitrumChainParams{
+ EnableArbOS: true,
+ AllowDebugPrecompiles: true,
+ DataAvailabilityCommittee: false,
+ InitialArbOSVersion: osver.ArbosVersion_32,
+ InitialChainOwner: common.Address{},
+ }
+}
+
+func ArbitrumDevTestDASParams() types.ArbitrumChainParams {
+ return types.ArbitrumChainParams{
+ EnableArbOS: true,
+ AllowDebugPrecompiles: true,
+ DataAvailabilityCommittee: true,
+ InitialArbOSVersion: osver.ArbosVersion_32,
+ InitialChainOwner: common.Address{},
+ }
+}
+
+func ArbitrumAnytrustGoerliTestnetParams() types.ArbitrumChainParams {
+ return types.ArbitrumChainParams{
+ EnableArbOS: true,
+ AllowDebugPrecompiles: false,
+ DataAvailabilityCommittee: true,
+ InitialArbOSVersion: osver.ArbosVersion_2,
+ InitialChainOwner: common.HexToAddress("0x186B56023d42B2B4E7616589a5C62EEf5FCa21DD"),
+ }
+}
+
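+// DisableArbitrumParams returns parameters with ArbOS disabled, for chain configs that are not Arbitrum chains.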
+func DisableArbitrumParams() types.ArbitrumChainParams {
+ return types.ArbitrumChainParams{
+ EnableArbOS: false,
+ AllowDebugPrecompiles: false,
+ DataAvailabilityCommittee: false,
+ InitialArbOSVersion: osver.ArbosVersion_0,
+ InitialChainOwner: common.Address{},
+ }
+}
+
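+// ArbitrumOneChainConfig returns the chain config for Arbitrum One (chain ID 42161), with all pre-London Ethereum forks active from genesis.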
+func ArbitrumOneChainConfig() *chain.Config {
+ return &chain.Config{
+ ChainID: big.NewInt(42161),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArbitrumChainParams: ArbitrumOneParams(),
+ Clique: &chain.CliqueConfig{
+ Period: 0,
+ Epoch: 0,
+ },
+ }
+}
+
+func ArbitrumNovaChainConfig() *chain.Config {
+ return &chain.Config{
+ ChainID: big.NewInt(42170),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArbitrumChainParams: ArbitrumNovaParams(),
+ Clique: &chain.CliqueConfig{
+ Period: 0,
+ Epoch: 0,
+ },
+ }
+}
+
+func ArbitrumRollupGoerliTestnetChainConfig() *chain.Config {
+ return &chain.Config{
+ ChainID: big.NewInt(421613),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArbitrumChainParams: ArbitrumRollupGoerliTestnetParams(),
+ Clique: &chain.CliqueConfig{
+ Period: 0,
+ Epoch: 0,
+ },
+ }
+}
+
+func ArbitrumDevTestChainConfig() *chain.Config {
+ return &chain.Config{
+ ChainID: big.NewInt(412346),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArbitrumChainParams: ArbitrumDevTestParams(),
+ Clique: &chain.CliqueConfig{
+ Period: 0,
+ Epoch: 0,
+ },
+ }
+}
+
+func ArbitrumDevTestDASChainConfig() *chain.Config {
+ return &chain.Config{
+ ChainID: big.NewInt(412347),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArbitrumChainParams: ArbitrumDevTestDASParams(),
+ Clique: &chain.CliqueConfig{
+ Period: 0,
+ Epoch: 0,
+ },
+ }
+}
+
+func ArbitrumAnytrustGoerliTestnetChainConfig() *chain.Config {
+ return &chain.Config{
+ ChainID: big.NewInt(421703),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArbitrumChainParams: ArbitrumAnytrustGoerliTestnetParams(),
+ Clique: &chain.CliqueConfig{
+ Period: 0,
+ Epoch: 0,
+ },
+ }
+}
+
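+// ArbitrumSupportedChainConfigs lists every Arbitrum chain config known to this package.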
+var ArbitrumSupportedChainConfigs = []*chain.Config{
+ ArbitrumOneChainConfig(),
+ ArbitrumNovaChainConfig(),
+ ArbitrumRollupGoerliTestnetChainConfig(),
+ ArbitrumDevTestChainConfig(),
+ ArbitrumDevTestDASChainConfig(),
+ ArbitrumAnytrustGoerliTestnetChainConfig(),
+}
+
+// AllEthashProtocolChanges contains every protocol change (EIPs) introduced
+// and accepted by the Ethereum core developers into the Ethash consensus.
+var AllEthashProtocolChanges = &chain.Config{
+ ChainID: big.NewInt(1337),
+ HomesteadBlock: big.NewInt(0),
+ DAOForkBlock: nil,
+ TangerineWhistleBlock: big.NewInt(0),
+ SpuriousDragonBlock: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ LondonBlock: big.NewInt(0),
+ ArrowGlacierBlock: big.NewInt(0),
+ GrayGlacierBlock: big.NewInt(0),
+ MergeNetsplitBlock: nil,
+ ShanghaiTime: nil,
+ CancunTime: nil,
+ PragueTime: nil,
+ TerminalTotalDifficulty: nil,
+ TerminalTotalDifficultyPassed: true,
+ Ethash: new(chain.EthashConfig),
+ Clique: nil,
+ ArbitrumChainParams: DisableArbitrumParams(),
+}
diff --git a/arb/chain/types/types.go b/arb/chain/types/types.go
new file mode 100644
index 00000000000..5e91bf1760a
--- /dev/null
+++ b/arb/chain/types/types.go
@@ -0,0 +1,34 @@
+package types
+
+import "github.com/erigontech/erigon/common"
+
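+// ArbRollupConfig holds the L1 rollup contract addresses and deployment block for an Arbitrum chain.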
+type ArbRollupConfig struct {
+ Bridge string `json:"bridge"`
+ Inbox string `json:"inbox"`
+ SequencerInbox string `json:"sequencer-inbox"`
+ Rollup string `json:"rollup"`
+ ValidatorUtils string `json:"validator-utils"`
+ ValidatorWalletCreator string `json:"validator-wallet-creator"`
+ StakeToken string `json:"stake-token"`
+ DeployedAt int `json:"deployed-at"`
+}
+
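+// ArbitrumChainParams carries the Arbitrum-specific section of a chain config: parent-chain identity, sequencer and feed endpoints, ArbOS activation settings, code-size limits, and the rollup contract addresses.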
+type ArbitrumChainParams struct {
+ ParentChainID int `json:"parent-chain-id"`
+ ParentChainIsArbitrum bool `json:"parent-chain-is-arbitrum"`
+ ChainName string `json:"chain-name"`
+ SequencerURL string `json:"sequencer-url"`
+ FeedURL string `json:"feed-url"`
+
+ EnableArbOS bool `json:"EnableArbOS"`
+ AllowDebugPrecompiles bool `json:"AllowDebugPrecompiles"`
+ DataAvailabilityCommittee bool `json:"DataAvailabilityCommittee"`
+ InitialArbOSVersion uint64 `json:"InitialArbOSVersion"`
+ InitialChainOwner common.Address `json:"InitialChainOwner"`
+ GenesisBlockNum uint64 `json:"GenesisBlockNum"`
+
+ MaxCodeSize uint64 `json:"MaxCodeSize,omitempty"` // Maximum bytecode to permit for a contract. 0 value implies params.DefaultMaxCodeSize
+ MaxInitCodeSize uint64 `json:"MaxInitCodeSize,omitempty"` // Maximum initcode to permit in a creation transaction and create instructions. 0 value implies params.DefaultMaxInitCodeSize
+
+ Rollup ArbRollupConfig `json:"rollup"`
+}
diff --git a/arb/chainparams/arb_params.go b/arb/chainparams/arb_params.go
new file mode 100644
index 00000000000..c4545e2156c
--- /dev/null
+++ b/arb/chainparams/arb_params.go
@@ -0,0 +1,32 @@
+package chainparams
+
+import (
+ "github.com/erigontech/erigon/common"
+)
+
+// System Arbitrum contracts.
+var (
+ // SystemAddress is the address system transactions are sent from, as defined in EIP-4788.
+ SystemAddress = common.HexToAddress("0xfffffffffffffffffffffffffffffffffffffffe")
+
+ // EIP-4788 - Beacon block root in the EVM
+ //BeaconRootsAddress = common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
+ BeaconRootsCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500")
+
+ // EIP-2935 - Serve historical block hashes from state
+ //HistoryStorageAddress = common.HexToAddress("0x0000F90827F1C53a10cb7A02335B175320002935")
+ HistoryStorageCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe14604657602036036042575f35600143038111604257611fff81430311604257611fff9006545f5260205ff35b5f5ffd5b5f35611fff60014303065500")
+ // EIP-2935 - Serve historical block hashes from state (Arbitrum). It differs from the original in two ways:
+ // 1. The buffer size is 393168 blocks instead of 8191.
+ // 2. Instead of using number (L1 block number), it uses arb_block_num (L2 block number).
+ // https://github.com/OffchainLabs/sys-asm/blob/main/src/execution_hash/main.eas
+ HistoryStorageCodeArbitrum = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460605760203603605c575f3563a3b1b31d5f5260205f6004601c60645afa15605c575f51600181038211605c57816205ffd0910311605c576205ffd09006545f5260205ff35b5f5ffd5b5f356205ffd0600163a3b1b31d5f5260205f6004601c60645afa15605c575f5103065500")
+
+ // EIP-7002 - Execution layer triggerable withdrawals
+ WithdrawalQueueAddress = common.HexToAddress("0x00000961Ef480Eb55e80D19ad83579A64c007002")
+ WithdrawalQueueCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460cb5760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff146101f457600182026001905f5b5f82111560685781019083028483029004916001019190604d565b909390049250505036603814608857366101f457346101f4575f5260205ff35b34106101f457600154600101600155600354806003026004013381556001015f35815560010160203590553360601b5f5260385f601437604c5fa0600101600355005b6003546002548082038060101160df575060105b5f5b8181146101835782810160030260040181604c02815460601b8152601401816001015481526020019060020154807fffffffffffffffffffffffffffffffff00000000000000000000000000000000168252906010019060401c908160381c81600701538160301c81600601538160281c81600501538160201c81600401538160181c81600301538160101c81600201538160081c81600101535360010160e1565b910180921461019557906002556101a0565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff14156101cd57505f5b6001546002828201116101e25750505f6101e8565b01600290035b5f555f600155604c025ff35b5f5ffd")
+
+ // EIP-7251 - Increase the MAX_EFFECTIVE_BALANCE
+ ConsolidationQueueAddress = common.HexToAddress("0x0000BBdDc7CE488642fb579F8B00f3a590007251")
+ ConsolidationQueueCode = common.FromHex("3373fffffffffffffffffffffffffffffffffffffffe1460d35760115f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1461019a57600182026001905f5b5f82111560685781019083028483029004916001019190604d565b9093900492505050366060146088573661019a573461019a575f5260205ff35b341061019a57600154600101600155600354806004026004013381556001015f358155600101602035815560010160403590553360601b5f5260605f60143760745fa0600101600355005b6003546002548082038060021160e7575060025b5f5b8181146101295782810160040260040181607402815460601b815260140181600101548152602001816002015481526020019060030154905260010160e9565b910180921461013b5790600255610146565b90505f6002555f6003555b5f54807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff141561017357505f5b6001546001828201116101885750505f61018e565b01600190035b5f555f6001556074025ff35b5f5ffd")
+)
diff --git a/arb/chainparams/protocol.go b/arb/chainparams/protocol.go
new file mode 100644
index 00000000000..d7e00e7563b
--- /dev/null
+++ b/arb/chainparams/protocol.go
@@ -0,0 +1,14 @@
+package chainparams
+
+const (
+ BlobTxBlobGasPerBlob = 1 << 17 // Gas consumption of a single data blob (== blob byte size)
+ BlobTxMinBlobGasprice = 1 // Minimum gas price for a blob transaction
+ BlobTxBlobGaspriceUpdateFraction = 3338477 // Controls the maximum rate of change for blob gas price
+ BlobTxFieldElementsPerBlob = 4096 // Number of field elements stored in a single data blob
+
+ BlobTxTargetBlobGasPerBlock = 3 * BlobTxBlobGasPerBlob // Target consumable blob gas for data blobs per block (for 1559-like pricing)
+ MaxBlobGasPerBlock = 6 * BlobTxBlobGasPerBlob // Maximum consumable blob gas for data blobs per block
+
+ SloadGas = uint64(50) // Cost of an SLOAD operation under the original Frontier gas schedule.
+
+)
diff --git a/arb/ethdb/anti-cycle.go b/arb/ethdb/anti-cycle.go
new file mode 100644
index 00000000000..aab938a8e53
--- /dev/null
+++ b/arb/ethdb/anti-cycle.go
@@ -0,0 +1,20 @@
+package ethdb
+
+import (
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+ "github.com/erigontech/nitro-erigon/arbos/programs"
+)
+
+// InitializeLocalWasmTarget initializes the local WASM target based on the current arch.
+func InitializeLocalWasmTarget() {
+ lt := wasmdb.LocalTarget()
+ desc := "description unavailable"
+ switch lt {
+ case wasmdb.TargetAmd64:
+ desc = programs.DefaultTargetDescriptionX86
+ case wasmdb.TargetArm64:
+ desc = programs.DefaultTargetDescriptionArm
+ }
+
+ programs.SetTarget(lt, desc, true)
+}
diff --git a/arb/ethdb/wasmdb/init.go b/arb/ethdb/wasmdb/init.go
new file mode 100644
index 00000000000..44553563262
--- /dev/null
+++ b/arb/ethdb/wasmdb/init.go
@@ -0,0 +1,5 @@
+package wasmdb
+
+func init() {
+
+}
diff --git a/arb/ethdb/wasmdb/wasmdb.go b/arb/ethdb/wasmdb/wasmdb.go
new file mode 100644
index 00000000000..fb0aa2fd2c5
--- /dev/null
+++ b/arb/ethdb/wasmdb/wasmdb.go
@@ -0,0 +1,230 @@
+package wasmdb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "runtime"
+
+ "github.com/erigontech/erigon/arb/lru"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/length"
+ "github.com/erigontech/erigon/common/log/v3"
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/db/kv/dbcfg"
+ "github.com/erigontech/erigon/db/kv/mdbx"
+)
+
+type WasmTarget string
+
+const WasmPrefixLen = 3
+
+// WasmKeyLen = WasmPrefixLen + length of the module hash
+const WasmKeyLen = WasmPrefixLen + length.Hash
+
+type WasmPrefix = [WasmPrefixLen]byte
+type WasmKey = [WasmKeyLen]byte
+
+const (
+ TargetWasm WasmTarget = "wasm"
+ TargetWavm WasmTarget = "wavm"
+ TargetArm64 WasmTarget = "arm64"
+ TargetAmd64 WasmTarget = "amd64"
+ TargetHost WasmTarget = "host"
+)
+
+var (
+ wasmSchemaVersionKey = []byte("WasmSchemaVersion")
+
+ // 0x00 prefix to avoid conflicts when wasmdb is not separate database
+ activatedAsmWavmPrefix = WasmPrefix{0x00, 'w', 'w'} // (prefix, moduleHash) -> stylus module (wavm)
+ activatedAsmArmPrefix = WasmPrefix{0x00, 'w', 'r'} // (prefix, moduleHash) -> stylus asm for ARM system
+ activatedAsmX86Prefix = WasmPrefix{0x00, 'w', 'x'} // (prefix, moduleHash) -> stylus asm for x86 system
+ activatedAsmHostPrefix = WasmPrefix{0x00, 'w', 'h'} // (prefix, moduleHash) -> stylus asm for systems other than ARM and x86
+)
+
+const WasmSchemaVersion byte = 0x01
+
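+// LocalTarget returns the WasmTarget matching the local GOOS/GOARCH; anything other than linux/amd64 or linux/arm64 falls back to TargetHost.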
+func LocalTarget() WasmTarget {
+ if runtime.GOOS == "linux" {
+ switch runtime.GOARCH {
+ case "arm64":
+ return TargetArm64
+ case "amd64":
+ return TargetAmd64
+ }
+ }
+ return TargetHost
+}
+
+func DeprecatedPrefixesV0() (keyPrefixes [][]byte, keyLength int) {
+ return [][]byte{
+ // deprecated prefixes, used in version 0x00, purged in version 0x01
+ {0x00, 'w', 'a'}, // ActivatedAsmPrefix
+ {0x00, 'w', 'm'}, // ActivatedModulePrefix
+ }, 3 + 32
+}
+
+// key = prefix + moduleHash
+func activatedKey(prefix WasmPrefix, moduleHash common.Hash) WasmKey {
+ var key WasmKey
+ copy(key[:WasmPrefixLen], prefix[:])
+ copy(key[WasmPrefixLen:], moduleHash[:])
+ return key
+}
+
+func activatedAsmKeyPrefix(target WasmTarget) (WasmPrefix, error) {
+ var prefix WasmPrefix
+ switch target {
+ case TargetWavm:
+ prefix = activatedAsmWavmPrefix
+ case TargetArm64:
+ prefix = activatedAsmArmPrefix
+ case TargetAmd64:
+ prefix = activatedAsmX86Prefix
+ case TargetHost:
+ prefix = activatedAsmHostPrefix
+ default:
+ return WasmPrefix{}, fmt.Errorf("invalid target: %v", target)
+ }
+ return prefix, nil
+}
+
+func IsSupportedWasmTarget(target WasmTarget) bool {
+ _, err := activatedAsmKeyPrefix(target)
+ return err == nil
+}
+
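+// WriteActivation stores every native (non-wasm) entry of asmMap for the given module hash.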
+func WriteActivation(db kv.Putter, moduleHash common.Hash, asmMap map[WasmTarget][]byte) {
+ for target, asm := range asmMap {
+ if target != TargetWasm {
+ WriteActivatedAsm(db, target, moduleHash, asm)
+ }
+ }
+}
+
+// WriteActivatedAsm stores the activated asm for the given moduleHash and target.
+func WriteActivatedAsm(db kv.Putter, target WasmTarget, moduleHash common.Hash, asm []byte) {
+ prefix, err := activatedAsmKeyPrefix(target)
+ if err != nil {
+ log.Crit("Failed to store activated wasm asm", "err", err)
+ }
+ key := activatedKey(prefix, moduleHash)
+ if err := db.Put(kv.ArbWasmActivationBucket, key[:], asm); err != nil {
+ log.Crit("Failed to store activated wasm asm", "err", err)
+ }
+}
+
+// ReadActivatedAsm retrieves the activated asm for the given moduleHash and target.
+func ReadActivatedAsm(db kv.Getter, target WasmTarget, moduleHash common.Hash) []byte {
+ if target == TargetWasm {
+ return nil // wasm is not stored in the database
+ }
+ prefix, err := activatedAsmKeyPrefix(target)
+ if err != nil {
+ log.Crit("Failed to read activated wasm asm", "err", err)
+ }
+ key := activatedKey(prefix, moduleHash)
+ asm, err := db.GetOne(kv.ArbWasmActivationBucket, key[:])
+ if err != nil {
+ return nil
+ }
+ return asm
+}
+
+// WriteWasmSchemaVersion stores the wasm schema version.
+func WriteWasmSchemaVersion(db kv.Putter) {
+ if err := db.Put(kv.ArbWasmActivationBucket, wasmSchemaVersionKey, []byte{WasmSchemaVersion}); err != nil {
+ log.Crit("Failed to store wasm schema version", "err", err)
+ }
+}
+
+// ReadWasmSchemaVersion retrieves the wasm schema version.
+func ReadWasmSchemaVersion(db kv.Getter) ([]byte, error) {
+ return db.GetOne(kv.ArbWasmActivationBucket, wasmSchemaVersionKey)
+}
+
+type WasmIface interface {
+ ActivatedAsm(target WasmTarget, moduleHash common.Hash) ([]byte, error)
+ WasmStore() kv.RwDB
+ WasmCacheTag() uint32
+ WasmTargets() []WasmTarget
+}
+
+type activatedAsmCacheKey struct {
+ moduleHash common.Hash
+ target WasmTarget
+}
+
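+// WasmDB wraps a kv.RwDB holding activated Stylus assemblies and fronts reads with a size-constrained LRU cache.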
+type WasmDB struct {
+ kv.RwDB
+
+ activatedAsmCache *lru.SizeConstrainedCache[activatedAsmCacheKey, []byte]
+ cacheTag uint32
+ targets []WasmTarget
+}
+
+func (w *WasmDB) ActivatedAsm(target WasmTarget, moduleHash common.Hash) ([]byte, error) {
+ cacheKey := activatedAsmCacheKey{moduleHash, target}
+ if asm, _ := w.activatedAsmCache.Get(cacheKey); len(asm) > 0 {
+ return asm, nil
+ }
+ var asm []byte
+ err := w.View(context.Background(), func(tx kv.Tx) error {
+ asm = ReadActivatedAsm(tx, target, moduleHash)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(asm) > 0 {
+ w.activatedAsmCache.Add(cacheKey, asm)
+ return asm, nil
+ }
+ return nil, errors.New("not found")
+}
+
+func (w *WasmDB) WriteActivatedAsm(moduleHash common.Hash, asmMap map[WasmTarget][]byte) error {
+ return w.Update(context.Background(), func(tx kv.RwTx) error {
+ WriteActivation(tx, moduleHash, asmMap)
+ return nil
+ })
+}
+
+func (w *WasmDB) WasmStore() kv.RwDB {
+ return w
+}
+
+func (w *WasmDB) WasmCacheTag() uint32 {
+ return w.cacheTag
+}
+
+func (w *WasmDB) WasmTargets() []WasmTarget {
+ return w.targets
+}
+
+const constantCacheTag = 1
+
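+// WrapDatabaseWithWasm exposes an existing kv.RwDB as a WasmIface, using the constant cache tag and the supplied targets.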
+func WrapDatabaseWithWasm(wasm kv.RwDB, targets []WasmTarget) WasmIface {
+ return &WasmDB{RwDB: wasm, cacheTag: constantCacheTag, targets: targets, activatedAsmCache: lru.NewSizeConstrainedCache[activatedAsmCacheKey, []byte](1000)}
+}
+
+var openedArbitrumWasmDB WasmIface
+
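+// OpenArbitrumWasmDB opens the Arbitrum wasm database at path, or returns the previously opened instance; the database is closed when ctx is cancelled.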
+func OpenArbitrumWasmDB(ctx context.Context, path string) WasmIface {
+ if openedArbitrumWasmDB != nil {
+ return openedArbitrumWasmDB
+ }
+ mdbxDB := mdbx.New(dbcfg.ArbWasmDB, log.New()).Path(path).
+ WithTableCfg(func(defaultBuckets kv.TableCfg) kv.TableCfg {
+ return kv.ChaindataTablesCfg
+ }).MustOpen()
+ go func() {
+ <-ctx.Done()
+ openedArbitrumWasmDB = nil
+ mdbxDB.Close()
+ }()
+
+ openedArbitrumWasmDB = WrapDatabaseWithWasm(mdbxDB, []WasmTarget{LocalTarget()})
+ return openedArbitrumWasmDB
+}
diff --git a/arb/keystore/account.go b/arb/keystore/account.go
new file mode 100644
index 00000000000..78218c7e4bf
--- /dev/null
+++ b/arb/keystore/account.go
@@ -0,0 +1,409 @@
+package keystore
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "strings"
+
+ "golang.org/x/crypto/sha3"
+
+ ethereum "github.com/erigontech/erigon"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/types"
+)
+
+type Account struct {
+ Address common.Address `json:"address"` // Ethereum account address derived from the key
+ URL URL `json:"url"` // Optional resource locator within a backend
+}
+
+// URL represents the canonical identification URL of a wallet or account.
+//
+// It is a simplified version of url.URL, with the important limitations (which
+// are considered features here) that it contains value-copyable components only,
+// as well as that it doesn't do any URL encoding/decoding of special characters.
+//
+// The former is important to allow an account to be copied without leaving live
+// references to the original version, whereas the latter is important to ensure
+// one single canonical form opposed to many allowed ones by the RFC 3986 spec.
+//
+// As such, these URLs should not be used outside of the scope of an Ethereum
+// wallet or account.
+type URL struct {
+ Scheme string // Protocol scheme to identify a capable account backend
+ Path string // Path for the backend to identify a unique entity
+}
+
+// parseURL converts a user supplied URL into the accounts specific structure.
+func parseURL(url string) (URL, error) {
+ parts := strings.Split(url, "://")
+ if len(parts) != 2 || parts[0] == "" {
+ return URL{}, errors.New("protocol scheme missing")
+ }
+ return URL{
+ Scheme: parts[0],
+ Path: parts[1],
+ }, nil
+}
+
+// String implements the stringer interface.
+func (u URL) String() string {
+ if u.Scheme != "" {
+ return fmt.Sprintf("%s://%s", u.Scheme, u.Path)
+ }
+ return u.Path
+}
+
+// TerminalString implements the log.TerminalStringer interface.
+func (u URL) TerminalString() string {
+ url := u.String()
+ if len(url) > 32 {
+ return url[:31] + ".."
+ }
+ return url
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (u URL) MarshalJSON() ([]byte, error) {
+ return json.Marshal(u.String())
+}
+
+// UnmarshalJSON parses url.
+func (u *URL) UnmarshalJSON(input []byte) error {
+ var textURL string
+ err := json.Unmarshal(input, &textURL)
+ if err != nil {
+ return err
+ }
+ url, err := parseURL(textURL)
+ if err != nil {
+ return err
+ }
+ u.Scheme = url.Scheme
+ u.Path = url.Path
+ return nil
+}
+
+// Cmp compares x and y and returns:
+//
+// -1 if x < y
+// 0 if x == y
+// +1 if x > y
+func (u URL) Cmp(url URL) int {
+ if u.Scheme == url.Scheme {
+ return strings.Compare(u.Path, url.Path)
+ }
+ return strings.Compare(u.Scheme, url.Scheme)
+}
+
+// Wallet represents a software or hardware wallet that might contain one or more
+// accounts (derived from the same seed).
+type Wallet interface {
+ // URL retrieves the canonical path under which this wallet is reachable. It is
+ // used by upper layers to define a sorting order over all wallets from multiple
+ // backends.
+ URL() URL
+
+ // Status returns a textual status to aid the user in the current state of the
+ // wallet. It also returns an error indicating any failure the wallet might have
+ // encountered.
+ Status() (string, error)
+
+ // Open initializes access to a wallet instance. It is not meant to unlock or
+ // decrypt account keys, rather simply to establish a connection to hardware
+ // wallets and/or to access derivation seeds.
+ //
+ // The passphrase parameter may or may not be used by the implementation of a
+ // particular wallet instance. The reason there is no passwordless open method
+ // is to strive towards a uniform wallet handling, oblivious to the different
+ // backend providers.
+ //
+ // Please note, if you open a wallet, you must close it to release any allocated
+ // resources (especially important when working with hardware wallets).
+ Open(passphrase string) error
+
+ // Close releases any resources held by an open wallet instance.
+ Close() error
+
+ // Accounts retrieves the list of signing accounts the wallet is currently aware
+ // of. For hierarchical deterministic wallets, the list will not be exhaustive,
+ // rather only contain the accounts explicitly pinned during account derivation.
+ Accounts() []Account
+
+ // Contains returns whether an account is part of this particular wallet or not.
+ Contains(account Account) bool
+
+ // Derive attempts to explicitly derive a hierarchical deterministic account at
+ // the specified derivation path. If requested, the derived account will be added
+ // to the wallet's tracked account list.
+ Derive(path DerivationPath, pin bool) (Account, error)
+
+ // SelfDerive sets a base account derivation path from which the wallet attempts
+ // to discover non zero accounts and automatically add them to list of tracked
+ // accounts.
+ //
+ // Note, self derivation will increment the last component of the specified path
+ // opposed to descending into a child path to allow discovering accounts starting
+ // from non zero components.
+ //
+ // Some hardware wallets switched derivation paths through their evolution, so
+ // this method supports providing multiple bases to discover old user accounts
+ // too. Only the last base will be used to derive the next empty account.
+ //
+ // You can disable automatic account discovery by calling SelfDerive with a nil
+ // chain state reader.
+ SelfDerive(bases []DerivationPath, chain ethereum.ChainReader)
+
+ // SignData requests the wallet to sign the hash of the given data
+ // It looks up the account specified either solely via its address contained within,
+ // or optionally with the aid of any location metadata from the embedded URL field.
+ //
+ // If the wallet requires additional authentication to sign the request (e.g.
+ // a password to decrypt the account, or a PIN code to verify the transaction),
+ // an AuthNeededError instance will be returned, containing infos for the user
+ // about which fields or actions are needed. The user may retry by providing
+ // the needed details via SignDataWithPassphrase, or by other means (e.g. unlock
+ // the account in a keystore).
+ SignData(account Account, mimeType string, data []byte) ([]byte, error)
+
+ // SignDataWithPassphrase is identical to SignData, but also takes a password
+ // NOTE: there's a chance that an erroneous call might mistake the two strings, and
+ // supply password in the mimetype field, or vice versa. Thus, an implementation
+ // should never echo the mimetype or return the mimetype in the error-response
+ SignDataWithPassphrase(account Account, passphrase, mimeType string, data []byte) ([]byte, error)
+
+ // SignText requests the wallet to sign the hash of a given piece of data, prefixed
+ // by the Ethereum prefix scheme
+ // It looks up the account specified either solely via its address contained within,
+ // or optionally with the aid of any location metadata from the embedded URL field.
+ //
+ // If the wallet requires additional authentication to sign the request (e.g.
+ // a password to decrypt the account, or a PIN code to verify the transaction),
+ // an AuthNeededError instance will be returned, containing infos for the user
+ // about which fields or actions are needed. The user may retry by providing
+ // the needed details via SignTextWithPassphrase, or by other means (e.g. unlock
+ // the account in a keystore).
+ //
+ // This method should return the signature in 'canonical' format, with v 0 or 1.
+ SignText(account Account, text []byte) ([]byte, error)
+
+ // SignTextWithPassphrase is identical to SignText, but also takes a password.
+ SignTextWithPassphrase(account Account, passphrase string, hash []byte) ([]byte, error)
+
+ // SignTx requests the wallet to sign the given transaction.
+ //
+ // It looks up the account specified either solely via its address contained within,
+ // or optionally with the aid of any location metadata from the embedded URL field.
+ //
+ // If the wallet requires additional authentication to sign the request (e.g.
+ // a password to decrypt the account, or a PIN code to verify the transaction),
+ // an AuthNeededError instance will be returned, containing infos for the user
+ // about which fields or actions are needed. The user may retry by providing
+ // the needed details via SignTxWithPassphrase, or by other means (e.g. unlock
+ // the account in a keystore).
+ SignTx(account Account, tx types.Transaction, chainID *big.Int) (types.Transaction, error)
+
+ // SignTxWithPassphrase is identical to SignTx, but also takes a password
+ SignTxWithPassphrase(account Account, passphrase string, tx types.Transaction, chainID *big.Int) (types.Transaction, error)
+}
+
+// DefaultRootDerivationPath is the root path to which custom derivation endpoints
+// are appended. As such, the first account will be at m/44'/60'/0'/0, the second
+// at m/44'/60'/0'/1, etc.
+var DefaultRootDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
+
+// DefaultBaseDerivationPath is the base path from which custom derivation endpoints
+// are incremented. As such, the first account will be at m/44'/60'/0'/0/0, the second
+// at m/44'/60'/0'/0/1, etc.
+var DefaultBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0, 0}
+
+// LegacyLedgerBaseDerivationPath is the legacy base path from which custom derivation
+// endpoints are incremented. As such, the first account will be at m/44'/60'/0'/0, the
+// second at m/44'/60'/0'/1, etc.
+var LegacyLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 + 60, 0x80000000 + 0, 0}
+
+// DerivationPath represents the computer friendly version of a hierarchical
+// deterministic wallet account derivation path.
+//
+// The BIP-32 spec https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki
+// defines derivation paths to be of the form:
+//
+// m / purpose' / coin_type' / account' / change / address_index
+//
+// The BIP-44 spec https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki
+// defines that the `purpose` be 44' (or 0x8000002C) for crypto currencies, and
+// SLIP-44 https://github.com/satoshilabs/slips/blob/master/slip-0044.md assigns
+// the `coin_type` 60' (or 0x8000003C) to Ethereum.
+//
+// The root path for Ethereum is m/44'/60'/0'/0 according to the specification
+// from https://github.com/ethereum/EIPs/issues/84, albeit it's not set in stone
+// yet whether accounts should increment the last component or the children of
+// that. We will go with the simpler approach of incrementing the last component.
+type DerivationPath []uint32
+
+// ParseDerivationPath converts a user specified derivation path string to the
+// internal binary representation.
+//
+// Full derivation paths need to start with the `m/` prefix, relative derivation
+// paths (which will get appended to the default root path) must not have prefixes
+// in front of the first element. Whitespace is ignored.
+func ParseDerivationPath(path string) (DerivationPath, error) {
+ var result DerivationPath
+
+ // Handle absolute or relative paths
+ components := strings.Split(path, "/")
+ switch {
+ case len(components) == 0:
+ return nil, errors.New("empty derivation path")
+
+ case strings.TrimSpace(components[0]) == "":
+ return nil, errors.New("ambiguous path: use 'm/' prefix for absolute paths, or no leading '/' for relative ones")
+
+ case strings.TrimSpace(components[0]) == "m":
+ components = components[1:]
+
+ default:
+ result = append(result, DefaultRootDerivationPath...)
+ }
+ // All remaining components are relative, append one by one
+ if len(components) == 0 {
+ return nil, errors.New("empty derivation path") // Empty relative paths
+ }
+ for _, component := range components {
+ // Ignore any user added whitespace
+ component = strings.TrimSpace(component)
+ var value uint32
+
+ // Handle hardened paths
+ if strings.HasSuffix(component, "'") {
+ value = 0x80000000
+ component = strings.TrimSpace(strings.TrimSuffix(component, "'"))
+ }
+ // Handle the non hardened component
+ bigval, ok := new(big.Int).SetString(component, 0)
+ if !ok {
+ return nil, fmt.Errorf("invalid component: %s", component)
+ }
+ mx := math.MaxUint32 - value
+ if bigval.Sign() < 0 || bigval.Cmp(big.NewInt(int64(mx))) > 0 {
+ if value == 0 {
+ return nil, fmt.Errorf("component %v out of allowed range [0, %d]", bigval, mx)
+ }
+ return nil, fmt.Errorf("component %v out of allowed hardened range [0, %d]", bigval, mx)
+ }
+ value += uint32(bigval.Uint64())
+
+ // Append and repeat
+ result = append(result, value)
+ }
+ return result, nil
+}
+
+// String implements the stringer interface, converting a binary derivation path
+// to its canonical representation.
+func (path DerivationPath) String() string {
+ result := "m"
+ for _, component := range path {
+ var hardened bool
+ if component >= 0x80000000 {
+ component -= 0x80000000
+ hardened = true
+ }
+ result = fmt.Sprintf("%s/%d", result, component)
+ if hardened {
+ result += "'"
+ }
+ }
+ return result
+}
+
+// MarshalJSON turns a derivation path into its json-serialized string
+func (path DerivationPath) MarshalJSON() ([]byte, error) {
+ return json.Marshal(path.String())
+}
+
+// UnmarshalJSON a json-serialized string back into a derivation path
+func (path *DerivationPath) UnmarshalJSON(b []byte) error {
+ var dp string
+ var err error
+ if err = json.Unmarshal(b, &dp); err != nil {
+ return err
+ }
+ *path, err = ParseDerivationPath(dp)
+ return err
+}
+
+// DefaultIterator creates a BIP-32 path iterator, which progresses by increasing the last component:
+// i.e. m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, m/44'/60'/0'/0/2, ... m/44'/60'/0'/0/N.
+func DefaultIterator(base DerivationPath) func() DerivationPath {
+ path := make(DerivationPath, len(base))
+ copy(path[:], base[:])
+ // Set it back by one, so the first call gives the first result
+ path[len(path)-1]--
+ return func() DerivationPath {
+ path[len(path)-1]++
+ return path
+ }
+}
+
+// LedgerLiveIterator creates a bip44 path iterator for Ledger Live.
+// Ledger Live increments the third component rather than the fifth component
+// i.e. m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, m/44'/60'/2'/0/0, ... m/44'/60'/N'/0/0.
+func LedgerLiveIterator(base DerivationPath) func() DerivationPath {
+ path := make(DerivationPath, len(base))
+ copy(path[:], base[:])
+ // Set it back by one, so the first call gives the first result
+ path[2]--
+ return func() DerivationPath {
+ // ledgerLivePathIterator iterates on the third component
+ path[2]++
+ return path
+ }
+}
+
+func TextHash(data []byte) []byte {
+ hash, _ := TextAndHash(data)
+ return hash
+}
+
+// TextAndHash is a helper function that calculates a hash for the given message that can be
+// safely used to calculate a signature from.
+//
+// The hash is calculated as
+//
+// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}).
+//
+// This gives context to the signed message and prevents signing of transactions.
+func TextAndHash(data []byte) ([]byte, string) {
+ msg := fmt.Sprintf("\x19Ethereum Signed Message:\n%d%s", len(data), string(data))
+ hasher := sha3.NewLegacyKeccak256()
+ hasher.Write([]byte(msg))
+ return hasher.Sum(nil), msg
+}
+
+// WalletEventType represents the different event types that can be fired by
+// the wallet subscription subsystem.
+type WalletEventType int
+
+const (
+ // WalletArrived is fired when a new wallet is detected either via USB or via
+ // a filesystem event in the keystore.
+ WalletArrived WalletEventType = iota
+
+ // WalletOpened is fired when a wallet is successfully opened with the purpose
+ // of starting any background processes such as automatic key derivation.
+ WalletOpened
+
+ // WalletDropped is fired when a wallet is removed or disconnected, either via USB
+ // or due to a filesystem event in the keystore. This event indicates that the wallet
+ // can no longer be used for any operations.
+ WalletDropped
+)
+
+// WalletEvent is an event fired by an account backend when a wallet arrival or
+// departure is detected.
+type WalletEvent struct {
+ Wallet Wallet // Wallet instance arrived or departed
+ Kind WalletEventType // Event type that happened in the system
+}
diff --git a/arb/keystore/account_cache.go b/arb/keystore/account_cache.go
new file mode 100644
index 00000000000..9d0f3711d09
--- /dev/null
+++ b/arb/keystore/account_cache.go
@@ -0,0 +1,306 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/erigontech/erigon/common"
+ "golang.org/x/exp/slices"
+)
+
+// Minimum amount of time between cache reloads. This limit applies if the platform does
+// not support change notifications. It also applies if the keystore directory does not
+// exist yet; in that case the code will attempt to create a watcher at most this often.
+const minReloadInterval = 2 * time.Second
+
+// byURL defines the sorting order for accounts.
+func byURL(a, b Account) int {
+ return a.URL.Cmp(b.URL)
+}
+
+// AmbiguousAddrError is returned when attempting to unlock
+// an address for which more than one file exists.
+type AmbiguousAddrError struct {
+ Addr common.Address
+ Matches []Account
+}
+
+func (err *AmbiguousAddrError) Error() string {
+ files := ""
+ for i, a := range err.Matches {
+ files += a.URL.Path
+ if i < len(err.Matches)-1 {
+ files += ", "
+ }
+ }
+ return fmt.Sprintf("multiple keys match address (%s)", files)
+}
+
+// accountCache is a live index of all accounts in the keystore.
+type accountCache struct {
+ keydir string
+ watcher *watcher
+ mu sync.Mutex
+ all []Account
+ byAddr map[common.Address][]Account
+ throttle *time.Timer
+ notify chan struct{}
+ fileC fileCache
+}
+
+func newAccountCache(keydir string) (*accountCache, chan struct{}) {
+ ac := &accountCache{
+ keydir: keydir,
+ byAddr: make(map[common.Address][]Account),
+ notify: make(chan struct{}, 1),
+ fileC: fileCache{all: mapset.NewThreadUnsafeSet[string]()},
+ }
+ ac.watcher = newWatcher(ac)
+ return ac, ac.notify
+}
+
+func (ac *accountCache) accounts() []Account {
+ ac.maybeReload()
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ cpy := make([]Account, len(ac.all))
+ copy(cpy, ac.all)
+ return cpy
+}
+
+func (ac *accountCache) hasAddress(addr common.Address) bool {
+ ac.maybeReload()
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ return len(ac.byAddr[addr]) > 0
+}
+
+func (ac *accountCache) add(newAccount Account) {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+
+ i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Cmp(newAccount.URL) >= 0 })
+ if i < len(ac.all) && ac.all[i] == newAccount {
+ return
+ }
+ // newAccount is not in the cache.
+ ac.all = append(ac.all, Account{})
+ copy(ac.all[i+1:], ac.all[i:])
+ ac.all[i] = newAccount
+ ac.byAddr[newAccount.Address] = append(ac.byAddr[newAccount.Address], newAccount)
+}
+
+// note: removed needs to be unique here (i.e. both File and Address must be set).
+func (ac *accountCache) delete(removed Account) {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+
+ ac.all = removeAccount(ac.all, removed)
+ if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
+ delete(ac.byAddr, removed.Address)
+ } else {
+ ac.byAddr[removed.Address] = ba
+ }
+}
+
+// deleteByFile removes an account referenced by the given path.
+func (ac *accountCache) deleteByFile(path string) {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ i := sort.Search(len(ac.all), func(i int) bool { return ac.all[i].URL.Path >= path })
+
+ if i < len(ac.all) && ac.all[i].URL.Path == path {
+ removed := ac.all[i]
+ ac.all = append(ac.all[:i], ac.all[i+1:]...)
+ if ba := removeAccount(ac.byAddr[removed.Address], removed); len(ba) == 0 {
+ delete(ac.byAddr, removed.Address)
+ } else {
+ ac.byAddr[removed.Address] = ba
+ }
+ }
+}
+
+// watcherStarted returns true if the watcher loop started running (even if it
+// has since also ended).
+func (ac *accountCache) watcherStarted() bool {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ return ac.watcher.running || ac.watcher.runEnded
+}
+
+func removeAccount(slice []Account, elem Account) []Account {
+ for i := range slice {
+ if slice[i] == elem {
+ return append(slice[:i], slice[i+1:]...)
+ }
+ }
+ return slice
+}
+
+// find returns the cached account for address if there is a unique match.
+// The exact matching rules are explained by the documentation of Account.
+// Callers must hold ac.mu.
+func (ac *accountCache) find(a Account) (Account, error) {
+ // Limit search to address candidates if possible.
+ matches := ac.all
+ if (a.Address != common.Address{}) {
+ matches = ac.byAddr[a.Address]
+ }
+ if a.URL.Path != "" {
+ // If only the basename is specified, complete the path.
+ if !strings.ContainsRune(a.URL.Path, filepath.Separator) {
+ a.URL.Path = filepath.Join(ac.keydir, a.URL.Path)
+ }
+ for i := range matches {
+ if matches[i].URL == a.URL {
+ return matches[i], nil
+ }
+ }
+ if (a.Address == common.Address{}) {
+ return Account{}, ErrNoMatch
+ }
+ }
+ switch len(matches) {
+ case 1:
+ return matches[0], nil
+ case 0:
+ return Account{}, ErrNoMatch
+ default:
+ err := &AmbiguousAddrError{Addr: a.Address, Matches: make([]Account, len(matches))}
+ copy(err.Matches, matches)
+ slices.SortFunc(err.Matches, byURL)
+ return Account{}, err
+ }
+}
+
+func (ac *accountCache) maybeReload() {
+ ac.mu.Lock()
+
+ if ac.watcher.running {
+ ac.mu.Unlock()
+ return // A watcher is running and will keep the cache up-to-date.
+ }
+ if ac.throttle == nil {
+ ac.throttle = time.NewTimer(0)
+ } else {
+ select {
+ case <-ac.throttle.C:
+ default:
+ ac.mu.Unlock()
+ return // The cache was reloaded recently.
+ }
+ }
+ // No watcher running, start it.
+ ac.watcher.start()
+ ac.throttle.Reset(minReloadInterval)
+ ac.mu.Unlock()
+ ac.scanAccounts()
+}
+
+func (ac *accountCache) close() {
+ ac.mu.Lock()
+ ac.watcher.close()
+ if ac.throttle != nil {
+ ac.throttle.Stop()
+ }
+ if ac.notify != nil {
+ close(ac.notify)
+ ac.notify = nil
+ }
+ ac.mu.Unlock()
+}
+
+// scanAccounts checks if any changes have occurred on the filesystem, and
+// updates the account cache accordingly
+func (ac *accountCache) scanAccounts() error {
+ // Scan the entire folder metadata for file changes
+ creates, deletes, updates, err := ac.fileC.scan(ac.keydir)
+ if err != nil {
+ log.Debug("Failed to reload keystore contents", "err", err)
+ return err
+ }
+ if creates.Cardinality() == 0 && deletes.Cardinality() == 0 && updates.Cardinality() == 0 {
+ return nil
+ }
+ // Create a helper method to scan the contents of the key files
+ var (
+ buf = new(bufio.Reader)
+ key struct {
+ Address string `json:"address"`
+ }
+ )
+ readAccount := func(path string) *Account {
+ fd, err := os.Open(path)
+ if err != nil {
+ log.Trace("Failed to open keystore file", "path", path, "err", err)
+ return nil
+ }
+ defer fd.Close()
+ buf.Reset(fd)
+ // Parse the address.
+ key.Address = ""
+ err = json.NewDecoder(buf).Decode(&key)
+ addr := common.HexToAddress(key.Address)
+ switch {
+ case err != nil:
+ log.Debug("Failed to decode keystore key", "path", path, "err", err)
+ case addr == common.Address{}:
+ log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address")
+ default:
+ return &Account{
+ Address: addr,
+ URL: URL{Scheme: KeyStoreScheme, Path: path},
+ }
+ }
+ return nil
+ }
+ // Process all the file diffs
+ start := time.Now()
+
+ for _, path := range creates.ToSlice() {
+ if a := readAccount(path); a != nil {
+ ac.add(*a)
+ }
+ }
+ for _, path := range deletes.ToSlice() {
+ ac.deleteByFile(path)
+ }
+ for _, path := range updates.ToSlice() {
+ ac.deleteByFile(path)
+ if a := readAccount(path); a != nil {
+ ac.add(*a)
+ }
+ }
+ end := time.Now()
+
+ select {
+ case ac.notify <- struct{}{}:
+ default:
+ }
+ log.Trace("Handled keystore changes", "time", end.Sub(start))
+ return nil
+}
diff --git a/arb/keystore/account_cache_test.go b/arb/keystore/account_cache_test.go
new file mode 100644
index 00000000000..93566707aa6
--- /dev/null
+++ b/arb/keystore/account_cache_test.go
@@ -0,0 +1,408 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "errors"
+ "fmt"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/cespare/cp"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/erigontech/erigon/common"
+ "golang.org/x/exp/slices"
+)
+
+var (
+ cachetestDir, _ = filepath.Abs(filepath.Join("testdata", "keystore"))
+ cachetestAccounts = []Account{
+ {
+ Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8")},
+ },
+ {
+ Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "aaa")},
+ },
+ {
+ Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(cachetestDir, "zzz")},
+ },
+ }
+)
+
+// waitWatcherStart waits up to 1s for the keystore watcher to start.
+func waitWatcherStart(ks *KeyStore) bool {
+ // On systems where file watch is not supported, just return "ok".
+ if !ks.cache.watcher.enabled() {
+ return true
+ }
+ // The watcher should start, and then exit.
+ for t0 := time.Now(); time.Since(t0) < 1*time.Second; time.Sleep(100 * time.Millisecond) {
+ if ks.cache.watcherStarted() {
+ return true
+ }
+ }
+ return false
+}
+
+func waitForAccounts(wantAccounts []Account, ks *KeyStore) error {
+ var list []Account
+ for t0 := time.Now(); time.Since(t0) < 5*time.Second; time.Sleep(100 * time.Millisecond) {
+ list = ks.Accounts()
+ if reflect.DeepEqual(list, wantAccounts) {
+ // ks should have also received change notifications
+ select {
+ case <-ks.changes:
+ default:
+ return errors.New("wasn't notified of new accounts")
+ }
+ return nil
+ }
+ }
+ return fmt.Errorf("\ngot %v\nwant %v", list, wantAccounts)
+}
+
+func TestWatchNewFile(t *testing.T) {
+ t.Parallel()
+
+ dir, ks := tmpKeyStore(t, false)
+
+ // Ensure the watcher is started before adding any files.
+ ks.Accounts()
+ if !waitWatcherStart(ks) {
+ t.Fatal("keystore watcher didn't start in time")
+ }
+ // Move in the files.
+ wantAccounts := make([]Account, len(cachetestAccounts))
+ for i := range cachetestAccounts {
+ wantAccounts[i] = Account{
+ Address: cachetestAccounts[i].Address,
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, filepath.Base(cachetestAccounts[i].URL.Path))},
+ }
+ if err := cp.CopyFile(wantAccounts[i].URL.Path, cachetestAccounts[i].URL.Path); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // ks should see the accounts.
+ if err := waitForAccounts(wantAccounts, ks); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestWatchNoDir(t *testing.T) {
+ t.Parallel()
+ // Create ks but not the directory that it watches.
+ dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-watchnodir-test-%d-%d", os.Getpid(), rand.Int()))
+ ks := NewKeyStore(dir, LightScryptN, LightScryptP)
+ list := ks.Accounts()
+ if len(list) > 0 {
+ t.Error("initial account list not empty:", list)
+ }
+ // The watcher should start, and then exit.
+ if !waitWatcherStart(ks) {
+ t.Fatal("keystore watcher didn't start in time")
+ }
+ // Create the directory and copy a key file into it.
+ os.MkdirAll(dir, 0700)
+ defer os.RemoveAll(dir)
+ file := filepath.Join(dir, "aaa")
+ if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
+ t.Fatal(err)
+ }
+
+ // ks should see the account.
+ wantAccounts := []Account{cachetestAccounts[0]}
+ wantAccounts[0].URL = URL{Scheme: KeyStoreScheme, Path: file}
+ for d := 200 * time.Millisecond; d < 8*time.Second; d *= 2 {
+ list = ks.Accounts()
+ if reflect.DeepEqual(list, wantAccounts) {
+ // ks should have also received change notifications
+ select {
+ case <-ks.changes:
+ default:
+ t.Fatalf("wasn't notified of new accounts")
+ }
+ return
+ }
+ time.Sleep(d)
+ }
+ t.Errorf("\ngot %v\nwant %v", list, wantAccounts)
+}
+
+func TestCacheInitialReload(t *testing.T) {
+ t.Parallel()
+ cache, _ := newAccountCache(cachetestDir)
+ accounts := cache.accounts()
+ if !reflect.DeepEqual(accounts, cachetestAccounts) {
+ t.Fatalf("got initial accounts: %swant %s", spew.Sdump(accounts), spew.Sdump(cachetestAccounts))
+ }
+}
+
+func TestCacheAddDeleteOrder(t *testing.T) {
+ t.Parallel()
+ cache, _ := newAccountCache("testdata/no-such-dir")
+ cache.watcher.running = true // prevent unexpected reloads
+
+ accs := []Account{
+ {
+ Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
+ URL: URL{Scheme: KeyStoreScheme, Path: "-309830980"},
+ },
+ {
+ Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"),
+ URL: URL{Scheme: KeyStoreScheme, Path: "ggg"},
+ },
+ {
+ Address: common.HexToAddress("8bda78331c916a08481428e4b07c96d3e916d165"),
+ URL: URL{Scheme: KeyStoreScheme, Path: "zzzzzz-the-very-last-one.keyXXX"},
+ },
+ {
+ Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
+ URL: URL{Scheme: KeyStoreScheme, Path: "SOMETHING.key"},
+ },
+ {
+ Address: common.HexToAddress("7ef5a6135f1fd6a02593eedc869c6d41d934aef8"),
+ URL: URL{Scheme: KeyStoreScheme, Path: "UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8"},
+ },
+ {
+ Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
+ URL: URL{Scheme: KeyStoreScheme, Path: "aaa"},
+ },
+ {
+ Address: common.HexToAddress("289d485d9771714cce91d3393d764e1311907acc"),
+ URL: URL{Scheme: KeyStoreScheme, Path: "zzz"},
+ },
+ }
+ for _, a := range accs {
+ cache.add(a)
+ }
+ // Add some of them twice to check that they don't get reinserted.
+ cache.add(accs[0])
+ cache.add(accs[2])
+
+ // Check that the account list is sorted by filename.
+ wantAccounts := make([]Account, len(accs))
+ copy(wantAccounts, accs)
+ slices.SortFunc(wantAccounts, byURL)
+ list := cache.accounts()
+ if !reflect.DeepEqual(list, wantAccounts) {
+		t.Fatalf("got accounts: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantAccounts))
+ }
+ for _, a := range accs {
+ if !cache.hasAddress(a.Address) {
+ t.Errorf("expected hasAccount(%x) to return true", a.Address)
+ }
+ }
+ if cache.hasAddress(common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e")) {
+ t.Errorf("expected hasAccount(%x) to return false", common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"))
+ }
+
+ // Delete a few keys from the cache.
+ for i := 0; i < len(accs); i += 2 {
+ cache.delete(wantAccounts[i])
+ }
+ cache.delete(Account{Address: common.HexToAddress("fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e"), URL: URL{Scheme: KeyStoreScheme, Path: "something"}})
+
+ // Check content again after deletion.
+ wantAccountsAfterDelete := []Account{
+ wantAccounts[1],
+ wantAccounts[3],
+ wantAccounts[5],
+ }
+ list = cache.accounts()
+ if !reflect.DeepEqual(list, wantAccountsAfterDelete) {
+ t.Fatalf("got accounts after delete: %s\nwant %s", spew.Sdump(list), spew.Sdump(wantAccountsAfterDelete))
+ }
+ for _, a := range wantAccountsAfterDelete {
+ if !cache.hasAddress(a.Address) {
+ t.Errorf("expected hasAccount(%x) to return true", a.Address)
+ }
+ }
+ if cache.hasAddress(wantAccounts[0].Address) {
+ t.Errorf("expected hasAccount(%x) to return false", wantAccounts[0].Address)
+ }
+}
+
+func TestCacheFind(t *testing.T) {
+ t.Parallel()
+ dir := filepath.Join("testdata", "dir")
+ cache, _ := newAccountCache(dir)
+ cache.watcher.running = true // prevent unexpected reloads
+
+ accs := []Account{
+ {
+ Address: common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "a.key")},
+ },
+ {
+ Address: common.HexToAddress("2cac1adea150210703ba75ed097ddfe24e14f213"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "b.key")},
+ },
+ {
+ Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "c.key")},
+ },
+ {
+ Address: common.HexToAddress("d49ff4eeb0b2686ed89c0fc0f2b6ea533ddbbd5e"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "c2.key")},
+ },
+ }
+ for _, a := range accs {
+ cache.add(a)
+ }
+
+ nomatchAccount := Account{
+ Address: common.HexToAddress("f466859ead1932d743d622cb74fc058882e8648a"),
+ URL: URL{Scheme: KeyStoreScheme, Path: filepath.Join(dir, "something")},
+ }
+ tests := []struct {
+ Query Account
+ WantResult Account
+ WantError error
+ }{
+ // by address
+ {Query: Account{Address: accs[0].Address}, WantResult: accs[0]},
+ // by file
+ {Query: Account{URL: accs[0].URL}, WantResult: accs[0]},
+ // by basename
+ {Query: Account{URL: URL{Scheme: KeyStoreScheme, Path: filepath.Base(accs[0].URL.Path)}}, WantResult: accs[0]},
+ // by file and address
+ {Query: accs[0], WantResult: accs[0]},
+ // ambiguous address, tie resolved by file
+ {Query: accs[2], WantResult: accs[2]},
+ // ambiguous address error
+ {
+ Query: Account{Address: accs[2].Address},
+ WantError: &AmbiguousAddrError{
+ Addr: accs[2].Address,
+ Matches: []Account{accs[2], accs[3]},
+ },
+ },
+ // no match error
+ {Query: nomatchAccount, WantError: ErrNoMatch},
+ {Query: Account{URL: nomatchAccount.URL}, WantError: ErrNoMatch},
+ {Query: Account{URL: URL{Scheme: KeyStoreScheme, Path: filepath.Base(nomatchAccount.URL.Path)}}, WantError: ErrNoMatch},
+ {Query: Account{Address: nomatchAccount.Address}, WantError: ErrNoMatch},
+ }
+ for i, test := range tests {
+ a, err := cache.find(test.Query)
+ if !reflect.DeepEqual(err, test.WantError) {
+ t.Errorf("test %d: error mismatch for query %v\ngot %q\nwant %q", i, test.Query, err, test.WantError)
+ continue
+ }
+ if a != test.WantResult {
+ t.Errorf("test %d: result mismatch for query %v\ngot %v\nwant %v", i, test.Query, a, test.WantResult)
+ continue
+ }
+ }
+}
+
+// TestUpdatedKeyfileContents tests that updating the contents of a keystore file
+// is noticed by the watcher, and the account cache is updated accordingly
+func TestUpdatedKeyfileContents(t *testing.T) {
+ t.Parallel()
+
+ // Create a temporary keystore to test with
+ dir := filepath.Join(os.TempDir(), fmt.Sprintf("eth-keystore-updatedkeyfilecontents-test-%d-%d", os.Getpid(), rand.Int()))
+ ks := NewKeyStore(dir, LightScryptN, LightScryptP)
+
+ list := ks.Accounts()
+ if len(list) > 0 {
+ t.Error("initial account list not empty:", list)
+ }
+ if !waitWatcherStart(ks) {
+ t.Fatal("keystore watcher didn't start in time")
+ }
+ // Create the directory and copy a key file into it.
+ os.MkdirAll(dir, 0700)
+ defer os.RemoveAll(dir)
+ file := filepath.Join(dir, "aaa")
+
+ // Place one of our testfiles in there
+ if err := cp.CopyFile(file, cachetestAccounts[0].URL.Path); err != nil {
+ t.Fatal(err)
+ }
+
+ // ks should see the account.
+ wantAccounts := []Account{cachetestAccounts[0]}
+ wantAccounts[0].URL = URL{Scheme: KeyStoreScheme, Path: file}
+ if err := waitForAccounts(wantAccounts, ks); err != nil {
+ t.Error(err)
+ return
+ }
+ // needed so that modTime of `file` is different to its current value after forceCopyFile
+ os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
+
+ // Now replace file contents
+ if err := forceCopyFile(file, cachetestAccounts[1].URL.Path); err != nil {
+ t.Fatal(err)
+ return
+ }
+ wantAccounts = []Account{cachetestAccounts[1]}
+ wantAccounts[0].URL = URL{Scheme: KeyStoreScheme, Path: file}
+ if err := waitForAccounts(wantAccounts, ks); err != nil {
+ t.Errorf("First replacement failed")
+ t.Error(err)
+ return
+ }
+
+ // needed so that modTime of `file` is different to its current value after forceCopyFile
+ os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
+
+ // Now replace file contents again
+ if err := forceCopyFile(file, cachetestAccounts[2].URL.Path); err != nil {
+ t.Fatal(err)
+ return
+ }
+ wantAccounts = []Account{cachetestAccounts[2]}
+ wantAccounts[0].URL = URL{Scheme: KeyStoreScheme, Path: file}
+ if err := waitForAccounts(wantAccounts, ks); err != nil {
+ t.Errorf("Second replacement failed")
+ t.Error(err)
+ return
+ }
+
+ // needed so that modTime of `file` is different to its current value after os.WriteFile
+ os.Chtimes(file, time.Now().Add(-time.Second), time.Now().Add(-time.Second))
+
+ // Now replace file contents with crap
+ if err := os.WriteFile(file, []byte("foo"), 0600); err != nil {
+ t.Fatal(err)
+ return
+ }
+ if err := waitForAccounts([]Account{}, ks); err != nil {
+ t.Errorf("Emptying account file failed")
+ t.Error(err)
+ return
+ }
+}
+
+// forceCopyFile is like cp.CopyFile, but doesn't complain if the destination exists.
+func forceCopyFile(dst, src string) error {
+ data, err := os.ReadFile(src)
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(dst, data, 0644)
+}
diff --git a/arb/keystore/file_cache.go b/arb/keystore/file_cache.go
new file mode 100644
index 00000000000..4737e30749f
--- /dev/null
+++ b/arb/keystore/file_cache.go
@@ -0,0 +1,105 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ mapset "github.com/deckarep/golang-set/v2"
+ "github.com/erigontech/erigon/log/v3"
+)
+
+// fileCache is a cache of files seen during scan of keystore.
+type fileCache struct {
+ all mapset.Set[string] // Set of all files from the keystore folder
+ lastMod time.Time // Last time instance when a file was modified
+ mu sync.Mutex
+}
+
+// scan performs a new scan on the given directory, compares against the already
+// cached filenames, and returns file sets: creates, deletes, updates.
+func (fc *fileCache) scan(keyDir string) (mapset.Set[string], mapset.Set[string], mapset.Set[string], error) {
+ t0 := time.Now()
+
+ // List all the files from the keystore folder
+ files, err := os.ReadDir(keyDir)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ t1 := time.Now()
+
+ fc.mu.Lock()
+ defer fc.mu.Unlock()
+
+ // Iterate all the files and gather their metadata
+ all := mapset.NewThreadUnsafeSet[string]()
+ mods := mapset.NewThreadUnsafeSet[string]()
+
+ var newLastMod time.Time
+ for _, fi := range files {
+ path := filepath.Join(keyDir, fi.Name())
+ // Skip any non-key files from the folder
+ if nonKeyFile(fi) {
+ log.Trace("Ignoring file on account scan", "path", path)
+ continue
+ }
+ // Gather the set of all and freshly modified files
+ all.Add(path)
+
+ info, err := fi.Info()
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ modified := info.ModTime()
+ if modified.After(fc.lastMod) {
+ mods.Add(path)
+ }
+ if modified.After(newLastMod) {
+ newLastMod = modified
+ }
+ }
+ t2 := time.Now()
+
+ // Update the tracked files and return the three sets
+ deletes := fc.all.Difference(all) // Deletes = previous - current
+ creates := all.Difference(fc.all) // Creates = current - previous
+ updates := mods.Difference(creates) // Updates = modified - creates
+
+ fc.all, fc.lastMod = all, newLastMod
+ t3 := time.Now()
+
+ // Report on the scanning stats and return
+ log.Debug("FS scan times", "list", t1.Sub(t0), "set", t2.Sub(t1), "diff", t3.Sub(t2))
+ return creates, deletes, updates, nil
+}
+
+// nonKeyFile ignores editor backups, hidden files and folders/symlinks.
+func nonKeyFile(fi os.DirEntry) bool {
+ // Skip editor backups and UNIX-style hidden files.
+ if strings.HasSuffix(fi.Name(), "~") || strings.HasPrefix(fi.Name(), ".") {
+ return true
+ }
+ // Skip misc special files, directories (yes, symlinks too).
+ if fi.IsDir() || !fi.Type().IsRegular() {
+ return true
+ }
+ return false
+}
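+
+// Illustrative usage sketch: how a caller might consume scan's three result
+// sets. The cache-maintenance actions in the loop bodies are hypothetical
+// placeholders, not functions defined in this package.
+//
+//	creates, deletes, updates, err := fc.scan(keyDir)
+//	if err == nil {
+//		for _, p := range creates.ToSlice() { /* load new key file p */ }
+//		for _, p := range deletes.ToSlice() { /* drop cached entry for p */ }
+//		for _, p := range updates.ToSlice() { /* re-read key file p */ }
+//	}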
diff --git a/arb/keystore/key.go b/arb/keystore/key.go
new file mode 100644
index 00000000000..25ecd405459
--- /dev/null
+++ b/arb/keystore/key.go
@@ -0,0 +1,236 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/crypto"
+ "github.com/google/uuid"
+)
+
+const (
+ version = 3
+)
+
+type Key struct {
+ Id uuid.UUID // Version 4 "random" for unique id not derived from key data
+ // to simplify lookups we also store the address
+ Address common.Address
+ // we only store privkey as pubkey/address can be derived from it
+ // privkey in this struct is always in plaintext
+ PrivateKey *ecdsa.PrivateKey
+}
+
+type keyStore interface {
+ // Loads and decrypts the key from disk.
+ GetKey(addr common.Address, filename string, auth string) (*Key, error)
+ // Writes and encrypts the key.
+ StoreKey(filename string, k *Key, auth string) error
+ // Joins filename with the key directory unless it is already absolute.
+ JoinPath(filename string) string
+}
+
+type plainKeyJSON struct {
+ Address string `json:"address"`
+ PrivateKey string `json:"privatekey"`
+ Id string `json:"id"`
+ Version int `json:"version"`
+}
+
+type encryptedKeyJSONV3 struct {
+ Address string `json:"address"`
+ Crypto CryptoJSON `json:"crypto"`
+ Id string `json:"id"`
+ Version int `json:"version"`
+}
+
+type encryptedKeyJSONV1 struct {
+ Address string `json:"address"`
+ Crypto CryptoJSON `json:"crypto"`
+ Id string `json:"id"`
+ Version string `json:"version"`
+}
+
+type CryptoJSON struct {
+ Cipher string `json:"cipher"`
+ CipherText string `json:"ciphertext"`
+ CipherParams cipherparamsJSON `json:"cipherparams"`
+ KDF string `json:"kdf"`
+ KDFParams map[string]interface{} `json:"kdfparams"`
+ MAC string `json:"mac"`
+}
+
+type cipherparamsJSON struct {
+ IV string `json:"iv"`
+}
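+
+// For reference, an encryptedKeyJSONV3 document produced with the scrypt KDF
+// serializes roughly as below; hex values are shortened to "…" for illustration.
+//
+//	{
+//	  "address": "7ef5a6135f1fd6a02593eedc869c6d41d934aef8",
+//	  "crypto": {
+//	    "cipher": "aes-128-ctr",
+//	    "ciphertext": "…",
+//	    "cipherparams": {"iv": "…"},
+//	    "kdf": "scrypt",
+//	    "kdfparams": {"dklen": 32, "n": 262144, "r": 8, "p": 1, "salt": "…"},
+//	    "mac": "…"
+//	  },
+//	  "id": "…",
+//	  "version": 3
+//	}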
+
+func (k *Key) MarshalJSON() (j []byte, err error) {
+ jStruct := plainKeyJSON{
+ hex.EncodeToString(k.Address[:]),
+ hex.EncodeToString(crypto.FromECDSA(k.PrivateKey)),
+ k.Id.String(),
+ version,
+ }
+ j, err = json.Marshal(jStruct)
+ return j, err
+}
+
+func (k *Key) UnmarshalJSON(j []byte) (err error) {
+ keyJSON := new(plainKeyJSON)
+ err = json.Unmarshal(j, &keyJSON)
+ if err != nil {
+ return err
+ }
+
+ u := new(uuid.UUID)
+ *u, err = uuid.Parse(keyJSON.Id)
+ if err != nil {
+ return err
+ }
+ k.Id = *u
+ addr, err := hex.DecodeString(keyJSON.Address)
+ if err != nil {
+ return err
+ }
+ privkey, err := crypto.HexToECDSA(keyJSON.PrivateKey)
+ if err != nil {
+ return err
+ }
+
+ k.Address = common.BytesToAddress(addr)
+ k.PrivateKey = privkey
+
+ return nil
+}
+
+func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key {
+ id, err := uuid.NewRandom()
+ if err != nil {
+ panic(fmt.Sprintf("Could not create random uuid: %v", err))
+ }
+ key := &Key{
+ Id: id,
+ Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey),
+ PrivateKey: privateKeyECDSA,
+ }
+ return key
+}
+
+// NewKeyForDirectICAP generates a key whose address fits into < 155 bits so it can fit
+// into the Direct ICAP spec. For simplicity and easier compatibility with other libs, we
+// retry until the first byte is 0.
+func NewKeyForDirectICAP(rand io.Reader) *Key {
+ randBytes := make([]byte, 64)
+ _, err := rand.Read(randBytes)
+ if err != nil {
+ panic("key generation: could not read from random source: " + err.Error())
+ }
+ reader := bytes.NewReader(randBytes)
+ privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), reader)
+ if err != nil {
+ panic("key generation: ecdsa.GenerateKey failed: " + err.Error())
+ }
+ key := newKeyFromECDSA(privateKeyECDSA)
+ if !strings.HasPrefix(key.Address.Hex(), "0x00") {
+ return NewKeyForDirectICAP(rand)
+ }
+ return key
+}
+
+func newKey(rand io.Reader) (*Key, error) {
+ privateKeyECDSA, err := ecdsa.GenerateKey(crypto.S256(), rand)
+ if err != nil {
+ return nil, err
+ }
+ return newKeyFromECDSA(privateKeyECDSA), nil
+}
+
+func storeNewKey(ks keyStore, rand io.Reader, auth string) (*Key, Account, error) {
+ key, err := newKey(rand)
+ if err != nil {
+ return nil, Account{}, err
+ }
+ a := Account{
+ Address: key.Address,
+ URL: URL{Scheme: KeyStoreScheme, Path: ks.JoinPath(keyFileName(key.Address))},
+ }
+ //if err := ks.StoreKey(a.URL.Path, key, auth); err != nil {
+ // zeroKey(key.PrivateKey)
+ // return nil, a, err
+ //}
+ return key, a, err
+}
+
+func writeTemporaryKeyFile(file string, content []byte) (string, error) {
+ // Create the keystore directory with appropriate permissions
+ // in case it is not present yet.
+ const dirPerm = 0700
+ if err := os.MkdirAll(filepath.Dir(file), dirPerm); err != nil {
+ return "", err
+ }
+ // Atomic write: create a temporary hidden file first
+	// then move it into place. os.CreateTemp assigns mode 0600.
+ f, err := os.CreateTemp(filepath.Dir(file), "."+filepath.Base(file)+".tmp")
+ if err != nil {
+ return "", err
+ }
+ if _, err := f.Write(content); err != nil {
+ f.Close()
+ os.Remove(f.Name())
+ return "", err
+ }
+ f.Close()
+ return f.Name(), nil
+}
+
+func writeKeyFile(file string, content []byte) error {
+ name, err := writeTemporaryKeyFile(file, content)
+ if err != nil {
+ return err
+ }
+ return os.Rename(name, file)
+}
+
+// keyFileName implements the naming convention for keyfiles:
+// UTC--<created_at UTC ISO8601>-<address hex>
+func keyFileName(keyAddr common.Address) string {
+ ts := time.Now().UTC()
+ return fmt.Sprintf("UTC--%s--%s", toISO8601(ts), hex.EncodeToString(keyAddr[:]))
+}
+
+func toISO8601(t time.Time) string {
+ var tz string
+ name, offset := t.Zone()
+ if name == "UTC" {
+ tz = "Z"
+ } else {
+ tz = fmt.Sprintf("%03d00", offset/3600)
+ }
+ return fmt.Sprintf("%04d-%02d-%02dT%02d-%02d-%02d.%09d%s",
+ t.Year(), t.Month(), t.Day(), t.Hour(), t.Minute(), t.Second(), t.Nanosecond(), tz)
+}
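+
+// For example, keyFileName for address 7ef5a6135f1fd6a02593eedc869c6d41d934aef8
+// created at 2016-03-22 12:57:55.920751759 UTC yields:
+//
+//	UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8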
diff --git a/arb/keystore/keystore.go b/arb/keystore/keystore.go
new file mode 100644
index 00000000000..a12859cc44b
--- /dev/null
+++ b/arb/keystore/keystore.go
@@ -0,0 +1,514 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package keystore implements encrypted storage of secp256k1 private keys.
+//
+// Keys are stored as encrypted JSON files according to the Web3 Secret Storage specification.
+// See https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition for more information.
+package keystore
+
+import (
+ "crypto/ecdsa"
+ crand "crypto/rand"
+ "errors"
+ "math/big"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sync"
+ "time"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/crypto"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/erigontech/erigon/p2p/event"
+)
+
+var (
+ ErrLocked = NewAuthNeededError("password or unlock")
+ ErrNoMatch = errors.New("no key for given address or file")
+ ErrDecrypt = errors.New("could not decrypt key with given password")
+
+ // ErrAccountAlreadyExists is returned if an account attempted to import is
+ // already present in the keystore.
+ ErrAccountAlreadyExists = errors.New("account already exists")
+)
+
+// KeyStoreType is the reflect type of a keystore backend.
+var KeyStoreType = reflect.TypeOf(&KeyStore{})
+
+// KeyStoreScheme is the protocol scheme prefixing account and wallet URLs.
+const KeyStoreScheme = "keystore"
+
+// Maximum time between wallet refreshes (if filesystem notifications don't work).
+const walletRefreshCycle = 3 * time.Second
+
+// KeyStore manages a key storage directory on disk.
+type KeyStore struct {
+ storage keyStore // Storage backend, might be cleartext or encrypted
+ cache *accountCache // In-memory account cache over the filesystem storage
+ changes chan struct{} // Channel receiving change notifications from the cache
+ unlocked map[common.Address]*unlocked // Currently unlocked account (decrypted private keys)
+
+ wallets []Wallet // Wallet wrappers around the individual key files
+ updateFeed event.Feed // Event feed to notify wallet additions/removals
+ updateScope event.SubscriptionScope // Subscription scope tracking current live listeners
+ updating bool // Whether the event notification loop is running
+
+ mu sync.RWMutex
+ importMu sync.Mutex // Import Mutex locks the import to prevent two insertions from racing
+}
+
+type unlocked struct {
+ *Key
+ abort chan struct{}
+}
+
+// NewKeyStore creates a keystore for the given directory.
+func NewKeyStore(keydir string, scryptN, scryptP int) *KeyStore {
+ keydir, _ = filepath.Abs(keydir)
+ ks := &KeyStore{storage: &keyStorePassphrase{keydir, scryptN, scryptP, false}}
+ ks.init(keydir)
+ return ks
+}
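+
+// Illustrative usage sketch (error handling elided; hash stands for any
+// 32-byte digest the caller wants signed):
+//
+//	ks := NewKeyStore("/path/to/keys", LightScryptN, LightScryptP)
+//	acc, _ := ks.NewAccount("passphrase")
+//	_ = ks.Unlock(acc, "passphrase")
+//	sig, _ := ks.SignHash(acc, hash)
+//	_ = ks.Lock(acc.Address)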
+
+// NewPlaintextKeyStore creates a keystore for the given directory.
+// Deprecated: Use NewKeyStore.
+func NewPlaintextKeyStore(keydir string) *KeyStore {
+ keydir, _ = filepath.Abs(keydir)
+ ks := &KeyStore{storage: &keyStorePlain{keydir}}
+ ks.init(keydir)
+ return ks
+}
+
+func (ks *KeyStore) init(keydir string) {
+ // Lock the mutex since the account cache might call back with events
+ ks.mu.Lock()
+ defer ks.mu.Unlock()
+
+ // Initialize the set of unlocked keys and the account cache
+ ks.unlocked = make(map[common.Address]*unlocked)
+ ks.cache, ks.changes = newAccountCache(keydir)
+
+ // TODO: In order for this finalizer to work, there must be no references
+ // to ks. addressCache doesn't keep a reference but unlocked keys do,
+ // so the finalizer will not trigger until all timed unlocks have expired.
+ runtime.SetFinalizer(ks, func(m *KeyStore) {
+ m.cache.close()
+ })
+ // Create the initial list of wallets from the cache
+ accs := ks.cache.accounts()
+ ks.wallets = make([]Wallet, len(accs))
+ for i := 0; i < len(accs); i++ {
+ ks.wallets[i] = &keystoreWallet{account: accs[i], keystore: ks}
+ }
+}
+
+// Wallets implements accounts.Backend, returning all single-key wallets from the
+// keystore directory.
+func (ks *KeyStore) Wallets() []Wallet {
+ // Make sure the list of wallets is in sync with the account cache
+ ks.refreshWallets()
+
+ ks.mu.RLock()
+ defer ks.mu.RUnlock()
+
+ cpy := make([]Wallet, len(ks.wallets))
+ copy(cpy, ks.wallets)
+ return cpy
+}
+
+// refreshWallets retrieves the current account list and based on that does any
+// necessary wallet refreshes.
+func (ks *KeyStore) refreshWallets() {
+ // Retrieve the current list of accounts
+ ks.mu.Lock()
+ accs := ks.cache.accounts()
+
+ // Transform the current list of wallets into the new one
+ var (
+ wallets = make([]Wallet, 0, len(accs))
+ events []WalletEvent
+ )
+
+ for _, account := range accs {
+		// Drop wallets that sort before the next account
+ for len(ks.wallets) > 0 && ks.wallets[0].URL().Cmp(account.URL) < 0 {
+ events = append(events, WalletEvent{Wallet: ks.wallets[0], Kind: WalletDropped})
+ ks.wallets = ks.wallets[1:]
+ }
+ // If there are no more wallets or the account is before the next, wrap new wallet
+ if len(ks.wallets) == 0 || ks.wallets[0].URL().Cmp(account.URL) > 0 {
+ wallet := &keystoreWallet{account: account, keystore: ks}
+
+ events = append(events, WalletEvent{Wallet: wallet, Kind: WalletArrived})
+ wallets = append(wallets, wallet)
+ continue
+ }
+ // If the account is the same as the first wallet, keep it
+ if ks.wallets[0].Accounts()[0] == account {
+ wallets = append(wallets, ks.wallets[0])
+ ks.wallets = ks.wallets[1:]
+ continue
+ }
+ }
+ // Drop any leftover wallets and set the new batch
+ for _, wallet := range ks.wallets {
+ events = append(events, WalletEvent{Wallet: wallet, Kind: WalletDropped})
+ }
+ ks.wallets = wallets
+ ks.mu.Unlock()
+
+ // Fire all wallet events and return
+ for _, event := range events {
+ ks.updateFeed.Send(event)
+ }
+}
+
+// Subscribe implements accounts.Backend, creating an async subscription to
+// receive notifications on the addition or removal of keystore wallets.
+func (ks *KeyStore) Subscribe(sink chan<- WalletEvent) event.Subscription {
+ // We need the mutex to reliably start/stop the update loop
+ ks.mu.Lock()
+ defer ks.mu.Unlock()
+
+ // Subscribe the caller and track the subscriber count
+ sub := ks.updateScope.Track(ks.updateFeed.Subscribe(sink))
+
+ // Subscribers require an active notification loop, start it
+ if !ks.updating {
+ ks.updating = true
+ go ks.updater()
+ }
+ return sub
+}
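+
+// Illustrative subscription sketch; the event-handling body is a placeholder:
+//
+//	ch := make(chan WalletEvent)
+//	sub := ks.Subscribe(ch)
+//	defer sub.Unsubscribe()
+//	for ev := range ch {
+//		// ev.Kind is WalletArrived or WalletDropped; ev.Wallet wraps a single key file
+//	}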
+
+// updater is responsible for maintaining an up-to-date list of wallets stored in
+// the keystore, and for firing wallet addition/removal events. It listens for
+// account change events from the underlying account cache, and also periodically
+// forces a manual refresh (only triggers for systems where the filesystem notifier
+// is not running).
+func (ks *KeyStore) updater() {
+ for {
+ // Wait for an account update or a refresh timeout
+ select {
+ case <-ks.changes:
+ case <-time.After(walletRefreshCycle):
+ }
+ // Run the wallet refresher
+ ks.refreshWallets()
+
+ // If all our subscribers left, stop the updater
+ ks.mu.Lock()
+ if ks.updateScope.Count() == 0 {
+ ks.updating = false
+ ks.mu.Unlock()
+ return
+ }
+ ks.mu.Unlock()
+ }
+}
+
+// HasAddress reports whether a key with the given address is present.
+func (ks *KeyStore) HasAddress(addr common.Address) bool {
+ return ks.cache.hasAddress(addr)
+}
+
+// Accounts returns all key files present in the directory.
+func (ks *KeyStore) Accounts() []Account {
+ return ks.cache.accounts()
+}
+
+// Delete deletes the key matched by account if the passphrase is correct.
+// If the account contains no filename, the address must match a unique key.
+func (ks *KeyStore) Delete(a Account, passphrase string) error {
+ // Decrypting the key isn't really necessary, but we do
+ // it anyway to check the password and zero out the key
+ // immediately afterwards.
+ a, key, err := ks.getDecryptedKey(a, passphrase)
+ if key != nil {
+ zeroKey(key.PrivateKey)
+ }
+ if err != nil {
+ return err
+ }
+ // The order is crucial here. The key is dropped from the
+ // cache after the file is gone so that a reload happening in
+ // between won't insert it into the cache again.
+ err = os.Remove(a.URL.Path)
+ if err == nil {
+ ks.cache.delete(a)
+ ks.refreshWallets()
+ }
+ return err
+}
+
+// SignHash calculates a ECDSA signature for the given hash. The produced
+// signature is in the [R || S || V] format where V is 0 or 1.
+func (ks *KeyStore) SignHash(a Account, hash []byte) ([]byte, error) {
+ // Look up the key to sign with and abort if it cannot be found
+ ks.mu.RLock()
+ defer ks.mu.RUnlock()
+
+ unlockedKey, found := ks.unlocked[a.Address]
+ if !found {
+ return nil, ErrLocked
+ }
+ // Sign the hash using plain ECDSA operations
+ return crypto.Sign(hash, unlockedKey.PrivateKey)
+}
+
+// SignTx signs the given transaction with the requested account.
+func (ks *KeyStore) SignTx(a Account, tx types.Transaction, chainID *big.Int) (types.Transaction, error) {
+ // Look up the key to sign with and abort if it cannot be found
+ ks.mu.RLock()
+ defer ks.mu.RUnlock()
+
+ unlockedKey, found := ks.unlocked[a.Address]
+ if !found {
+ return nil, ErrLocked
+ }
+ // Depending on the presence of the chain ID, sign with 2718 or homestead
+ signer := types.LatestSignerForChainID(chainID)
+ return types.SignTx(tx, *signer, unlockedKey.PrivateKey)
+}
+
+// SignHashWithPassphrase signs hash if the private key matching the given address
+// can be decrypted with the given passphrase. The produced signature is in the
+// [R || S || V] format where V is 0 or 1.
+func (ks *KeyStore) SignHashWithPassphrase(a Account, passphrase string, hash []byte) (signature []byte, err error) {
+ _, key, err := ks.getDecryptedKey(a, passphrase)
+ if err != nil {
+ return nil, err
+ }
+ defer zeroKey(key.PrivateKey)
+ return crypto.Sign(hash, key.PrivateKey)
+}
+
+// SignTxWithPassphrase signs the transaction if the private key matching the
+// given address can be decrypted with the given passphrase.
+func (ks *KeyStore) SignTxWithPassphrase(a Account, passphrase string, tx types.Transaction, chainID *big.Int) (types.Transaction, error) {
+ _, key, err := ks.getDecryptedKey(a, passphrase)
+ if err != nil {
+ return nil, err
+ }
+ defer zeroKey(key.PrivateKey)
+ // Depending on the presence of the chain ID, sign with or without replay protection.
+ signer := types.LatestSignerForChainID(chainID)
+ return types.SignTx(tx, *signer, key.PrivateKey)
+}
+
+// Unlock unlocks the given account indefinitely.
+func (ks *KeyStore) Unlock(a Account, passphrase string) error {
+ return ks.TimedUnlock(a, passphrase, 0)
+}
+
+// Lock removes the private key with the given address from memory.
+func (ks *KeyStore) Lock(addr common.Address) error {
+ ks.mu.Lock()
+ if unl, found := ks.unlocked[addr]; found {
+ ks.mu.Unlock()
+ ks.expire(addr, unl, time.Duration(0)*time.Nanosecond)
+ } else {
+ ks.mu.Unlock()
+ }
+ return nil
+}
+
+// TimedUnlock unlocks the given account with the passphrase. The account
+// stays unlocked for the duration of timeout. A timeout of 0 unlocks the account
+// until the program exits. The account must match a unique key file.
+//
+// If the account address is already unlocked for a duration, TimedUnlock extends or
+// shortens the active unlock timeout. If the address was previously unlocked
+// indefinitely the timeout is not altered.
+func (ks *KeyStore) TimedUnlock(a Account, passphrase string, timeout time.Duration) error {
+ a, key, err := ks.getDecryptedKey(a, passphrase)
+ if err != nil {
+ return err
+ }
+
+ ks.mu.Lock()
+ defer ks.mu.Unlock()
+ u, found := ks.unlocked[a.Address]
+ if found {
+ if u.abort == nil {
+ // The address was unlocked indefinitely, so unlocking
+ // it with a timeout would be confusing.
+ zeroKey(key.PrivateKey)
+ return nil
+ }
+ // Terminate the expire goroutine and replace it below.
+ close(u.abort)
+ }
+ if timeout > 0 {
+ u = &unlocked{Key: key, abort: make(chan struct{})}
+ go ks.expire(a.Address, u, timeout)
+ } else {
+ u = &unlocked{Key: key}
+ }
+ ks.unlocked[a.Address] = u
+ return nil
+}
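+
+// Illustrative sketch: unlock for a bounded window, sign, then let the key expire.
+//
+//	if err := ks.TimedUnlock(acc, "passphrase", 30*time.Second); err != nil {
+//		return err
+//	}
+//	sig, err := ks.SignHash(acc, hash) // succeeds until the timeout fires, then returns ErrLocked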
+
+// Find resolves the given account into a unique entry in the keystore.
+func (ks *KeyStore) Find(a Account) (Account, error) {
+ ks.cache.maybeReload()
+ ks.cache.mu.Lock()
+ a, err := ks.cache.find(a)
+ ks.cache.mu.Unlock()
+ return a, err
+}
+
+func (ks *KeyStore) getDecryptedKey(a Account, auth string) (Account, *Key, error) {
+ a, err := ks.Find(a)
+ if err != nil {
+ return a, nil, err
+ }
+ key, err := ks.storage.GetKey(a.Address, a.URL.Path, auth)
+ return a, key, err
+}
+
+func (ks *KeyStore) expire(addr common.Address, u *unlocked, timeout time.Duration) {
+ t := time.NewTimer(timeout)
+ defer t.Stop()
+ select {
+ case <-u.abort:
+ // just quit
+ case <-t.C:
+ ks.mu.Lock()
+		// only drop if it's still the same key instance that this expire
+		// goroutine was launched with. We can check that using pointer equality
+ // because the map stores a new pointer every time the key is
+ // unlocked.
+ if ks.unlocked[addr] == u {
+ zeroKey(u.PrivateKey)
+ delete(ks.unlocked, addr)
+ }
+ ks.mu.Unlock()
+ }
+}
+
+// NewAccount generates a new key and stores it into the key directory,
+// encrypting it with the passphrase.
+func (ks *KeyStore) NewAccount(passphrase string) (Account, error) {
+ _, account, err := storeNewKey(ks.storage, crand.Reader, passphrase)
+ if err != nil {
+ return Account{}, err
+ }
+ // Add the account to the cache immediately rather
+ // than waiting for file system notifications to pick it up.
+ ks.cache.add(account)
+ ks.refreshWallets()
+ return account, nil
+}
+
+// Export exports as a JSON key, encrypted with newPassphrase.
+func (ks *KeyStore) Export(a Account, passphrase, newPassphrase string) (keyJSON []byte, err error) {
+ _, key, err := ks.getDecryptedKey(a, passphrase)
+ if err != nil {
+ return nil, err
+ }
+ var N, P int
+ if store, ok := ks.storage.(*keyStorePassphrase); ok {
+ N, P = store.scryptN, store.scryptP
+ } else {
+ N, P = StandardScryptN, StandardScryptP
+ }
+ return EncryptKey(key, newPassphrase, N, P)
+}
+
+// Import stores the given encrypted JSON key into the key directory.
+func (ks *KeyStore) Import(keyJSON []byte, passphrase, newPassphrase string) (Account, error) {
+ key, err := DecryptKey(keyJSON, passphrase)
+ if key != nil && key.PrivateKey != nil {
+ defer zeroKey(key.PrivateKey)
+ }
+ if err != nil {
+ return Account{}, err
+ }
+ ks.importMu.Lock()
+ defer ks.importMu.Unlock()
+
+ if ks.cache.hasAddress(key.Address) {
+ return Account{
+ Address: key.Address,
+ }, ErrAccountAlreadyExists
+ }
+ return ks.importKey(key, newPassphrase)
+}
+
+// ImportECDSA stores the given key into the key directory, encrypting it with the passphrase.
+func (ks *KeyStore) ImportECDSA(priv *ecdsa.PrivateKey, passphrase string) (Account, error) {
+ ks.importMu.Lock()
+ defer ks.importMu.Unlock()
+
+ key := newKeyFromECDSA(priv)
+ if ks.cache.hasAddress(key.Address) {
+ return Account{
+ Address: key.Address,
+ }, ErrAccountAlreadyExists
+ }
+ return ks.importKey(key, passphrase)
+}
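+
+// Illustrative sketch: importing a freshly generated secp256k1 key, mirroring
+// what the keystore tests do.
+//
+//	priv, _ := crypto.GenerateKey()
+//	acc, err := ks.ImportECDSA(priv, "passphrase")
+//	if errors.Is(err, ErrAccountAlreadyExists) {
+//		// the address is already present in the keystore
+//	}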
+
+func (ks *KeyStore) importKey(key *Key, passphrase string) (Account, error) {
+ a := Account{Address: key.Address, URL: URL{Scheme: KeyStoreScheme, Path: ks.storage.JoinPath(keyFileName(key.Address))}}
+ if err := ks.storage.StoreKey(a.URL.Path, key, passphrase); err != nil {
+ return Account{}, err
+ }
+ ks.cache.add(a)
+ ks.refreshWallets()
+ return a, nil
+}
+
+// Update changes the passphrase of an existing account.
+func (ks *KeyStore) Update(a Account, passphrase, newPassphrase string) error {
+ a, key, err := ks.getDecryptedKey(a, passphrase)
+ if err != nil {
+ return err
+ }
+ return ks.storage.StoreKey(a.URL.Path, key, newPassphrase)
+}
+
+// ImportPreSaleKey decrypts the given Ethereum presale wallet and stores
+// a key file in the key directory. The key file is encrypted with the same passphrase.
+func (ks *KeyStore) ImportPreSaleKey(keyJSON []byte, passphrase string) (Account, error) {
+ a, _, err := importPreSaleKey(ks.storage, keyJSON, passphrase)
+ if err != nil {
+ return a, err
+ }
+ ks.cache.add(a)
+ ks.refreshWallets()
+ return a, nil
+}
+
+// isUpdating returns whether the event notification loop is running.
+// This method is mainly meant for tests.
+func (ks *KeyStore) isUpdating() bool {
+ ks.mu.RLock()
+ defer ks.mu.RUnlock()
+ return ks.updating
+}
+
+// zeroKey zeroes a private key in memory.
+func zeroKey(k *ecdsa.PrivateKey) {
+ b := k.D.Bits()
+ for i := range b {
+ b[i] = 0
+ }
+}
diff --git a/arb/keystore/keystore_fuzzing_test.go b/arb/keystore/keystore_fuzzing_test.go
new file mode 100644
index 00000000000..793b46336af
--- /dev/null
+++ b/arb/keystore/keystore_fuzzing_test.go
@@ -0,0 +1,34 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "testing"
+)
+
+func FuzzPassword(f *testing.F) {
+ f.Fuzz(func(t *testing.T, password string) {
+ ks := NewKeyStore(t.TempDir(), LightScryptN, LightScryptP)
+ a, err := ks.NewAccount(password)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := ks.Unlock(a, password); err != nil {
+ t.Fatal(err)
+ }
+ })
+}
diff --git a/arb/keystore/keystore_test.go b/arb/keystore/keystore_test.go
new file mode 100644
index 00000000000..793f53b9f0f
--- /dev/null
+++ b/arb/keystore/keystore_test.go
@@ -0,0 +1,467 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "math/rand"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/erigontech/erigon/common"
+ dirs "github.com/erigontech/erigon/common/dir"
+ "github.com/erigontech/erigon/crypto"
+ "github.com/erigontech/erigon/p2p/event"
+ "golang.org/x/exp/slices"
+)
+
+var testSigData = make([]byte, 32)
+
+func TestKeyStore(t *testing.T) {
+ t.Parallel()
+ dir, ks := tmpKeyStore(t, true)
+
+ a, err := ks.NewAccount("foo")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !strings.HasPrefix(a.URL.Path, dir) {
+ t.Errorf("account file %s doesn't have dir prefix", a.URL)
+ }
+ stat, err := os.Stat(a.URL.Path)
+ if err != nil {
+ t.Fatalf("account file %s doesn't exist (%v)", a.URL, err)
+ }
+ if runtime.GOOS != "windows" && stat.Mode() != 0600 {
+ t.Fatalf("account file has wrong mode: got %o, want %o", stat.Mode(), 0600)
+ }
+ if !ks.HasAddress(a.Address) {
+ t.Errorf("HasAccount(%x) should've returned true", a.Address)
+ }
+ if err := ks.Update(a, "foo", "bar"); err != nil {
+ t.Errorf("Update error: %v", err)
+ }
+ if err := ks.Delete(a, "bar"); err != nil {
+ t.Errorf("Delete error: %v", err)
+ }
+	if ex, err := dirs.FileExist(a.URL.Path); ex || err != nil {
+ t.Errorf("account file %s should be gone after Delete", a.URL)
+ }
+ if ks.HasAddress(a.Address) {
+		t.Errorf("HasAccount(%x) should've returned false after Delete", a.Address)
+ }
+}
+
+func TestSign(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, true)
+
+ pass := "" // not used but required by API
+ a1, err := ks.NewAccount(pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := ks.Unlock(a1, ""); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := ks.SignHash(Account{Address: a1.Address}, testSigData); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestSignWithPassphrase(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, true)
+
+ pass := "passwd"
+ acc, err := ks.NewAccount(pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, unlocked := ks.unlocked[acc.Address]; unlocked {
+ t.Fatal("expected account to be locked")
+ }
+
+ _, err = ks.SignHashWithPassphrase(acc, pass, testSigData)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if _, unlocked := ks.unlocked[acc.Address]; unlocked {
+ t.Fatal("expected account to be locked")
+ }
+
+ if _, err = ks.SignHashWithPassphrase(acc, "invalid passwd", testSigData); err == nil {
+ t.Fatal("expected SignHashWithPassphrase to fail with invalid password")
+ }
+}
+
+func TestTimedUnlock(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, true)
+
+ pass := "foo"
+ a1, err := ks.NewAccount(pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Signing without passphrase fails because account is locked
+ _, err = ks.SignHash(Account{Address: a1.Address}, testSigData)
+ if err != ErrLocked {
+ t.Fatal("Signing should've failed with ErrLocked before unlocking, got ", err)
+ }
+
+ // Signing with passphrase works
+ if err = ks.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil {
+ t.Fatal(err)
+ }
+
+ // Signing without passphrase works because account is temp unlocked
+ _, err = ks.SignHash(Account{Address: a1.Address}, testSigData)
+ if err != nil {
+ t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
+ }
+
+ // Signing fails again after automatic locking
+ time.Sleep(250 * time.Millisecond)
+ _, err = ks.SignHash(Account{Address: a1.Address}, testSigData)
+ if err != ErrLocked {
+		t.Fatal("Signing should've failed with ErrLocked after the timeout expired, got ", err)
+ }
+}
+
+func TestOverrideUnlock(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, false)
+
+ pass := "foo"
+ a1, err := ks.NewAccount(pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Unlock indefinitely.
+ if err = ks.TimedUnlock(a1, pass, 5*time.Minute); err != nil {
+ t.Fatal(err)
+ }
+
+ // Signing without passphrase works because account is temp unlocked
+ _, err = ks.SignHash(Account{Address: a1.Address}, testSigData)
+ if err != nil {
+ t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
+ }
+
+ // reset unlock to a shorter period, invalidates the previous unlock
+ if err = ks.TimedUnlock(a1, pass, 100*time.Millisecond); err != nil {
+ t.Fatal(err)
+ }
+
+ // Signing without passphrase still works because account is temp unlocked
+ _, err = ks.SignHash(Account{Address: a1.Address}, testSigData)
+ if err != nil {
+ t.Fatal("Signing shouldn't return an error after unlocking, got ", err)
+ }
+
+ // Signing fails again after automatic locking
+ time.Sleep(250 * time.Millisecond)
+ _, err = ks.SignHash(Account{Address: a1.Address}, testSigData)
+ if err != ErrLocked {
+		t.Fatal("Signing should've failed with ErrLocked after the timeout expired, got ", err)
+ }
+}
+
+// This test should fail under -race if signing races the expiration goroutine.
+func TestSignRace(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, false)
+
+ // Create a test account.
+ a1, err := ks.NewAccount("")
+ if err != nil {
+ t.Fatal("could not create the test account", err)
+ }
+
+ if err := ks.TimedUnlock(a1, "", 15*time.Millisecond); err != nil {
+ t.Fatal("could not unlock the test account", err)
+ }
+ end := time.Now().Add(500 * time.Millisecond)
+ for time.Now().Before(end) {
+ if _, err := ks.SignHash(Account{Address: a1.Address}, testSigData); err == ErrLocked {
+ return
+ } else if err != nil {
+ t.Errorf("Sign error: %v", err)
+ return
+ }
+ time.Sleep(1 * time.Millisecond)
+ }
+ t.Errorf("Account did not lock within the timeout")
+}
+
+// waitForKsUpdating waits until the updating-status of the ks reaches the
+// desired wantStatus.
+// It waits for a maximum time of maxTime, and returns false if it does not
+// finish in time
+func waitForKsUpdating(t *testing.T, ks *KeyStore, wantStatus bool, maxTime time.Duration) bool {
+ t.Helper()
+	// Poll every 25 ms until maxTime elapses, then return false
+ for t0 := time.Now(); time.Since(t0) < maxTime; {
+ if ks.isUpdating() == wantStatus {
+ return true
+ }
+ time.Sleep(25 * time.Millisecond)
+ }
+ return false
+}
+
+// Tests that the wallet notifier loop starts and stops correctly based on the
+// addition and removal of wallet event subscriptions.
+func TestWalletNotifierLifecycle(t *testing.T) {
+ t.Parallel()
+ // Create a temporary keystore to test with
+ _, ks := tmpKeyStore(t, false)
+
+ // Ensure that the notification updater is not running yet
+ time.Sleep(250 * time.Millisecond)
+
+ if ks.isUpdating() {
+ t.Errorf("wallet notifier running without subscribers")
+ }
+ // Subscribe to the wallet feed and ensure the updater boots up
+ updates := make(chan WalletEvent)
+
+ subs := make([]event.Subscription, 2)
+ for i := 0; i < len(subs); i++ {
+ // Create a new subscription
+ subs[i] = ks.Subscribe(updates)
+ if !waitForKsUpdating(t, ks, true, 250*time.Millisecond) {
+ t.Errorf("sub %d: wallet notifier not running after subscription", i)
+ }
+ }
+ // Close all but one sub
+ for i := 0; i < len(subs)-1; i++ {
+ // Close an existing subscription
+ subs[i].Unsubscribe()
+ }
+ // Check that it is still running
+ time.Sleep(250 * time.Millisecond)
+
+ if !ks.isUpdating() {
+ t.Fatal("event notifier stopped prematurely")
+ }
+ // Unsubscribe the last one and ensure the updater terminates eventually.
+ subs[len(subs)-1].Unsubscribe()
+ if !waitForKsUpdating(t, ks, false, 4*time.Second) {
+ t.Errorf("wallet notifier didn't terminate after unsubscribe")
+ }
+}
+
+type walletEvent struct {
+ WalletEvent
+ a Account
+}
+
+// Tests that wallet notifications are correctly fired when accounts are added
+// or deleted from the keystore.
+func TestWalletNotifications(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, false)
+
+ // Subscribe to the wallet feed and collect events.
+ var (
+ events []walletEvent
+ updates = make(chan WalletEvent)
+ sub = ks.Subscribe(updates)
+ )
+ defer sub.Unsubscribe()
+ go func() {
+ for {
+ select {
+ case ev := <-updates:
+ events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]})
+ case <-sub.Err():
+ close(updates)
+ return
+ }
+ }
+ }()
+
+ // Randomly add and remove accounts.
+ var (
+ live = make(map[common.Address]Account)
+ wantEvents []walletEvent
+ )
+ for i := 0; i < 1024; i++ {
+ if create := len(live) == 0 || rand.Int()%4 > 0; create {
+			// Add a new account and ensure wallet notifications arrive
+ account, err := ks.NewAccount("")
+ if err != nil {
+ t.Fatalf("failed to create test account: %v", err)
+ }
+ live[account.Address] = account
+ wantEvents = append(wantEvents, walletEvent{WalletEvent{Kind: WalletArrived}, account})
+ } else {
+ // Delete a random account.
+ var account Account
+ for _, a := range live {
+ account = a
+ break
+ }
+ if err := ks.Delete(account, ""); err != nil {
+ t.Fatalf("failed to delete test account: %v", err)
+ }
+ delete(live, account.Address)
+ wantEvents = append(wantEvents, walletEvent{WalletEvent{Kind: WalletDropped}, account})
+ }
+ }
+
+ // Shut down the event collector and check events.
+ sub.Unsubscribe()
+ for ev := range updates {
+ events = append(events, walletEvent{ev, ev.Wallet.Accounts()[0]})
+ }
+ checkAccounts(t, live, ks.Wallets())
+ checkEvents(t, wantEvents, events)
+}
+
+// TestImportECDSA tests the import functionality of a keystore.
+func TestImportECDSA(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, true)
+ key, err := crypto.GenerateKey()
+ if err != nil {
+		t.Fatalf("failed to generate key: %v", err)
+ }
+ if _, err = ks.ImportECDSA(key, "old"); err != nil {
+ t.Errorf("importing failed: %v", err)
+ }
+ if _, err = ks.ImportECDSA(key, "old"); err == nil {
+ t.Errorf("importing same key twice succeeded")
+ }
+ if _, err = ks.ImportECDSA(key, "new"); err == nil {
+ t.Errorf("importing same key twice succeeded")
+ }
+}
+
+// TestImportExport tests the import and export functionality of a keystore.
+func TestImportExport(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, true)
+ acc, err := ks.NewAccount("old")
+ if err != nil {
+		t.Fatalf("failed to create account: %v", err)
+ }
+ json, err := ks.Export(acc, "old", "new")
+ if err != nil {
+		t.Fatalf("failed to export account: %v", err)
+ }
+ _, ks2 := tmpKeyStore(t, true)
+ if _, err = ks2.Import(json, "old", "old"); err == nil {
+ t.Errorf("importing with invalid password succeeded")
+ }
+ acc2, err := ks2.Import(json, "new", "new")
+ if err != nil {
+ t.Errorf("importing failed: %v", err)
+ }
+ if acc.Address != acc2.Address {
+ t.Error("imported account does not match exported account")
+ }
+ if _, err = ks2.Import(json, "new", "new"); err == nil {
+ t.Errorf("importing a key twice succeeded")
+ }
+}
+
+// TestImportRace tests the keystore on races.
+// This test should fail under -race if importing races.
+func TestImportRace(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStore(t, true)
+ acc, err := ks.NewAccount("old")
+ if err != nil {
+		t.Fatalf("failed to create account: %v", err)
+ }
+ json, err := ks.Export(acc, "old", "new")
+ if err != nil {
+		t.Fatalf("failed to export account: %v", err)
+ }
+ _, ks2 := tmpKeyStore(t, true)
+ var atom atomic.Uint32
+ var wg sync.WaitGroup
+ wg.Add(2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ defer wg.Done()
+ if _, err := ks2.Import(json, "new", "new"); err != nil {
+ atom.Add(1)
+ }
+ }()
+ }
+ wg.Wait()
+ if atom.Load() != 1 {
+ t.Errorf("Import is racy")
+ }
+}
+
+// checkAccounts checks that all known live accounts are present in the wallet list.
+func checkAccounts(t *testing.T, live map[common.Address]Account, wallets []Wallet) {
+ if len(live) != len(wallets) {
+ t.Errorf("wallet list doesn't match required accounts: have %d, want %d", len(wallets), len(live))
+ return
+ }
+ liveList := make([]Account, 0, len(live))
+ for _, account := range live {
+ liveList = append(liveList, account)
+ }
+ slices.SortFunc(liveList, byURL)
+ for j, wallet := range wallets {
+ if accs := wallet.Accounts(); len(accs) != 1 {
+ t.Errorf("wallet %d: contains invalid number of accounts: have %d, want 1", j, len(accs))
+ } else if accs[0] != liveList[j] {
+ t.Errorf("wallet %d: account mismatch: have %v, want %v", j, accs[0], liveList[j])
+ }
+ }
+}
+
+// checkEvents checks that all events in 'want' are present in 'have'. Events may be present multiple times.
+func checkEvents(t *testing.T, want []walletEvent, have []walletEvent) {
+ for _, wantEv := range want {
+ nmatch := 0
+ for ; len(have) > 0; nmatch++ {
+ if have[0].Kind != wantEv.Kind || have[0].a != wantEv.a {
+ break
+ }
+ have = have[1:]
+ }
+ if nmatch == 0 {
+ t.Fatalf("can't find event with Kind=%v for %x", wantEv.Kind, wantEv.a.Address)
+ }
+ }
+}
+
+func tmpKeyStore(t *testing.T, encrypted bool) (string, *KeyStore) {
+ d := t.TempDir()
+ newKs := NewPlaintextKeyStore
+ if encrypted {
+ newKs = func(kd string) *KeyStore { return NewKeyStore(kd, veryLightScryptN, veryLightScryptP) }
+ }
+ return d, newKs(d)
+}
diff --git a/arb/keystore/passphrase.go b/arb/keystore/passphrase.go
new file mode 100644
index 00000000000..78dd5e62062
--- /dev/null
+++ b/arb/keystore/passphrase.go
@@ -0,0 +1,367 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+/*
+
+This key store behaves as KeyStorePlain with the difference that
+the private key is encrypted and on disk uses another JSON encoding.
+
+The crypto is documented at https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition
+
+*/
+
+package keystore
+
+import (
+ "bytes"
+ "crypto/aes"
+ "crypto/rand"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/math"
+ "github.com/erigontech/erigon/crypto"
+ "github.com/google/uuid"
+ "golang.org/x/crypto/pbkdf2"
+ "golang.org/x/crypto/scrypt"
+)
+
+const (
+ keyHeaderKDF = "scrypt"
+
+ // StandardScryptN is the N parameter of Scrypt encryption algorithm, using 256MB
+ // memory and taking approximately 1s CPU time on a modern processor.
+ StandardScryptN = 1 << 18
+
+ // StandardScryptP is the P parameter of Scrypt encryption algorithm, using 256MB
+ // memory and taking approximately 1s CPU time on a modern processor.
+ StandardScryptP = 1
+
+ // LightScryptN is the N parameter of Scrypt encryption algorithm, using 4MB
+ // memory and taking approximately 100ms CPU time on a modern processor.
+ LightScryptN = 1 << 12
+
+ // LightScryptP is the P parameter of Scrypt encryption algorithm, using 4MB
+ // memory and taking approximately 100ms CPU time on a modern processor.
+ LightScryptP = 6
+
+ scryptR = 8
+ scryptDKLen = 32
+)
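+
+// Scrypt uses roughly 128 * N * r bytes of memory, so the parameter sets above
+// come out to 128 * 262144 * 8 ≈ 256MB (standard) and 128 * 4096 * 8 ≈ 4MB
+// (light), matching the figures quoted in the comments.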
+
+type keyStorePassphrase struct {
+ keysDirPath string
+ scryptN int
+ scryptP int
+	// skipKeyFileVerification disables the security feature which reads back
+	// and decrypts any newly created keyfiles. This should be 'false' in all
+ // cases except tests -- setting this to 'true' is not recommended.
+ skipKeyFileVerification bool
+}
+
+func (ks keyStorePassphrase) GetKey(addr common.Address, filename, auth string) (*Key, error) {
+ // Load the key from the keystore and decrypt its contents
+ keyjson, err := os.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ key, err := DecryptKey(keyjson, auth)
+ if err != nil {
+ return nil, err
+ }
+ // Make sure we're really operating on the requested key (no swap attacks)
+ if key.Address != addr {
+ return nil, fmt.Errorf("key content mismatch: have account %x, want %x", key.Address, addr)
+ }
+ return key, nil
+}
+
+// StoreKey generates a key, encrypts with 'auth' and stores in the given directory
+func StoreKey(dir, auth string, scryptN, scryptP int) (Account, error) {
+ _, a, err := storeNewKey(&keyStorePassphrase{dir, scryptN, scryptP, false}, rand.Reader, auth)
+ return a, err
+}
+
+func (ks keyStorePassphrase) StoreKey(filename string, key *Key, auth string) error {
+ keyjson, err := EncryptKey(key, auth, ks.scryptN, ks.scryptP)
+ if err != nil {
+ return err
+ }
+ // Write into temporary file
+ tmpName, err := writeTemporaryKeyFile(filename, keyjson)
+ if err != nil {
+ return err
+ }
+ if !ks.skipKeyFileVerification {
+ // Verify that we can decrypt the file with the given password.
+ _, err = ks.GetKey(key.Address, tmpName, auth)
+ if err != nil {
+ msg := "An error was encountered when saving and verifying the keystore file. \n" +
+ "This indicates that the keystore is corrupted. \n" +
+ "The corrupted file is stored at \n%v\n" +
+ "Please file a ticket at:\n\n" +
+			"https://github.com/ethereum/go-ethereum/issues.\n" +
+			"The error was: %s"
+ //lint:ignore ST1005 This is a message for the user
+ return fmt.Errorf(msg, tmpName, err)
+ }
+ }
+ return os.Rename(tmpName, filename)
+}
+
+func (ks keyStorePassphrase) JoinPath(filename string) string {
+ if filepath.IsAbs(filename) {
+ return filename
+ }
+ return filepath.Join(ks.keysDirPath, filename)
+}
+
+// EncryptDataV3 encrypts the data given as 'data' with the password 'auth'.
+func EncryptDataV3(data, auth []byte, scryptN, scryptP int) (CryptoJSON, error) {
+ salt := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, salt); err != nil {
+ panic("reading from crypto/rand failed: " + err.Error())
+ }
+ derivedKey, err := scrypt.Key(auth, salt, scryptN, scryptR, scryptP, scryptDKLen)
+ if err != nil {
+ return CryptoJSON{}, err
+ }
+ encryptKey := derivedKey[:16]
+
+ iv := make([]byte, aes.BlockSize) // 16
+ if _, err := io.ReadFull(rand.Reader, iv); err != nil {
+ panic("reading from crypto/rand failed: " + err.Error())
+ }
+ cipherText, err := aesCTRXOR(encryptKey, data, iv)
+ if err != nil {
+ return CryptoJSON{}, err
+ }
+ mac := crypto.Keccak256(derivedKey[16:32], cipherText)
+
+ scryptParamsJSON := make(map[string]interface{}, 5)
+ scryptParamsJSON["n"] = scryptN
+ scryptParamsJSON["r"] = scryptR
+ scryptParamsJSON["p"] = scryptP
+ scryptParamsJSON["dklen"] = scryptDKLen
+ scryptParamsJSON["salt"] = hex.EncodeToString(salt)
+ cipherParamsJSON := cipherparamsJSON{
+ IV: hex.EncodeToString(iv),
+ }
+
+ cryptoStruct := CryptoJSON{
+ Cipher: "aes-128-ctr",
+ CipherText: hex.EncodeToString(cipherText),
+ CipherParams: cipherParamsJSON,
+ KDF: keyHeaderKDF,
+ KDFParams: scryptParamsJSON,
+ MAC: hex.EncodeToString(mac),
+ }
+ return cryptoStruct, nil
+}
+
+// EncryptKey encrypts a key using the specified scrypt parameters into a json
+// blob that can be decrypted later on.
+func EncryptKey(key *Key, auth string, scryptN, scryptP int) ([]byte, error) {
+ keyBytes := math.PaddedBigBytes(key.PrivateKey.D, 32)
+ cryptoStruct, err := EncryptDataV3(keyBytes, []byte(auth), scryptN, scryptP)
+ if err != nil {
+ return nil, err
+ }
+ encryptedKeyJSONV3 := encryptedKeyJSONV3{
+ hex.EncodeToString(key.Address[:]),
+ cryptoStruct,
+ key.Id.String(),
+ version,
+ }
+ return json.Marshal(encryptedKeyJSONV3)
+}
+
+// DecryptKey decrypts a key from a json blob, returning the private key itself.
+func DecryptKey(keyjson []byte, auth string) (*Key, error) {
+ // Parse the json into a simple map to fetch the key version
+ m := make(map[string]interface{})
+ if err := json.Unmarshal(keyjson, &m); err != nil {
+ return nil, err
+ }
+ // Depending on the version try to parse one way or another
+ var (
+ keyBytes, keyId []byte
+ err error
+ )
+ if version, ok := m["version"].(string); ok && version == "1" {
+ k := new(encryptedKeyJSONV1)
+ if err := json.Unmarshal(keyjson, k); err != nil {
+ return nil, err
+ }
+ keyBytes, keyId, err = decryptKeyV1(k, auth)
+ } else {
+ k := new(encryptedKeyJSONV3)
+ if err := json.Unmarshal(keyjson, k); err != nil {
+ return nil, err
+ }
+ keyBytes, keyId, err = decryptKeyV3(k, auth)
+ }
+ // Handle any decryption errors and return the key
+ if err != nil {
+ return nil, err
+ }
+ key, err := crypto.ToECDSA(keyBytes)
+ if err != nil {
+ return nil, fmt.Errorf("invalid key: %w", err)
+ }
+ id, err := uuid.FromBytes(keyId)
+ if err != nil {
+ return nil, fmt.Errorf("invalid UUID: %w", err)
+ }
+ return &Key{
+ Id: id,
+ Address: crypto.PubkeyToAddress(key.PublicKey),
+ PrivateKey: key,
+ }, nil
+}
+
+func DecryptDataV3(cryptoJson CryptoJSON, auth string) ([]byte, error) {
+ if cryptoJson.Cipher != "aes-128-ctr" {
+ return nil, fmt.Errorf("cipher not supported: %v", cryptoJson.Cipher)
+ }
+ mac, err := hex.DecodeString(cryptoJson.MAC)
+ if err != nil {
+ return nil, err
+ }
+
+ iv, err := hex.DecodeString(cryptoJson.CipherParams.IV)
+ if err != nil {
+ return nil, err
+ }
+
+ cipherText, err := hex.DecodeString(cryptoJson.CipherText)
+ if err != nil {
+ return nil, err
+ }
+
+ derivedKey, err := getKDFKey(cryptoJson, auth)
+ if err != nil {
+ return nil, err
+ }
+
+ calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
+ if !bytes.Equal(calculatedMAC, mac) {
+ return nil, ErrDecrypt
+ }
+
+ plainText, err := aesCTRXOR(derivedKey[:16], cipherText, iv)
+ if err != nil {
+ return nil, err
+ }
+ return plainText, err
+}
+
+func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byte, keyId []byte, err error) {
+ if keyProtected.Version != version {
+ return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version)
+ }
+ keyUUID, err := uuid.Parse(keyProtected.Id)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyId = keyUUID[:]
+ plainText, err := DecryptDataV3(keyProtected.Crypto, auth)
+ if err != nil {
+ return nil, nil, err
+ }
+ return plainText, keyId, err
+}
+
+func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) {
+ keyUUID, err := uuid.Parse(keyProtected.Id)
+ if err != nil {
+ return nil, nil, err
+ }
+ keyId = keyUUID[:]
+ mac, err := hex.DecodeString(keyProtected.Crypto.MAC)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ iv, err := hex.DecodeString(keyProtected.Crypto.CipherParams.IV)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ cipherText, err := hex.DecodeString(keyProtected.Crypto.CipherText)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ derivedKey, err := getKDFKey(keyProtected.Crypto, auth)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ calculatedMAC := crypto.Keccak256(derivedKey[16:32], cipherText)
+ if !bytes.Equal(calculatedMAC, mac) {
+ return nil, nil, ErrDecrypt
+ }
+
+ plainText, err := aesCBCDecrypt(crypto.Keccak256(derivedKey[:16])[:16], cipherText, iv)
+ if err != nil {
+ return nil, nil, err
+ }
+ return plainText, keyId, err
+}
+
+func getKDFKey(cryptoJSON CryptoJSON, auth string) ([]byte, error) {
+ authArray := []byte(auth)
+ salt, err := hex.DecodeString(cryptoJSON.KDFParams["salt"].(string))
+ if err != nil {
+ return nil, err
+ }
+ dkLen := ensureInt(cryptoJSON.KDFParams["dklen"])
+
+ if cryptoJSON.KDF == keyHeaderKDF {
+ n := ensureInt(cryptoJSON.KDFParams["n"])
+ r := ensureInt(cryptoJSON.KDFParams["r"])
+ p := ensureInt(cryptoJSON.KDFParams["p"])
+ return scrypt.Key(authArray, salt, n, r, p, dkLen)
+ } else if cryptoJSON.KDF == "pbkdf2" {
+ c := ensureInt(cryptoJSON.KDFParams["c"])
+ prf := cryptoJSON.KDFParams["prf"].(string)
+ if prf != "hmac-sha256" {
+ return nil, fmt.Errorf("unsupported PBKDF2 PRF: %s", prf)
+ }
+ key := pbkdf2.Key(authArray, salt, c, dkLen, sha256.New)
+ return key, nil
+ }
+
+ return nil, fmt.Errorf("unsupported KDF: %s", cryptoJSON.KDF)
+}
+
+// TODO: can we do without this when unmarshalling dynamic JSON?
+// why do integers in KDF params end up as float64 and not int after
+// unmarshal?
+func ensureInt(x interface{}) int {
+ res, ok := x.(int)
+ if !ok {
+ res = int(x.(float64))
+ }
+ return res
+}
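A minimal round-trip sketch of how a caller might drive EncryptKey and DecryptKey above. It is not part of the diff: the import path simply mirrors where this change places the package (arb/keystore), and the availability of crypto.GenerateKey and uuid.New is an assumption carried over from go-ethereum; the weak scrypt parameters are chosen only for speed.

```go
// Sketch only: round-trips a freshly generated key through the v3 keystore format.
package main

import (
	"fmt"
	"log"

	"github.com/erigontech/erigon/arb/keystore" // assumed import path, per this diff's layout
	"github.com/erigontech/erigon/crypto"
	"github.com/google/uuid"
)

func main() {
	priv, err := crypto.GenerateKey() // assumed to exist, as in go-ethereum
	if err != nil {
		log.Fatal(err)
	}
	key := &keystore.Key{
		Id:         uuid.New(),
		Address:    crypto.PubkeyToAddress(priv.PublicKey),
		PrivateKey: priv,
	}

	// Deliberately weak scrypt parameters (N=2, P=1): fast for a demo, unsafe for real keys.
	blob, err := keystore.EncryptKey(key, "correct horse", 2, 1)
	if err != nil {
		log.Fatal(err)
	}

	back, err := keystore.DecryptKey(blob, "correct horse")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back.Address == key.Address) // true: same key after the round trip
}
```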
diff --git a/arb/keystore/passphrase_test.go b/arb/keystore/passphrase_test.go
new file mode 100644
index 00000000000..4558fcd3868
--- /dev/null
+++ b/arb/keystore/passphrase_test.go
@@ -0,0 +1,61 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "os"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+)
+
+const (
+ veryLightScryptN = 2
+ veryLightScryptP = 1
+)
+
+// Tests that a json key file can be decrypted and encrypted in multiple rounds.
+func TestKeyEncryptDecrypt(t *testing.T) {
+ t.Parallel()
+ keyjson, err := os.ReadFile("testdata/very-light-scrypt.json")
+ if err != nil {
+ t.Fatal(err)
+ }
+ password := ""
+ address := common.HexToAddress("45dea0fb0bba44f4fcf290bba71fd57d7117cbb8")
+
+ // Do a few rounds of decryption and encryption
+ for i := 0; i < 3; i++ {
+ // Try a bad password first
+ if _, err := DecryptKey(keyjson, password+"bad"); err == nil {
+ t.Errorf("test %d: json key decrypted with bad password", i)
+ }
+ // Decrypt with the correct password
+ key, err := DecryptKey(keyjson, password)
+ if err != nil {
+ t.Fatalf("test %d: json key failed to decrypt: %v", i, err)
+ }
+ if key.Address != address {
+ t.Errorf("test %d: key address mismatch: have %x, want %x", i, key.Address, address)
+ }
+ // Recrypt with a new password and start over
+ password += "new data appended" // nolint: gosec
+ if keyjson, err = EncryptKey(key, password, veryLightScryptN, veryLightScryptP); err != nil {
+ t.Errorf("test %d: failed to re-encrypt key %v", i, err)
+ }
+ }
+}
diff --git a/arb/keystore/plain.go b/arb/keystore/plain.go
new file mode 100644
index 00000000000..cb03bb16c3a
--- /dev/null
+++ b/arb/keystore/plain.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/erigontech/erigon/common"
+)
+
+type keyStorePlain struct {
+ keysDirPath string
+}
+
+func (ks keyStorePlain) GetKey(addr common.Address, filename, auth string) (*Key, error) {
+ fd, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+ key := new(Key)
+ if err := json.NewDecoder(fd).Decode(key); err != nil {
+ return nil, err
+ }
+ if key.Address != addr {
+ return nil, fmt.Errorf("key content mismatch: have address %x, want %x", key.Address, addr)
+ }
+ return key, nil
+}
+
+func (ks keyStorePlain) StoreKey(filename string, key *Key, auth string) error {
+ content, err := json.Marshal(key)
+ if err != nil {
+ return err
+ }
+ return writeKeyFile(filename, content)
+}
+
+func (ks keyStorePlain) JoinPath(filename string) string {
+ if filepath.IsAbs(filename) {
+ return filename
+ }
+ return filepath.Join(ks.keysDirPath, filename)
+}
diff --git a/arb/keystore/plain_test.go b/arb/keystore/plain_test.go
new file mode 100644
index 00000000000..ef9725a6d07
--- /dev/null
+++ b/arb/keystore/plain_test.go
@@ -0,0 +1,262 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/dir"
+ "github.com/erigontech/erigon/crypto"
+)
+
+func tmpKeyStoreIface(t *testing.T, encrypted bool) (dir string, ks keyStore) {
+ d := t.TempDir()
+ if encrypted {
+ ks = &keyStorePassphrase{d, veryLightScryptN, veryLightScryptP, true}
+ } else {
+ ks = &keyStorePlain{d}
+ }
+ return d, ks
+}
+
+func TestKeyStorePlain(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStoreIface(t, false)
+
+ pass := "" // not used but required by API
+ k1, account, err := storeNewKey(ks, rand.Reader, pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+ k2, err := ks.GetKey(k1.Address, account.URL.Path, pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+	if !reflect.DeepEqual(k1.Address, k2.Address) {
+		t.Fatal("key address mismatch between stored and loaded key")
+	}
+	if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) {
+		t.Fatal("private key mismatch between stored and loaded key")
+	}
+}
+
+func TestKeyStorePassphrase(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStoreIface(t, true)
+
+ pass := "foo"
+ k1, account, err := storeNewKey(ks, rand.Reader, pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+ k2, err := ks.GetKey(k1.Address, account.URL.Path, pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+	if !reflect.DeepEqual(k1.Address, k2.Address) {
+		t.Fatal("key address mismatch between stored and loaded key")
+	}
+	if !reflect.DeepEqual(k1.PrivateKey, k2.PrivateKey) {
+		t.Fatal("private key mismatch between stored and loaded key")
+	}
+}
+
+func TestKeyStorePassphraseDecryptionFail(t *testing.T) {
+ t.Parallel()
+ _, ks := tmpKeyStoreIface(t, true)
+
+ pass := "foo"
+ k1, account, err := storeNewKey(ks, rand.Reader, pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err = ks.GetKey(k1.Address, account.URL.Path, "bar"); err != ErrDecrypt {
+ t.Fatalf("wrong error for invalid password\ngot %q\nwant %q", err, ErrDecrypt)
+ }
+}
+
+func TestImportPreSaleKey(t *testing.T) {
+ t.Parallel()
+ dir, ks := tmpKeyStoreIface(t, true)
+
+ // file content of a presale key file generated with:
+ // python pyethsaletool.py genwallet
+ // with password "foo"
+ fileContent := "{\"encseed\": \"26d87f5f2bf9835f9a47eefae571bc09f9107bb13d54ff12a4ec095d01f83897494cf34f7bed2ed34126ecba9db7b62de56c9d7cd136520a0427bfb11b8954ba7ac39b90d4650d3448e31185affcd74226a68f1e94b1108e6e0a4a91cdd83eba\", \"ethaddr\": \"d4584b5f6229b7be90727b0fc8c6b91bb427821f\", \"email\": \"gustav.simonsson@gmail.com\", \"btcaddr\": \"1EVknXyFC68kKNLkh6YnKzW41svSRoaAcx\"}"
+ pass := "foo"
+ account, _, err := importPreSaleKey(ks, []byte(fileContent), pass)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if account.Address != common.HexToAddress("d4584b5f6229b7be90727b0fc8c6b91bb427821f") {
+ t.Errorf("imported account has wrong address %x", account.Address)
+ }
+ if !strings.HasPrefix(account.URL.Path, dir) {
+ t.Errorf("imported account file not in keystore directory: %q", account.URL)
+ }
+}
+
+// Tests and utilities for the key store tests in the Ethereum JSON tests;
+// tests/testdata/KeyStoreTests/basic_tests.json
+type KeyStoreTestV3 struct {
+ Json encryptedKeyJSONV3
+ Password string
+ Priv string
+}
+
+type KeyStoreTestV1 struct {
+ Json encryptedKeyJSONV1
+ Password string
+ Priv string
+}
+
+func TestV3_PBKDF2_1(t *testing.T) {
+ t.Parallel()
+ tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
+ testDecryptV3(tests["wikipage_test_vector_pbkdf2"], t)
+}
+
+var testsSubmodule = filepath.Join("..", "..", "tests", "testdata", "KeyStoreTests")
+
+func skipIfSubmoduleMissing(t *testing.T) {
+ if ok, _ := dir.FileExist(testsSubmodule); !ok {
+ t.Skipf("can't find JSON tests from submodule at %s", testsSubmodule)
+ }
+}
+
+func TestV3_PBKDF2_2(t *testing.T) {
+ skipIfSubmoduleMissing(t)
+ t.Parallel()
+ tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
+ testDecryptV3(tests["test1"], t)
+}
+
+func TestV3_PBKDF2_3(t *testing.T) {
+ skipIfSubmoduleMissing(t)
+ t.Parallel()
+ tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
+ testDecryptV3(tests["python_generated_test_with_odd_iv"], t)
+}
+
+func TestV3_PBKDF2_4(t *testing.T) {
+ skipIfSubmoduleMissing(t)
+ t.Parallel()
+ tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
+ testDecryptV3(tests["evilnonce"], t)
+}
+
+func TestV3_Scrypt_1(t *testing.T) {
+ t.Parallel()
+ tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
+ testDecryptV3(tests["wikipage_test_vector_scrypt"], t)
+}
+
+func TestV3_Scrypt_2(t *testing.T) {
+ skipIfSubmoduleMissing(t)
+ t.Parallel()
+ tests := loadKeyStoreTestV3(filepath.Join(testsSubmodule, "basic_tests.json"), t)
+ testDecryptV3(tests["test2"], t)
+}
+
+func TestV1_1(t *testing.T) {
+ t.Parallel()
+ tests := loadKeyStoreTestV1("testdata/v1_test_vector.json", t)
+ testDecryptV1(tests["test1"], t)
+}
+
+func TestV1_2(t *testing.T) {
+ t.Parallel()
+ ks := &keyStorePassphrase{"testdata/v1", LightScryptN, LightScryptP, true}
+ addr := common.HexToAddress("cb61d5a9c4896fb9658090b597ef0e7be6f7b67e")
+ file := "testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e"
+ k, err := ks.GetKey(addr, file, "g")
+ if err != nil {
+ t.Fatal(err)
+ }
+ privHex := hex.EncodeToString(crypto.FromECDSA(k.PrivateKey))
+ expectedHex := "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d"
+ if privHex != expectedHex {
+ t.Fatal(fmt.Errorf("Unexpected privkey: %v, expected %v", privHex, expectedHex))
+ }
+}
+
+func testDecryptV3(test KeyStoreTestV3, t *testing.T) {
+ privBytes, _, err := decryptKeyV3(&test.Json, test.Password)
+ if err != nil {
+ t.Fatal(err)
+ }
+ privHex := hex.EncodeToString(privBytes)
+ if test.Priv != privHex {
+ t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex))
+ }
+}
+
+func testDecryptV1(test KeyStoreTestV1, t *testing.T) {
+ privBytes, _, err := decryptKeyV1(&test.Json, test.Password)
+ if err != nil {
+ t.Fatal(err)
+ }
+ privHex := hex.EncodeToString(privBytes)
+ if test.Priv != privHex {
+ t.Fatal(fmt.Errorf("Decrypted bytes not equal to test, expected %v have %v", test.Priv, privHex))
+ }
+}
+
+func loadKeyStoreTestV3(file string, t *testing.T) map[string]KeyStoreTestV3 {
+ tests := make(map[string]KeyStoreTestV3)
+ err := common.LoadJSON(file, &tests)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return tests
+}
+
+func loadKeyStoreTestV1(file string, t *testing.T) map[string]KeyStoreTestV1 {
+ tests := make(map[string]KeyStoreTestV1)
+ err := common.LoadJSON(file, &tests)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return tests
+}
+
+func TestKeyForDirectICAP(t *testing.T) {
+ t.Parallel()
+ key := NewKeyForDirectICAP(rand.Reader)
+ if !strings.HasPrefix(key.Address.Hex(), "0x00") {
+ t.Errorf("Expected first address byte to be zero, have: %s", key.Address.Hex())
+ }
+}
+
+func TestV3_31_Byte_Key(t *testing.T) {
+ t.Parallel()
+ tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
+ testDecryptV3(tests["31_byte_key"], t)
+}
+
+func TestV3_30_Byte_Key(t *testing.T) {
+ t.Parallel()
+ tests := loadKeyStoreTestV3("testdata/v3_test_vector.json", t)
+ testDecryptV3(tests["30_byte_key"], t)
+}
diff --git a/arb/keystore/presale.go b/arb/keystore/presale.go
new file mode 100644
index 00000000000..ef1f48143e5
--- /dev/null
+++ b/arb/keystore/presale.go
@@ -0,0 +1,149 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "crypto/aes"
+ "crypto/cipher"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/erigontech/erigon/crypto"
+ "github.com/google/uuid"
+ "golang.org/x/crypto/pbkdf2"
+)
+
+// importPreSaleKey decrypts a presale key JSON blob, creates a Key from it and stores it in the given keyStore.
+func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (Account, *Key, error) {
+ key, err := decryptPreSaleKey(keyJSON, password)
+ if err != nil {
+ return Account{}, nil, err
+ }
+ key.Id, err = uuid.NewRandom()
+ if err != nil {
+ return Account{}, nil, err
+ }
+ a := Account{
+ Address: key.Address,
+ URL: URL{
+ Scheme: KeyStoreScheme,
+ Path: keyStore.JoinPath(keyFileName(key.Address)),
+ },
+ }
+ err = keyStore.StoreKey(a.URL.Path, key, password)
+ return a, key, err
+}
+
+func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error) {
+ preSaleKeyStruct := struct {
+ EncSeed string
+ EthAddr string
+ Email string
+ BtcAddr string
+ }{}
+ err = json.Unmarshal(fileContent, &preSaleKeyStruct)
+ if err != nil {
+ return nil, err
+ }
+ encSeedBytes, err := hex.DecodeString(preSaleKeyStruct.EncSeed)
+ if err != nil {
+ return nil, errors.New("invalid hex in encSeed")
+ }
+ if len(encSeedBytes) < 16 {
+ return nil, errors.New("invalid encSeed, too short")
+ }
+ iv := encSeedBytes[:16]
+ cipherText := encSeedBytes[16:]
+	/*
+		See https://github.com/ethereum/pyethsaletool
+
+		pyethsaletool derives the encryption key from the password with
+		2000 rounds of PBKDF2 (HMAC-SHA-256), using the password itself as
+		the salt. The 16-byte PBKDF2 output is then used as the AES key.
+	*/
+ passBytes := []byte(password)
+ derivedKey := pbkdf2.Key(passBytes, passBytes, 2000, 16, sha256.New)
+ plainText, err := aesCBCDecrypt(derivedKey, cipherText, iv)
+ if err != nil {
+ return nil, err
+ }
+ ethPriv := crypto.Keccak256(plainText)
+ ecKey := crypto.ToECDSAUnsafe(ethPriv)
+
+ key = &Key{
+ Id: uuid.UUID{},
+ Address: crypto.PubkeyToAddress(ecKey.PublicKey),
+ PrivateKey: ecKey,
+ }
+ derivedAddr := hex.EncodeToString(key.Address.Bytes()) // needed because .Hex() gives leading "0x"
+ expectedAddr := preSaleKeyStruct.EthAddr
+ if derivedAddr != expectedAddr {
+ err = fmt.Errorf("decrypted addr '%s' not equal to expected addr '%s'", derivedAddr, expectedAddr)
+ }
+ return key, err
+}
+
+func aesCTRXOR(key, inText, iv []byte) ([]byte, error) {
+ // AES-128 is selected due to size of encryptKey.
+ aesBlock, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+ stream := cipher.NewCTR(aesBlock, iv)
+ outText := make([]byte, len(inText))
+ stream.XORKeyStream(outText, inText)
+ return outText, err
+}
+
+func aesCBCDecrypt(key, cipherText, iv []byte) ([]byte, error) {
+ aesBlock, err := aes.NewCipher(key)
+ if err != nil {
+ return nil, err
+ }
+ decrypter := cipher.NewCBCDecrypter(aesBlock, iv)
+ paddedPlaintext := make([]byte, len(cipherText))
+ decrypter.CryptBlocks(paddedPlaintext, cipherText)
+ plaintext := pkcs7Unpad(paddedPlaintext)
+ if plaintext == nil {
+ return nil, ErrDecrypt
+ }
+ return plaintext, err
+}
+
+// From https://leanpub.com/gocrypto/read#leanpub-auto-block-cipher-modes
+func pkcs7Unpad(in []byte) []byte {
+ if len(in) == 0 {
+ return nil
+ }
+
+ padding := in[len(in)-1]
+ if int(padding) > len(in) || padding > aes.BlockSize {
+ return nil
+ } else if padding == 0 {
+ return nil
+ }
+
+ for i := len(in) - 1; i > len(in)-int(padding)-1; i-- {
+ if in[i] != padding {
+ return nil
+ }
+ }
+ return in[:len(in)-int(padding)]
+}
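pkcs7Unpad above accepts only strict PKCS#7 padding: it rejects empty input, a zero padding byte, padding longer than the input or the AES block size, and inconsistent padding bytes. A small table-driven sketch of those rules, written as a hypothetical test in the same package:

```go
package keystore

import (
	"bytes"
	"testing"
)

// Hypothetical test: exercises pkcs7Unpad's accept/reject rules.
func TestPKCS7UnpadSketch(t *testing.T) {
	cases := []struct {
		in   []byte
		want []byte // nil means the input must be rejected
	}{
		{[]byte{'a', 'b', 'c', 1}, []byte{'a', 'b', 'c'}}, // one byte of padding
		{[]byte{'a', 'b', 2, 2}, []byte{'a', 'b'}},        // two bytes of padding
		{[]byte{'a', 'b', 'c', 0}, nil},                   // zero padding byte
		{[]byte{'a', 'b', 1, 2}, nil},                     // inconsistent padding bytes
		{[]byte{5}, nil},                                  // padding longer than the input
		{nil, nil},                                        // empty input
	}
	for i, c := range cases {
		if got := pkcs7Unpad(c.in); !bytes.Equal(got, c.want) {
			t.Errorf("case %d: got %v, want %v", i, got, c.want)
		}
	}
}
```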
diff --git a/arb/keystore/testdata/dupes/1 b/arb/keystore/testdata/dupes/1
new file mode 100644
index 00000000000..a3868ec6d54
--- /dev/null
+++ b/arb/keystore/testdata/dupes/1
@@ -0,0 +1 @@
+{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/dupes/2 b/arb/keystore/testdata/dupes/2
new file mode 100644
index 00000000000..a3868ec6d54
--- /dev/null
+++ b/arb/keystore/testdata/dupes/2
@@ -0,0 +1 @@
+{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/dupes/foo b/arb/keystore/testdata/dupes/foo
new file mode 100644
index 00000000000..c57060aea03
--- /dev/null
+++ b/arb/keystore/testdata/dupes/foo
@@ -0,0 +1 @@
+{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/keystore/.hiddenfile b/arb/keystore/testdata/keystore/.hiddenfile
new file mode 100644
index 00000000000..d91faccdeb9
--- /dev/null
+++ b/arb/keystore/testdata/keystore/.hiddenfile
@@ -0,0 +1 @@
+{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
diff --git a/arb/keystore/testdata/keystore/README b/arb/keystore/testdata/keystore/README
new file mode 100644
index 00000000000..6af9ac3f1ba
--- /dev/null
+++ b/arb/keystore/testdata/keystore/README
@@ -0,0 +1,21 @@
+This directory contains accounts for testing.
+The password that unlocks them is "foobar".
+
+The "good" key files which are supposed to be loadable are:
+
+- File: UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
+ Address: 0x7ef5a6135f1fd6a02593eedc869c6d41d934aef8
+- File: aaa
+ Address: 0xf466859ead1932d743d622cb74fc058882e8648a
+- File: zzz
+ Address: 0x289d485d9771714cce91d3393d764e1311907acc
+
+The other files (including this README) are broken in various ways
+and should not be picked up by the keystore package:
+
+- File: no-address (missing address field, otherwise same as "aaa")
+- File: garbage (file with random data)
+- File: empty (file with no content)
+- File: swapfile~ (should be skipped)
+- File: .hiddenfile (should be skipped)
+- File: foo/... (should be skipped because it is a directory)
diff --git a/arb/keystore/testdata/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8 b/arb/keystore/testdata/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
new file mode 100644
index 00000000000..c57060aea03
--- /dev/null
+++ b/arb/keystore/testdata/keystore/UTC--2016-03-22T12-57-55.920751759Z--7ef5a6135f1fd6a02593eedc869c6d41d934aef8
@@ -0,0 +1 @@
+{"address":"7ef5a6135f1fd6a02593eedc869c6d41d934aef8","crypto":{"cipher":"aes-128-ctr","ciphertext":"1d0839166e7a15b9c1333fc865d69858b22df26815ccf601b28219b6192974e1","cipherparams":{"iv":"8df6caa7ff1b00c4e871f002cb7921ed"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"e5e6ef3f4ea695f496b643ebd3f75c0aa58ef4070e90c80c5d3fb0241bf1595c"},"mac":"6d16dfde774845e4585357f24bce530528bc69f4f84e1e22880d34fa45c273e5"},"id":"950077c7-71e3-4c44-a4a1-143919141ed4","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/keystore/aaa b/arb/keystore/testdata/keystore/aaa
new file mode 100644
index 00000000000..a3868ec6d54
--- /dev/null
+++ b/arb/keystore/testdata/keystore/aaa
@@ -0,0 +1 @@
+{"address":"f466859ead1932d743d622cb74fc058882e8648a","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/keystore/empty b/arb/keystore/testdata/keystore/empty
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/arb/keystore/testdata/keystore/foo/fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e b/arb/keystore/testdata/keystore/foo/fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e
new file mode 100644
index 00000000000..309841e524b
--- /dev/null
+++ b/arb/keystore/testdata/keystore/foo/fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e
@@ -0,0 +1 @@
+{"address":"fd9bd350f08ee3c0c19b85a8e16114a11a60aa4e","crypto":{"cipher":"aes-128-ctr","ciphertext":"8124d5134aa4a927c79fd852989e4b5419397566f04b0936a1eb1d168c7c68a5","cipherparams":{"iv":"e2febe17176414dd2cda28287947eb2f"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":4096,"p":6,"r":8,"salt":"44b415ede89f3bdd6830390a21b78965f571b347a589d1d943029f016c5e8bd5"},"mac":"5e149ff25bfd9dd45746a84bb2bcd2f015f2cbca2b6d25c5de8c29617f71fe5b"},"id":"d6ac5452-2b2c-4d3c-ad80-4bf0327d971c","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/keystore/garbage b/arb/keystore/testdata/keystore/garbage
new file mode 100644
index 00000000000..ff45091e714
Binary files /dev/null and b/arb/keystore/testdata/keystore/garbage differ
diff --git a/arb/keystore/testdata/keystore/no-address b/arb/keystore/testdata/keystore/no-address
new file mode 100644
index 00000000000..ad51269eadb
--- /dev/null
+++ b/arb/keystore/testdata/keystore/no-address
@@ -0,0 +1 @@
+{"crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/keystore/zero b/arb/keystore/testdata/keystore/zero
new file mode 100644
index 00000000000..b52617f8aeb
--- /dev/null
+++ b/arb/keystore/testdata/keystore/zero
@@ -0,0 +1 @@
+{"address":"0000000000000000000000000000000000000000","crypto":{"cipher":"aes-128-ctr","ciphertext":"cb664472deacb41a2e995fa7f96fe29ce744471deb8d146a0e43c7898c9ddd4d","cipherparams":{"iv":"dfd9ee70812add5f4b8f89d0811c9158"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"0d6769bf016d45c479213990d6a08d938469c4adad8a02ce507b4a4e7b7739f1"},"mac":"bac9af994b15a45dd39669fc66f9aa8a3b9dd8c22cb16e4d8d7ea089d0f1a1a9"},"id":"472e8b3d-afb6-45b5-8111-72c89895099a","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/keystore/zzz b/arb/keystore/testdata/keystore/zzz
new file mode 100644
index 00000000000..cfd8a47017d
--- /dev/null
+++ b/arb/keystore/testdata/keystore/zzz
@@ -0,0 +1 @@
+{"address":"289d485d9771714cce91d3393d764e1311907acc","crypto":{"cipher":"aes-128-ctr","ciphertext":"faf32ca89d286b107f5e6d842802e05263c49b78d46eac74e6109e9a963378ab","cipherparams":{"iv":"558833eec4a665a8c55608d7d503407d"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":8,"p":16,"r":8,"salt":"d571fff447ffb24314f9513f5160246f09997b857ac71348b73e785aab40dc04"},"mac":"21edb85ff7d0dab1767b9bf498f2c3cb7be7609490756bd32300bb213b59effe"},"id":"3279afcf-55ba-43ff-8997-02dcc46a6525","version":3}
\ No newline at end of file
diff --git a/arb/keystore/testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e b/arb/keystore/testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e
new file mode 100644
index 00000000000..498d8131e8e
--- /dev/null
+++ b/arb/keystore/testdata/v1/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e/cb61d5a9c4896fb9658090b597ef0e7be6f7b67e
@@ -0,0 +1 @@
+{"address":"cb61d5a9c4896fb9658090b597ef0e7be6f7b67e","Crypto":{"cipher":"aes-128-cbc","ciphertext":"6143d3192db8b66eabd693d9c4e414dcfaee52abda451af79ccf474dafb35f1bfc7ea013aa9d2ee35969a1a2e8d752d0","cipherparams":{"iv":"35337770fc2117994ecdcad026bccff4"},"kdf":"scrypt","kdfparams":{"n":262144,"r":8,"p":1,"dklen":32,"salt":"9afcddebca541253a2f4053391c673ff9fe23097cd8555d149d929e4ccf1257f"},"mac":"3f3d5af884b17a100b0b3232c0636c230a54dc2ac8d986227219b0dd89197644","version":"1"},"id":"e25f7c1f-d318-4f29-b62c-687190d4d299","version":"1"}
\ No newline at end of file
diff --git a/arb/keystore/testdata/v1_test_vector.json b/arb/keystore/testdata/v1_test_vector.json
new file mode 100644
index 00000000000..3d09b55b5ed
--- /dev/null
+++ b/arb/keystore/testdata/v1_test_vector.json
@@ -0,0 +1,28 @@
+{
+ "test1": {
+ "json": {
+ "Crypto": {
+ "cipher": "aes-128-cbc",
+ "cipherparams": {
+ "iv": "35337770fc2117994ecdcad026bccff4"
+ },
+ "ciphertext": "6143d3192db8b66eabd693d9c4e414dcfaee52abda451af79ccf474dafb35f1bfc7ea013aa9d2ee35969a1a2e8d752d0",
+ "kdf": "scrypt",
+ "kdfparams": {
+ "dklen": 32,
+ "n": 262144,
+ "p": 1,
+ "r": 8,
+ "salt": "9afcddebca541253a2f4053391c673ff9fe23097cd8555d149d929e4ccf1257f"
+ },
+ "mac": "3f3d5af884b17a100b0b3232c0636c230a54dc2ac8d986227219b0dd89197644",
+ "version": "1"
+ },
+ "address": "cb61d5a9c4896fb9658090b597ef0e7be6f7b67e",
+ "id": "e25f7c1f-d318-4f29-b62c-687190d4d299",
+ "version": "1"
+ },
+ "password": "g",
+ "priv": "d1b1178d3529626a1a93e073f65028370d14c7eb0936eb42abef05db6f37ad7d"
+ }
+}
diff --git a/arb/keystore/testdata/v3_test_vector.json b/arb/keystore/testdata/v3_test_vector.json
new file mode 100644
index 00000000000..1e7f790c059
--- /dev/null
+++ b/arb/keystore/testdata/v3_test_vector.json
@@ -0,0 +1,97 @@
+{
+ "wikipage_test_vector_scrypt": {
+ "json": {
+ "crypto" : {
+ "cipher" : "aes-128-ctr",
+ "cipherparams" : {
+ "iv" : "83dbcc02d8ccb40e466191a123791e0e"
+ },
+ "ciphertext" : "d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c",
+ "kdf" : "scrypt",
+ "kdfparams" : {
+ "dklen" : 32,
+ "n" : 262144,
+ "r" : 1,
+ "p" : 8,
+ "salt" : "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19"
+ },
+ "mac" : "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097"
+ },
+ "id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
+ "version" : 3
+ },
+ "password": "testpassword",
+ "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
+ },
+ "wikipage_test_vector_pbkdf2": {
+ "json": {
+ "crypto" : {
+ "cipher" : "aes-128-ctr",
+ "cipherparams" : {
+ "iv" : "6087dab2f9fdbbfaddc31a909735c1e6"
+ },
+ "ciphertext" : "5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
+ "kdf" : "pbkdf2",
+ "kdfparams" : {
+ "c" : 262144,
+ "dklen" : 32,
+ "prf" : "hmac-sha256",
+ "salt" : "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd"
+ },
+ "mac" : "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2"
+ },
+ "id" : "3198bc9c-6672-5ab3-d995-4942343ae5b6",
+ "version" : 3
+ },
+ "password": "testpassword",
+ "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d"
+ },
+ "31_byte_key": {
+ "json": {
+ "crypto" : {
+ "cipher" : "aes-128-ctr",
+ "cipherparams" : {
+ "iv" : "e0c41130a323adc1446fc82f724bca2f"
+ },
+ "ciphertext" : "9517cd5bdbe69076f9bf5057248c6c050141e970efa36ce53692d5d59a3984",
+ "kdf" : "scrypt",
+ "kdfparams" : {
+ "dklen" : 32,
+ "n" : 2,
+ "r" : 8,
+ "p" : 1,
+ "salt" : "711f816911c92d649fb4c84b047915679933555030b3552c1212609b38208c63"
+ },
+ "mac" : "d5e116151c6aa71470e67a7d42c9620c75c4d23229847dcc127794f0732b0db5"
+ },
+ "id" : "fecfc4ce-e956-48fd-953b-30f8b52ed66c",
+ "version" : 3
+ },
+ "password": "foo",
+ "priv": "fa7b3db73dc7dfdf8c5fbdb796d741e4488628c41fc4febd9160a866ba0f35"
+ },
+ "30_byte_key": {
+ "json": {
+ "crypto" : {
+ "cipher" : "aes-128-ctr",
+ "cipherparams" : {
+ "iv" : "3ca92af36ad7c2cd92454c59cea5ef00"
+ },
+ "ciphertext" : "108b7d34f3442fc26ab1ab90ca91476ba6bfa8c00975a49ef9051dc675aa",
+ "kdf" : "scrypt",
+ "kdfparams" : {
+ "dklen" : 32,
+ "n" : 2,
+ "r" : 8,
+ "p" : 1,
+ "salt" : "d0769e608fb86cda848065642a9c6fa046845c928175662b8e356c77f914cd3b"
+ },
+ "mac" : "75d0e6759f7b3cefa319c3be41680ab6beea7d8328653474bd06706d4cc67420"
+ },
+ "id" : "a37e1559-5955-450d-8075-7b8931b392b2",
+ "version" : 3
+ },
+ "password": "foo",
+ "priv": "81c29e8142bb6a81bef5a92bda7a8328a5c85bb2f9542e76f9b0f94fc018"
+ }
+}
diff --git a/arb/keystore/testdata/very-light-scrypt.json b/arb/keystore/testdata/very-light-scrypt.json
new file mode 100644
index 00000000000..d23b9b2b91a
--- /dev/null
+++ b/arb/keystore/testdata/very-light-scrypt.json
@@ -0,0 +1 @@
+{"address":"45dea0fb0bba44f4fcf290bba71fd57d7117cbb8","crypto":{"cipher":"aes-128-ctr","ciphertext":"b87781948a1befd247bff51ef4063f716cf6c2d3481163e9a8f42e1f9bb74145","cipherparams":{"iv":"dc4926b48a105133d2f16b96833abf1e"},"kdf":"scrypt","kdfparams":{"dklen":32,"n":2,"p":1,"r":8,"salt":"004244bbdc51cadda545b1cfa43cff9ed2ae88e08c61f1479dbb45410722f8f0"},"mac":"39990c1684557447940d4c69e06b1b82b2aceacb43f284df65c956daf3046b85"},"id":"ce541d8d-c79b-40f8-9f8c-20f59616faba","version":3}
diff --git a/arb/keystore/wallet.go b/arb/keystore/wallet.go
new file mode 100644
index 00000000000..de24842e26a
--- /dev/null
+++ b/arb/keystore/wallet.go
@@ -0,0 +1,178 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package keystore
+
+import (
+ "errors"
+ "fmt"
+ "math/big"
+
+ ethereum "github.com/erigontech/erigon"
+ "github.com/erigontech/erigon/crypto"
+ "github.com/erigontech/erigon/execution/types"
+)
+
+// keystoreWallet is a wallet implementation backed by the original
+// keystore, wrapping a single account.
+type keystoreWallet struct {
+ account Account // Single account contained in this wallet
+ keystore *KeyStore // Keystore where the account originates from
+}
+
+// URL implements Account, returning the URL of the account within.
+func (w *keystoreWallet) URL() URL {
+ return w.account.URL
+}
+
+// Status implements Account, returning whether the account held by the
+// keystore wallet is unlocked or not.
+func (w *keystoreWallet) Status() (string, error) {
+ w.keystore.mu.RLock()
+ defer w.keystore.mu.RUnlock()
+
+ if _, ok := w.keystore.unlocked[w.account.Address]; ok {
+ return "Unlocked", nil
+ }
+ return "Locked", nil
+}
+
+// Open implements Account, but is a noop for plain wallets since there
+// is no connection or decryption step necessary to access the list of accounts.
+func (w *keystoreWallet) Open(passphrase string) error { return nil }
+
+// Close implements Account, but is a noop for plain wallets since there
+// is no meaningful open operation.
+func (w *keystoreWallet) Close() error { return nil }
+
+// Accounts implements Account, returning an account list consisting of
+// a single account that the plain keystore wallet contains.
+func (w *keystoreWallet) Accounts() []Account {
+ return []Account{w.account}
+}
+
+// Contains implements Account, returning whether a particular account is
+// or is not wrapped by this wallet instance.
+func (w *keystoreWallet) Contains(account Account) bool {
+ return account.Address == w.account.Address && (account.URL == (URL{}) || account.URL == w.account.URL)
+}
+
+// Derive implements Account, but is a noop for plain wallets since there
+// is no notion of hierarchical account derivation for plain keystore accounts.
+func (w *keystoreWallet) Derive(path DerivationPath, pin bool) (Account, error) {
+ return Account{}, ErrNotSupported
+}
+
+// SelfDerive implements Account, but is a noop for plain wallets since
+// there is no notion of hierarchical account derivation for plain keystore accounts.
+func (w *keystoreWallet) SelfDerive(bases []DerivationPath, chain ethereum.ChainReader) {
+}
+
+// signHash attempts to sign the given hash with
+// the given account. If the wallet does not wrap this particular account, an
+// error is returned to avoid account leakage (even though in theory we may be
+// able to sign via our shared keystore backend).
+func (w *keystoreWallet) signHash(account Account, hash []byte) ([]byte, error) {
+ // Make sure the requested account is contained within
+ if !w.Contains(account) {
+ return nil, ErrUnknownAccount
+ }
+ // Account seems valid, request the keystore to sign
+ return w.keystore.SignHash(account, hash)
+}
+
+// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed.
+func (w *keystoreWallet) SignData(account Account, mimeType string, data []byte) ([]byte, error) {
+ return w.signHash(account, crypto.Keccak256(data))
+}
+
+// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed.
+func (w *keystoreWallet) SignDataWithPassphrase(account Account, passphrase, mimeType string, data []byte) ([]byte, error) {
+ // Make sure the requested account is contained within
+ if !w.Contains(account) {
+ return nil, ErrUnknownAccount
+ }
+ // Account seems valid, request the keystore to sign
+ return w.keystore.SignHashWithPassphrase(account, passphrase, crypto.Keccak256(data))
+}
+
+// SignText implements Account, attempting to sign the hash of
+// the given text with the given account.
+func (w *keystoreWallet) SignText(account Account, text []byte) ([]byte, error) {
+ return w.signHash(account, TextHash(text))
+}
+
+// SignTextWithPassphrase implements Account, attempting to sign the
+// hash of the given text with the given account using passphrase as extra authentication.
+func (w *keystoreWallet) SignTextWithPassphrase(account Account, passphrase string, text []byte) ([]byte, error) {
+ // Make sure the requested account is contained within
+ if !w.Contains(account) {
+ return nil, ErrUnknownAccount
+ }
+ // Account seems valid, request the keystore to sign
+ return w.keystore.SignHashWithPassphrase(account, passphrase, TextHash(text))
+}
+
+var (
+ ErrUnknownAccount = errors.New("unknown account")
+ ErrNotSupported = errors.New("not supported")
+)
+
+// SignTx implements Account, attempting to sign the given transaction
+// with the given account. If the wallet does not wrap this particular account,
+// an error is returned to avoid account leakage (even though in theory we may
+// be able to sign via our shared keystore backend).
+func (w *keystoreWallet) SignTx(account Account, tx types.Transaction, chainID *big.Int) (types.Transaction, error) {
+ // Make sure the requested account is contained within
+ if !w.Contains(account) {
+ return nil, ErrUnknownAccount
+ }
+ // Account seems valid, request the keystore to sign
+ return w.keystore.SignTx(account, tx, chainID)
+}
+
+// SignTxWithPassphrase implements Account, attempting to sign the given
+// transaction with the given account using passphrase as extra authentication.
+func (w *keystoreWallet) SignTxWithPassphrase(account Account, passphrase string, tx types.Transaction, chainID *big.Int) (types.Transaction, error) {
+ // Make sure the requested account is contained within
+ if !w.Contains(account) {
+ return nil, ErrUnknownAccount
+ }
+ // Account seems valid, request the keystore to sign
+ return w.keystore.SignTxWithPassphrase(account, passphrase, tx, chainID)
+}
+
+// AuthNeededError is returned by backends for signing requests where the user
+// is required to provide further authentication before signing can succeed.
+//
+// This usually means either that a password needs to be supplied, or perhaps a
+// one time PIN code displayed by some hardware device.
+type AuthNeededError struct {
+ Needed string // Extra authentication the user needs to provide
+}
+
+// NewAuthNeededError creates a new authentication error with the extra details
+// about the needed fields set.
+func NewAuthNeededError(needed string) error {
+ return &AuthNeededError{
+ Needed: needed,
+ }
+}
+
+// Error implements the standard error interface.
+func (err *AuthNeededError) Error() string {
+ return fmt.Sprintf("authentication needed: %s", err.Needed)
+}
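AuthNeededError is intended for callers that can prompt for more credentials and retry. A hedged, in-package sketch of that caller-side pattern follows; the prompt callback is illustrative, and whether SignData actually surfaces an *AuthNeededError (e.g. for a locked account) depends on the KeyStore backend, which lives outside this hunk.

```go
package keystore

import "errors"

// Hypothetical caller-side helper: sign, and if the backend reports that more
// authentication is needed, prompt for a passphrase and retry.
func signOrPrompt(w *keystoreWallet, acc Account, data []byte, prompt func(needed string) string) ([]byte, error) {
	sig, err := w.SignData(acc, "application/octet-stream", data)
	if err == nil {
		return sig, nil
	}
	var authErr *AuthNeededError
	if errors.As(err, &authErr) {
		// authErr.Needed describes what is missing (typically a passphrase).
		pass := prompt(authErr.Needed)
		return w.SignDataWithPassphrase(acc, pass, "application/octet-stream", data)
	}
	return nil, err
}
```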
diff --git a/arb/keystore/watch.go b/arb/keystore/watch.go
new file mode 100644
index 00000000000..1d166a42557
--- /dev/null
+++ b/arb/keystore/watch.go
@@ -0,0 +1,134 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build (darwin && !ios && cgo) || freebsd || (linux && !arm64) || netbsd || solaris
+// +build darwin,!ios,cgo freebsd linux,!arm64 netbsd solaris
+
+package keystore
+
+import (
+ "os"
+ "time"
+
+ "github.com/erigontech/erigon/log/v3"
+ "github.com/fsnotify/fsnotify"
+)
+
+type watcher struct {
+ ac *accountCache
+ running bool // set to true when runloop begins
+ runEnded bool // set to true when runloop ends
+ starting bool // set to true prior to runloop starting
+ quit chan struct{}
+}
+
+func newWatcher(ac *accountCache) *watcher {
+ return &watcher{
+ ac: ac,
+ quit: make(chan struct{}),
+ }
+}
+
+// enabled reports whether filesystem watching is supported on this platform.
+func (*watcher) enabled() bool { return true }
+
+// start launches the watcher loop in the background if it is not already
+// running or starting.
+// The caller must hold w.ac.mu.
+func (w *watcher) start() {
+ if w.starting || w.running {
+ return
+ }
+ w.starting = true
+ go w.loop()
+}
+
+func (w *watcher) close() {
+ close(w.quit)
+}
+
+func (w *watcher) loop() {
+ defer func() {
+ w.ac.mu.Lock()
+ w.running = false
+ w.starting = false
+ w.runEnded = true
+ w.ac.mu.Unlock()
+ }()
+ logger := log.New("path", w.ac.keydir)
+
+ // Create new watcher.
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ log.Error("Failed to start filesystem watcher", "err", err)
+ return
+ }
+ defer watcher.Close()
+ if err := watcher.Add(w.ac.keydir); err != nil {
+ if !os.IsNotExist(err) {
+ logger.Warn("Failed to watch keystore folder", "err", err)
+ }
+ return
+ }
+
+ logger.Trace("Started watching keystore folder", "folder", w.ac.keydir)
+ defer logger.Trace("Stopped watching keystore folder")
+
+ w.ac.mu.Lock()
+ w.running = true
+ w.ac.mu.Unlock()
+
+ // Wait for file system events and reload.
+ // When an event occurs, the reload call is delayed a bit so that
+ // multiple events arriving quickly only cause a single reload.
+ var (
+ debounceDuration = 500 * time.Millisecond
+ rescanTriggered = false
+ debounce = time.NewTimer(0)
+ )
+ // Ignore initial trigger
+ if !debounce.Stop() {
+ <-debounce.C
+ }
+ defer debounce.Stop()
+ for {
+ select {
+ case <-w.quit:
+ return
+ case _, ok := <-watcher.Events:
+ if !ok {
+ return
+ }
+ // Trigger the scan (with delay), if not already triggered
+ if !rescanTriggered {
+ debounce.Reset(debounceDuration)
+ rescanTriggered = true
+ }
+			// The fsnotify library does provide more granular event info, so it
+			// would be possible to refresh only the affected files instead of
+			// scheduling a full rescan. For most cases, though, a full rescan
+			// is quick and simplest.
+ case err, ok := <-watcher.Errors:
+ if !ok {
+ return
+ }
+ log.Info("Filesystem watcher error", "err", err)
+ case <-debounce.C:
+ w.ac.scanAccounts()
+ rescanTriggered = false
+ }
+ }
+}
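The watcher loop coalesces bursts of fsnotify events into a single scanAccounts call by arming a timer on the first event and acting only when it fires. The same debounce idiom, stripped of the keystore specifics, looks roughly like this (generic sketch, not part of the diff):

```go
package main

import (
	"fmt"
	"time"
)

// debounce runs fn at most once per window for any burst of signals on events.
func debounce(events <-chan struct{}, quit <-chan struct{}, window time.Duration, fn func()) {
	timer := time.NewTimer(0)
	if !timer.Stop() {
		<-timer.C // drain the initial firing so the timer starts disarmed
	}
	defer timer.Stop()

	armed := false
	for {
		select {
		case <-quit:
			return
		case _, ok := <-events:
			if !ok {
				return
			}
			if !armed { // first event of a burst arms the timer
				timer.Reset(window)
				armed = true
			}
		case <-timer.C:
			fn()
			armed = false
		}
	}
}

func main() {
	events := make(chan struct{})
	quit := make(chan struct{})
	go debounce(events, quit, 100*time.Millisecond, func() { fmt.Println("rescan") })

	for i := 0; i < 5; i++ { // a burst of five events triggers a single "rescan"
		events <- struct{}{}
	}
	time.Sleep(200 * time.Millisecond)
	close(quit)
}
```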
diff --git a/arb/keystore/watch_fallback.go b/arb/keystore/watch_fallback.go
new file mode 100644
index 00000000000..e3c133b3f6a
--- /dev/null
+++ b/arb/keystore/watch_fallback.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+//go:build (darwin && !cgo) || ios || (linux && arm64) || windows || (!darwin && !freebsd && !linux && !netbsd && !solaris)
+// +build darwin,!cgo ios linux,arm64 windows !darwin,!freebsd,!linux,!netbsd,!solaris
+
+// This is the fallback implementation of directory watching.
+// It is used on unsupported platforms.
+
+package keystore
+
+type watcher struct {
+ running bool
+ runEnded bool
+}
+
+func newWatcher(*accountCache) *watcher { return new(watcher) }
+func (*watcher) start() {}
+func (*watcher) close() {}
+
+// enabled returns false on systems not supported.
+func (*watcher) enabled() bool { return false }
diff --git a/arb/lru/basiclru.go b/arb/lru/basiclru.go
new file mode 100644
index 00000000000..a429157fe50
--- /dev/null
+++ b/arb/lru/basiclru.go
@@ -0,0 +1,223 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package lru implements generically-typed LRU caches.
+package lru
+
+// BasicLRU is a simple LRU cache.
+//
+// This type is not safe for concurrent use.
+// The zero value is not valid, instances must be created using NewCache.
+type BasicLRU[K comparable, V any] struct {
+ list *list[K]
+ items map[K]cacheItem[K, V]
+ cap int
+}
+
+type cacheItem[K any, V any] struct {
+ elem *listElem[K]
+ value V
+}
+
+// NewBasicLRU creates a new LRU cache.
+func NewBasicLRU[K comparable, V any](capacity int) BasicLRU[K, V] {
+ if capacity <= 0 {
+ capacity = 1
+ }
+ c := BasicLRU[K, V]{
+ items: make(map[K]cacheItem[K, V]),
+ list: newList[K](),
+ cap: capacity,
+ }
+ return c
+}
+
+// Add adds a value to the cache. Returns true if an item was evicted to store the new item.
+func (c *BasicLRU[K, V]) Add(key K, value V) (evicted bool) {
+ item, ok := c.items[key]
+ if ok {
+ // Already exists in cache.
+ item.value = value
+ c.items[key] = item
+ c.list.moveToFront(item.elem)
+ return false
+ }
+
+ var elem *listElem[K]
+ if c.Len() >= c.cap {
+ elem = c.list.removeLast()
+ delete(c.items, elem.v)
+ evicted = true
+ } else {
+ elem = new(listElem[K])
+ }
+
+ // Store the new item.
+ // Note that, if another item was evicted, we re-use its list element here.
+ elem.v = key
+ c.items[key] = cacheItem[K, V]{elem, value}
+ c.list.pushElem(elem)
+ return evicted
+}
+
+// Contains reports whether the given key exists in the cache.
+func (c *BasicLRU[K, V]) Contains(key K) bool {
+ _, ok := c.items[key]
+ return ok
+}
+
+// Get retrieves a value from the cache. This marks the key as recently used.
+func (c *BasicLRU[K, V]) Get(key K) (value V, ok bool) {
+ item, ok := c.items[key]
+ if !ok {
+ return value, false
+ }
+ c.list.moveToFront(item.elem)
+ return item.value, true
+}
+
+// GetOldest retrieves the least-recently-used item.
+// Note that this does not update the item's recency.
+func (c *BasicLRU[K, V]) GetOldest() (key K, value V, ok bool) {
+ lastElem := c.list.last()
+ if lastElem == nil {
+ return key, value, false
+ }
+ key = lastElem.v
+ item := c.items[key]
+ return key, item.value, true
+}
+
+// Len returns the current number of items in the cache.
+func (c *BasicLRU[K, V]) Len() int {
+ return len(c.items)
+}
+
+// Peek retrieves a value from the cache, but does not mark the key as recently used.
+func (c *BasicLRU[K, V]) Peek(key K) (value V, ok bool) {
+ item, ok := c.items[key]
+ return item.value, ok
+}
+
+// Purge empties the cache.
+func (c *BasicLRU[K, V]) Purge() {
+ c.list.init()
+ for k := range c.items {
+ delete(c.items, k)
+ }
+}
+
+// Remove drops an item from the cache. Returns true if the key was present in cache.
+func (c *BasicLRU[K, V]) Remove(key K) bool {
+ item, ok := c.items[key]
+ if ok {
+ delete(c.items, key)
+ c.list.remove(item.elem)
+ }
+ return ok
+}
+
+// RemoveOldest drops the least recently used item.
+func (c *BasicLRU[K, V]) RemoveOldest() (key K, value V, ok bool) {
+ lastElem := c.list.last()
+ if lastElem == nil {
+ return key, value, false
+ }
+
+ key = lastElem.v
+ item := c.items[key]
+ delete(c.items, key)
+ c.list.remove(lastElem)
+ return key, item.value, true
+}
+
+// Keys returns all keys in the cache.
+func (c *BasicLRU[K, V]) Keys() []K {
+ keys := make([]K, 0, len(c.items))
+ return c.list.appendTo(keys)
+}
+
+// list is a doubly-linked list holding items of type T.
+// The zero value is not valid, use newList to create lists.
+type list[T any] struct {
+ root listElem[T]
+}
+
+type listElem[T any] struct {
+ next *listElem[T]
+ prev *listElem[T]
+ v T
+}
+
+func newList[T any]() *list[T] {
+ l := new(list[T])
+ l.init()
+ return l
+}
+
+// init reinitializes the list, making it empty.
+func (l *list[T]) init() {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+}
+
+// pushElem adds an element to the front of the list.
+func (l *list[T]) pushElem(e *listElem[T]) {
+ e.prev = &l.root
+ e.next = l.root.next
+ l.root.next = e
+ e.next.prev = e
+}
+
+// moveToFront makes 'e' the head of the list.
+func (l *list[T]) moveToFront(e *listElem[T]) {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ l.pushElem(e)
+}
+
+// remove removes an element from the list.
+func (l *list[T]) remove(e *listElem[T]) {
+ e.prev.next = e.next
+ e.next.prev = e.prev
+ e.next, e.prev = nil, nil
+}
+
+// removeLast removes the last element of the list.
+func (l *list[T]) removeLast() *listElem[T] {
+ last := l.last()
+ if last != nil {
+ l.remove(last)
+ }
+ return last
+}
+
+// last returns the last element of the list, or nil if the list is empty.
+func (l *list[T]) last() *listElem[T] {
+ e := l.root.prev
+ if e == &l.root {
+ return nil
+ }
+ return e
+}
+
+// appendTo appends all list elements to a slice.
+func (l *list[T]) appendTo(slice []T) []T {
+ for e := l.root.prev; e != &l.root; e = e.prev {
+ slice = append(slice, e.v)
+ }
+ return slice
+}
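For reference, a tiny usage sketch of BasicLRU; the import path mirrors where this diff places the package (arb/lru) and is otherwise an assumption.

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon/arb/lru" // assumed import path, per this diff's layout
)

func main() {
	// Capacity 2: adding a third key evicts the least recently used one.
	cache := lru.NewBasicLRU[string, int](2)
	cache.Add("a", 1)
	cache.Add("b", 2)

	cache.Get("a") // touch "a" so "b" becomes the oldest entry

	fmt.Println(cache.Add("c", 3)) // true: "b" was evicted
	_, ok := cache.Get("b")
	fmt.Println(ok)           // false
	fmt.Println(cache.Keys()) // oldest first: [a c]
}
```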
diff --git a/arb/lru/basiclru_arbitrum.go b/arb/lru/basiclru_arbitrum.go
new file mode 100644
index 00000000000..aa8d3fdf877
--- /dev/null
+++ b/arb/lru/basiclru_arbitrum.go
@@ -0,0 +1,22 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Package lru implements generically-typed LRU caches.
+package lru
+
+func (c *BasicLRU[K, V]) Capacity() int {
+ return c.cap
+}
diff --git a/arb/lru/basiclru_test.go b/arb/lru/basiclru_test.go
new file mode 100644
index 00000000000..29812bda157
--- /dev/null
+++ b/arb/lru/basiclru_test.go
@@ -0,0 +1,255 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package lru
+
+import (
+ crand "crypto/rand"
+ "fmt"
+ "io"
+ "math/rand"
+ "testing"
+)
+
+// Some of these test cases were adapted
+// from https://github.com/hashicorp/golang-lru/blob/master/simplelru/lru_test.go
+
+func TestBasicLRU(t *testing.T) {
+ cache := NewBasicLRU[int, int](128)
+
+ for i := 0; i < 256; i++ {
+ cache.Add(i, i)
+ }
+ if cache.Len() != 128 {
+ t.Fatalf("bad len: %v", cache.Len())
+ }
+
+ // Check that Keys returns least-recent key first.
+ keys := cache.Keys()
+ if len(keys) != 128 {
+ t.Fatal("wrong Keys() length", len(keys))
+ }
+ for i, k := range keys {
+ v, ok := cache.Peek(k)
+ if !ok {
+			t.Fatalf("expected key %d to be present", i)
+ }
+ if v != k {
+ t.Fatalf("expected %d == %d", k, v)
+ }
+ if v != i+128 {
+ t.Fatalf("wrong value at key %d: %d, want %d", i, v, i+128)
+ }
+ }
+
+ for i := 0; i < 128; i++ {
+ _, ok := cache.Get(i)
+ if ok {
+ t.Fatalf("%d should be evicted", i)
+ }
+ }
+ for i := 128; i < 256; i++ {
+ _, ok := cache.Get(i)
+ if !ok {
+ t.Fatalf("%d should not be evicted", i)
+ }
+ }
+
+ for i := 128; i < 192; i++ {
+ ok := cache.Remove(i)
+ if !ok {
+ t.Fatalf("%d should be in cache", i)
+ }
+ ok = cache.Remove(i)
+ if ok {
+ t.Fatalf("%d should not be in cache", i)
+ }
+ _, ok = cache.Get(i)
+ if ok {
+ t.Fatalf("%d should be deleted", i)
+ }
+ }
+
+ // Request item 192.
+ cache.Get(192)
+ // It should be the last item returned by Keys().
+ for i, k := range cache.Keys() {
+ if (i < 63 && k != i+193) || (i == 63 && k != 192) {
+ t.Fatalf("out of order key: %v", k)
+ }
+ }
+
+ cache.Purge()
+ if cache.Len() != 0 {
+ t.Fatalf("bad len: %v", cache.Len())
+ }
+ if _, ok := cache.Get(200); ok {
+ t.Fatalf("should contain nothing")
+ }
+}
+
+func TestBasicLRUAddExistingKey(t *testing.T) {
+ cache := NewBasicLRU[int, int](1)
+
+ cache.Add(1, 1)
+ cache.Add(1, 2)
+
+ v, _ := cache.Get(1)
+ if v != 2 {
+ t.Fatal("wrong value:", v)
+ }
+}
+
+// This test checks GetOldest and RemoveOldest.
+func TestBasicLRUGetOldest(t *testing.T) {
+ cache := NewBasicLRU[int, int](128)
+ for i := 0; i < 256; i++ {
+ cache.Add(i, i)
+ }
+
+ k, _, ok := cache.GetOldest()
+ if !ok {
+ t.Fatalf("missing")
+ }
+ if k != 128 {
+ t.Fatalf("bad: %v", k)
+ }
+
+ k, _, ok = cache.RemoveOldest()
+ if !ok {
+ t.Fatalf("missing")
+ }
+ if k != 128 {
+ t.Fatalf("bad: %v", k)
+ }
+
+ k, _, ok = cache.RemoveOldest()
+ if !ok {
+ t.Fatalf("missing oldest item")
+ }
+ if k != 129 {
+ t.Fatalf("wrong oldest item: %v", k)
+ }
+}
+
+// Test that Add returns true/false if an eviction occurred
+func TestBasicLRUAddReturnValue(t *testing.T) {
+ cache := NewBasicLRU[int, int](1)
+ if cache.Add(1, 1) {
+ t.Errorf("first add shouldn't have evicted")
+ }
+ if !cache.Add(2, 2) {
+ t.Errorf("second add should have evicted")
+ }
+}
+
+// This test verifies that Contains doesn't change item recency.
+func TestBasicLRUContains(t *testing.T) {
+ cache := NewBasicLRU[int, int](2)
+ cache.Add(1, 1)
+ cache.Add(2, 2)
+ if !cache.Contains(1) {
+ t.Errorf("1 should be in the cache")
+ }
+ cache.Add(3, 3)
+ if cache.Contains(1) {
+ t.Errorf("Contains should not have updated recency of 1")
+ }
+}
+
+// Test that Peek doesn't update recent-ness
+func TestBasicLRUPeek(t *testing.T) {
+ cache := NewBasicLRU[int, int](2)
+ cache.Add(1, 1)
+ cache.Add(2, 2)
+ if v, ok := cache.Peek(1); !ok || v != 1 {
+ t.Errorf("1 should be set to 1")
+ }
+ cache.Add(3, 3)
+ if cache.Contains(1) {
+ t.Errorf("should not have updated recent-ness of 1")
+ }
+}
+
+func BenchmarkLRU(b *testing.B) {
+ var (
+ capacity = 1000
+ indexes = make([]int, capacity*20)
+ keys = make([]string, capacity)
+ values = make([][]byte, capacity)
+ )
+ for i := range indexes {
+ indexes[i] = rand.Intn(capacity)
+ }
+ for i := range keys {
+ b := make([]byte, 32)
+ crand.Read(b)
+ keys[i] = string(b)
+ crand.Read(b)
+ values[i] = b
+ }
+
+ var sink []byte
+
+ b.Run("Add/BasicLRU", func(b *testing.B) {
+ cache := NewBasicLRU[int, int](capacity)
+ for i := 0; i < b.N; i++ {
+ cache.Add(i, i)
+ }
+ })
+ b.Run("Get/BasicLRU", func(b *testing.B) {
+ cache := NewBasicLRU[string, []byte](capacity)
+ for i := 0; i < capacity; i++ {
+ index := indexes[i]
+ cache.Add(keys[index], values[index])
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ k := keys[indexes[i%len(indexes)]]
+ v, ok := cache.Get(k)
+ if ok {
+ sink = v
+ }
+ }
+ })
+
+ // // vs. github.com/hashicorp/golang-lru/simplelru
+ // b.Run("Add/simplelru.LRU", func(b *testing.B) {
+ // cache, _ := simplelru.NewLRU(capacity, nil)
+ // for i := 0; i < b.N; i++ {
+ // cache.Add(i, i)
+ // }
+ // })
+ // b.Run("Get/simplelru.LRU", func(b *testing.B) {
+ // cache, _ := simplelru.NewLRU(capacity, nil)
+ // for i := 0; i < capacity; i++ {
+ // index := indexes[i]
+ // cache.Add(keys[index], values[index])
+ // }
+ //
+ // b.ResetTimer()
+ // for i := 0; i < b.N; i++ {
+ // k := keys[indexes[i%len(indexes)]]
+ // v, ok := cache.Get(k)
+ // if ok {
+ // sink = v.([]byte)
+ // }
+ // }
+ // })
+
+ fmt.Fprintln(io.Discard, sink)
+}
diff --git a/arb/lru/blob_lru.go b/arb/lru/blob_lru.go
new file mode 100644
index 00000000000..c9b33985032
--- /dev/null
+++ b/arb/lru/blob_lru.go
@@ -0,0 +1,84 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package lru
+
+import (
+ "math"
+ "sync"
+)
+
+// blobType is the type constraint for values stored in SizeConstrainedCache.
+type blobType interface {
+ ~[]byte | ~string
+}
+
+// SizeConstrainedCache is a cache where capacity is in bytes (instead of item count). When the cache
+// is at capacity, and a new item is added, older items are evicted until the size
+// constraint is met.
+//
+// OBS: This cache assumes that items are content-addressed: keys are unique per content.
+// In other words: two Add(..) with the same key K, will always have the same value V.
+type SizeConstrainedCache[K comparable, V blobType] struct {
+ size uint64
+ maxSize uint64
+ lru BasicLRU[K, V]
+ lock sync.Mutex
+}
+
+// NewSizeConstrainedCache creates a new size-constrained LRU cache.
+func NewSizeConstrainedCache[K comparable, V blobType](maxSize uint64) *SizeConstrainedCache[K, V] {
+ return &SizeConstrainedCache[K, V]{
+ size: 0,
+ maxSize: maxSize,
+ lru: NewBasicLRU[K, V](math.MaxInt),
+ }
+}
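+
+// Illustrative usage (a sketch, not part of this change; the key type, the 16 MiB
+// budget, and the key/value variables are arbitrary choices for the example):
+//
+//	blobs := NewSizeConstrainedCache[[32]byte, []byte](16 * 1024 * 1024)
+//	if evicted := blobs.Add(key, value); evicted {
+//		// older entries were dropped to keep the total value size under the budget
+//	}
+//	if v, ok := blobs.Get(key); ok {
+//		_ = v // the cached slice itself is returned; callers must not modify it
+//	}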
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+// OBS: This cache assumes that items are content-addressed: keys are unique per content.
+// In other words: two Add(..) with the same key K, will always have the same value V.
+// OBS: The value is _not_ copied on Add, so the caller must not modify it afterwards.
+func (c *SizeConstrainedCache[K, V]) Add(key K, value V) (evicted bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Unless it is already present, might need to evict something.
+ // OBS: If it is present, we still call Add internally to bump the recentness.
+ if !c.lru.Contains(key) {
+ targetSize := c.size + uint64(len(value))
+ for targetSize > c.maxSize {
+ evicted = true
+ _, v, ok := c.lru.RemoveOldest()
+ if !ok {
+ // list is now empty. Break
+ break
+ }
+ targetSize -= uint64(len(v))
+ }
+ c.size = targetSize
+ }
+ c.lru.Add(key, value)
+ return evicted
+}
+
+// Get looks up a key's value from the cache.
+func (c *SizeConstrainedCache[K, V]) Get(key K) (V, bool) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ return c.lru.Get(key)
+}
diff --git a/arb/lru/blob_lru_test.go b/arb/lru/blob_lru_test.go
new file mode 100644
index 00000000000..ca1b0ddd742
--- /dev/null
+++ b/arb/lru/blob_lru_test.go
@@ -0,0 +1,155 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package lru
+
+import (
+ "encoding/binary"
+ "fmt"
+ "testing"
+)
+
+type testKey [8]byte
+
+func mkKey(i int) (key testKey) {
+ binary.LittleEndian.PutUint64(key[:], uint64(i))
+ return key
+}
+
+func TestSizeConstrainedCache(t *testing.T) {
+ lru := NewSizeConstrainedCache[testKey, []byte](100)
+ var want uint64
+	// Add 11 items of 10 bytes each. The first item should be swapped out.
+ for i := 0; i < 11; i++ {
+ k := mkKey(i)
+ v := fmt.Sprintf("value-%04d", i)
+ lru.Add(k, []byte(v))
+ want += uint64(len(v))
+ if want > 100 {
+ want = 100
+ }
+ if have := lru.size; have != want {
+ t.Fatalf("size wrong, have %d want %d", have, want)
+ }
+ }
+	// The zeroth item should be evicted
+ {
+ k := mkKey(0)
+ if _, ok := lru.Get(k); ok {
+ t.Fatalf("should be evicted: %v", k)
+ }
+ }
+	// Elems 1-10 should be present
+ for i := 1; i < 11; i++ {
+ k := mkKey(i)
+ want := fmt.Sprintf("value-%04d", i)
+ have, ok := lru.Get(k)
+ if !ok {
+ t.Fatalf("missing key %v", k)
+ }
+ if string(have) != want {
+ t.Fatalf("wrong value, have %v want %v", have, want)
+ }
+ }
+}
+
+// This test checks inserting an element that exceeds the max size.
+func TestSizeConstrainedCacheOverflow(t *testing.T) {
+ lru := NewSizeConstrainedCache[testKey, []byte](100)
+
+	// Add 10 items of 10 bytes each, filling the cache
+ for i := 0; i < 10; i++ {
+ k := mkKey(i)
+ v := fmt.Sprintf("value-%04d", i)
+ lru.Add(k, []byte(v))
+ }
+ // Add one single large elem. We expect it to swap out all entries.
+ {
+ k := mkKey(1337)
+ v := make([]byte, 200)
+ lru.Add(k, v)
+ }
+	// Elems 0-9 should be missing
+	for i := 0; i < 10; i++ {
+ k := mkKey(i)
+ if _, ok := lru.Get(k); ok {
+ t.Fatalf("should be evicted: %v", k)
+ }
+ }
+ // The size should be accurate
+ if have, want := lru.size, uint64(200); have != want {
+ t.Fatalf("size wrong, have %d want %d", have, want)
+ }
+ // Adding one small item should swap out the large one
+ {
+ i := 0
+ k := mkKey(i)
+ v := fmt.Sprintf("value-%04d", i)
+ lru.Add(k, []byte(v))
+ if have, want := lru.size, uint64(10); have != want {
+ t.Fatalf("size wrong, have %d want %d", have, want)
+ }
+ }
+}
+
+// This checks what happens when inserting the same k/v multiple times.
+func TestSizeConstrainedCacheSameItem(t *testing.T) {
+ lru := NewSizeConstrainedCache[testKey, []byte](100)
+
+	// Add one 10-byte item 10 times.
+ k := mkKey(0)
+ v := fmt.Sprintf("value-%04d", 0)
+ for i := 0; i < 10; i++ {
+ lru.Add(k, []byte(v))
+ }
+
+ // The size should be accurate.
+ if have, want := lru.size, uint64(10); have != want {
+ t.Fatalf("size wrong, have %d want %d", have, want)
+ }
+}
+
+// This tests that empty/nil values are handled correctly.
+func TestSizeConstrainedCacheEmpties(t *testing.T) {
+ lru := NewSizeConstrainedCache[testKey, []byte](100)
+
+ // This test abuses the lru a bit, using different keys for identical value(s).
+ for i := 0; i < 10; i++ {
+ lru.Add(testKey{byte(i)}, []byte{})
+ lru.Add(testKey{byte(255 - i)}, nil)
+ }
+
+	// Only the values count toward the size, not the keys, so the size should be
+	// zero here. That also means there is effectively no cap on the number of
+	// 0-length entries, which this test intentionally exploits with distinct keys.
+ if have, want := lru.size, uint64(0); have != want {
+ t.Fatalf("size wrong, have %d want %d", have, want)
+ }
+
+ for i := 0; i < 10; i++ {
+ if v, ok := lru.Get(testKey{byte(i)}); !ok {
+ t.Fatalf("test %d: expected presence", i)
+ } else if v == nil {
+ t.Fatalf("test %d, v is nil", i)
+ }
+
+ if v, ok := lru.Get(testKey{byte(255 - i)}); !ok {
+ t.Fatalf("test %d: expected presence", i)
+ } else if v != nil {
+ t.Fatalf("test %d, v is not nil", i)
+ }
+ }
+}
diff --git a/arb/lru/lru.go b/arb/lru/lru.go
new file mode 100644
index 00000000000..45965adb0df
--- /dev/null
+++ b/arb/lru/lru.go
@@ -0,0 +1,95 @@
+// Copyright 2022 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package lru
+
+import "sync"
+
+// Cache is an LRU cache.
+// This type is safe for concurrent use.
+type Cache[K comparable, V any] struct {
+ cache BasicLRU[K, V]
+ mu sync.Mutex
+}
+
+// NewCache creates an LRU cache.
+func NewCache[K comparable, V any](capacity int) *Cache[K, V] {
+ return &Cache[K, V]{cache: NewBasicLRU[K, V](capacity)}
+}
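+
+// Illustrative usage (a sketch, not part of this change; the key/value types and
+// the capacity are arbitrary):
+//
+//	c := NewCache[string, int](128)
+//	c.Add("answer", 42) // safe for concurrent use
+//	if v, ok := c.Get("answer"); ok {
+//		_ = v // Get also marks the entry as most recently used
+//	}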
+
+// Add adds a value to the cache. Returns true if an item was evicted to store the new item.
+func (c *Cache[K, V]) Add(key K, value V) (evicted bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.cache.Add(key, value)
+}
+
+// Contains reports whether the given key exists in the cache.
+func (c *Cache[K, V]) Contains(key K) bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.cache.Contains(key)
+}
+
+// Get retrieves a value from the cache. This marks the key as recently used.
+func (c *Cache[K, V]) Get(key K) (value V, ok bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.cache.Get(key)
+}
+
+// Len returns the current number of items in the cache.
+func (c *Cache[K, V]) Len() int {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.cache.Len()
+}
+
+// Peek retrieves a value from the cache, but does not mark the key as recently used.
+func (c *Cache[K, V]) Peek(key K) (value V, ok bool) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.cache.Peek(key)
+}
+
+// Purge empties the cache.
+func (c *Cache[K, V]) Purge() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ c.cache.Purge()
+}
+
+// Remove drops an item from the cache. Returns true if the key was present in cache.
+func (c *Cache[K, V]) Remove(key K) bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.cache.Remove(key)
+}
+
+// Keys returns all keys of items currently in the LRU.
+func (c *Cache[K, V]) Keys() []K {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ return c.cache.Keys()
+}
diff --git a/arb/multigas/resources.go b/arb/multigas/resources.go
new file mode 100644
index 00000000000..681fd41d56a
--- /dev/null
+++ b/arb/multigas/resources.go
@@ -0,0 +1,570 @@
+package multigas
+
+import (
+ "encoding/json"
+ "fmt"
+ "math/bits"
+
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/common/math"
+ "github.com/erigontech/erigon/execution/chain/params"
+)
+
+// ResourceKind represents a dimension for the multi-dimensional gas.
+type ResourceKind uint8
+
+const (
+ ResourceKindUnknown ResourceKind = iota
+ ResourceKindComputation
+ ResourceKindHistoryGrowth
+ ResourceKindStorageAccess
+ ResourceKindStorageGrowth
+ ResourceKindL1Calldata
+ ResourceKindL2Calldata
+ ResourceKindWasmComputation
+ NumResourceKind
+)
+
+func (rk ResourceKind) String() string {
+ switch rk {
+ case ResourceKindUnknown:
+ return "Unknown"
+ case ResourceKindComputation:
+ return "Computation"
+ case ResourceKindHistoryGrowth:
+ return "HistoryGrowth"
+ case ResourceKindStorageAccess:
+ return "StorageAccess"
+ case ResourceKindStorageGrowth:
+ return "StorageGrowth"
+ case ResourceKindL1Calldata:
+ return "L1Calldata"
+ case ResourceKindL2Calldata:
+ return "L2Calldata"
+ case ResourceKindWasmComputation:
+ return "WasmComputation"
+ default:
+ return fmt.Sprintf("ResourceKind(%d)", uint8(rk))
+ }
+}
+
+func CheckResourceKind(id uint8) (ResourceKind, error) {
+ if id <= uint8(ResourceKindUnknown) || id >= uint8(NumResourceKind) {
+ return ResourceKindUnknown, fmt.Errorf("invalid resource id: %v", id)
+ }
+ return ResourceKind(id), nil
+}
+
+// MultiGas tracks gas usage across multiple resource kinds, while also
+// maintaining a single-dimensional total gas sum and refund amount.
+type MultiGas struct {
+ gas [NumResourceKind]uint64
+ total uint64
+ refund uint64
+}
+
+// Pair represents a single resource kind and its associated gas amount.
+type Pair struct {
+ Kind ResourceKind
+ Amount uint64
+}
+
+// ZeroGas creates a MultiGas value with all fields set to zero.
+func ZeroGas() MultiGas {
+ return MultiGas{}
+}
+
+// NewMultiGas creates a new MultiGas with the given resource kind initialized to `amount`.
+// All other kinds are zero. The total is also set to `amount`.
+func NewMultiGas(kind ResourceKind, amount uint64) MultiGas {
+ var mg MultiGas
+ mg.gas[kind] = amount
+ mg.total = amount
+ return mg
+}
+
+// MultiGasFromPairs creates a new MultiGas from resource–amount pairs.
+// Intended for constant-like construction; panics on overflow.
+func MultiGasFromPairs(pairs ...Pair) MultiGas {
+ var mg MultiGas
+ for _, p := range pairs {
+ newTotal, c := bits.Add64(mg.total, p.Amount, 0)
+ if c != 0 {
+ panic("multigas overflow")
+ }
+ mg.gas[p.Kind] = p.Amount
+ mg.total = newTotal
+ }
+ return mg
+}
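+
+// Illustrative construction (a sketch, not part of this change; the amounts are
+// arbitrary):
+//
+//	mg := MultiGasFromPairs(
+//		Pair{Kind: ResourceKindComputation, Amount: 21000},
+//		Pair{Kind: ResourceKindL2Calldata, Amount: 1600},
+//	)
+//	// mg.Get(ResourceKindComputation) == 21000
+//	// mg.SingleGas() == 22600, i.e. the sum across all kinds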
+
+// UnknownGas returns a MultiGas initialized with unknown gas.
+func UnknownGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindUnknown, amount)
+}
+
+// ComputationGas returns a MultiGas initialized with computation gas.
+func ComputationGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindComputation, amount)
+}
+
+// HistoryGrowthGas returns a MultiGas initialized with history growth gas.
+func HistoryGrowthGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindHistoryGrowth, amount)
+}
+
+// StorageAccessGas returns a MultiGas initialized with storage access gas.
+func StorageAccessGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindStorageAccess, amount)
+}
+
+// StorageGrowthGas returns a MultiGas initialized with storage growth gas.
+func StorageGrowthGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindStorageGrowth, amount)
+}
+
+// L1CalldataGas returns a MultiGas initialized with L1 calldata gas.
+func L1CalldataGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindL1Calldata, amount)
+}
+
+// L2CalldataGas returns a MultiGas initialized with L2 calldata gas.
+func L2CalldataGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindL2Calldata, amount)
+}
+
+// WasmComputationGas returns a MultiGas initialized with computation gas used for WASM (Stylus contracts).
+func WasmComputationGas(amount uint64) MultiGas {
+ return NewMultiGas(ResourceKindWasmComputation, amount)
+}
+
+// Get returns the gas amount for the specified resource kind.
+func (z MultiGas) Get(kind ResourceKind) uint64 {
+ return z.gas[kind]
+}
+
+// With returns a copy of z with the given resource kind set to amount.
+// The total is adjusted accordingly. It returns the updated value and true if an overflow occurred.
+func (z MultiGas) With(kind ResourceKind, amount uint64) (MultiGas, bool) {
+ res := z
+ newTotal, c := bits.Add64(z.total-z.gas[kind], amount, 0)
+ if c != 0 {
+ return z, true
+ }
+ res.gas[kind] = amount
+ res.total = newTotal
+ return res, false
+}
+
+// GetRefund gets the SSTORE refund computed at the end of the transaction.
+func (z MultiGas) GetRefund() uint64 {
+ return z.refund
+}
+
+// WithRefund returns a copy of z with its refund set to amount.
+func (z MultiGas) WithRefund(amount uint64) MultiGas {
+ res := z
+ res.refund = amount
+ return res
+}
+
+// SafeAdd returns a copy of z with the per-kind, total, and refund gas
+// added to the values from x. It returns the updated value and true if
+// an overflow occurred.
+func (z MultiGas) SafeAdd(x MultiGas) (MultiGas, bool) {
+ res := z
+
+ for i := 0; i < int(NumResourceKind); i++ {
+ v, c := bits.Add64(res.gas[i], x.gas[i], 0)
+ if c != 0 {
+ return z, true
+ }
+ res.gas[i] = v
+ }
+
+ t, c := bits.Add64(res.total, x.total, 0)
+ if c != 0 {
+ return z, true
+ }
+ res.total = t
+
+ r, c := bits.Add64(res.refund, x.refund, 0)
+ if c != 0 {
+ return z, true
+ }
+ res.refund = r
+
+ return res, false
+}
+
+// SaturatingAdd returns a copy of z with the per-kind, total, and refund gas
+// added to the values from x. On overflow, the affected field(s) are clamped
+// to MaxUint64.
+func (z MultiGas) SaturatingAdd(x MultiGas) MultiGas {
+ res := z
+
+ for i := 0; i < int(NumResourceKind); i++ {
+ if v, c := bits.Add64(res.gas[i], x.gas[i], 0); c != 0 {
+ res.gas[i] = ^uint64(0) // clamp
+ } else {
+ res.gas[i] = v
+ }
+ }
+
+ if t, c := bits.Add64(res.total, x.total, 0); c != 0 {
+ res.total = ^uint64(0) // clamp
+ } else {
+ res.total = t
+ }
+
+ if r, c := bits.Add64(res.refund, x.refund, 0); c != 0 {
+ res.refund = ^uint64(0) // clamp
+ } else {
+ res.refund = r
+ }
+
+ return res
+}
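+
+// The two addition flavours differ only in how overflow is reported (a sketch,
+// not part of this change):
+//
+//	a := ComputationGas(math.MaxUint64)
+//	b := ComputationGas(1)
+//	_, overflow := a.SafeAdd(b) // overflow == true, a is returned unchanged
+//	c := a.SaturatingAdd(b)     // c.Get(ResourceKindComputation) == math.MaxUint64 (clamped)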
+
+// SaturatingAddInto adds x into z in place (per kind, total, and refund).
+// On overflow, the affected field(s) are clamped to MaxUint64.
+// This is a hot-path helper; the public immutable API remains preferred elsewhere.
+func (z *MultiGas) SaturatingAddInto(x MultiGas) {
+ for i := 0; i < int(NumResourceKind); i++ {
+ if v, c := bits.Add64(z.gas[i], x.gas[i], 0); c != 0 {
+ z.gas[i] = ^uint64(0) // clamp
+ } else {
+ z.gas[i] = v
+ }
+ }
+ if t, c := bits.Add64(z.total, x.total, 0); c != 0 {
+ z.total = ^uint64(0) // clamp
+ } else {
+ z.total = t
+ }
+ if r, c := bits.Add64(z.refund, x.refund, 0); c != 0 {
+ z.refund = ^uint64(0) // clamp
+ } else {
+ z.refund = r
+ }
+}
+
+// SafeSub returns a copy of z with the per-kind, total, and refund gas
+// subtracted by the values from x. It returns the updated value and true if
+// an underflow occurred.
+func (z MultiGas) SafeSub(x MultiGas) (MultiGas, bool) {
+ res := z
+
+ for i := 0; i < int(NumResourceKind); i++ {
+ v, b := bits.Sub64(res.gas[i], x.gas[i], 0)
+ if b != 0 {
+ return z, true
+ }
+ res.gas[i] = v
+ }
+
+ t, b := bits.Sub64(res.total, x.total, 0)
+ if b != 0 {
+ return z, true
+ }
+ res.total = t
+
+ r, b := bits.Sub64(res.refund, x.refund, 0)
+ if b != 0 {
+ return z, true
+ }
+ res.refund = r
+
+ return res, false
+}
+
+// SaturatingSub returns a copy of z with the per-kind, total, and refund gas
+// subtracted by the values from x. On underflow, the affected field(s) are
+// clamped to zero.
+func (z MultiGas) SaturatingSub(x MultiGas) MultiGas {
+ res := z
+
+ for i := 0; i < int(NumResourceKind); i++ {
+ if v, c := bits.Sub64(res.gas[i], x.gas[i], 0); c != 0 {
+ res.gas[i] = uint64(0) // clamp
+ } else {
+ res.gas[i] = v
+ }
+ }
+
+ if t, c := bits.Sub64(res.total, x.total, 0); c != 0 {
+ res.total = uint64(0) // clamp
+ } else {
+ res.total = t
+ }
+
+ if r, c := bits.Sub64(res.refund, x.refund, 0); c != 0 {
+ res.refund = uint64(0) // clamp
+ } else {
+ res.refund = r
+ }
+
+ return res
+}
+
+// SafeIncrement returns a copy of z with the given resource kind
+// and the total incremented by gas. It returns the updated value and true if
+// an overflow occurred.
+func (z MultiGas) SafeIncrement(kind ResourceKind, gas uint64) (MultiGas, bool) {
+ res := z
+
+ newValue, c := bits.Add64(z.gas[kind], gas, 0)
+ if c != 0 {
+ return res, true
+ }
+
+ newTotal, c := bits.Add64(z.total, gas, 0)
+ if c != 0 {
+ return res, true
+ }
+
+ res.gas[kind] = newValue
+ res.total = newTotal
+ return res, false
+}
+
+// SaturatingIncrement returns a copy of z with the given resource kind
+// and the total incremented by gas. On overflow, the field(s) are clamped to MaxUint64.
+func (z MultiGas) SaturatingIncrement(kind ResourceKind, gas uint64) MultiGas {
+ res := z
+
+ if v, c := bits.Add64(res.gas[kind], gas, 0); c != 0 {
+ res.gas[kind] = ^uint64(0) // clamp
+ } else {
+ res.gas[kind] = v
+ }
+
+ if t, c := bits.Add64(res.total, gas, 0); c != 0 {
+ res.total = ^uint64(0) // clamp
+ } else {
+ res.total = t
+ }
+
+ return res
+}
+
+// SaturatingIncrementInto increments the given resource kind and the total
+// in place by gas. On overflow, the affected field(s) are clamped to MaxUint64.
+// Unlike SaturatingIncrement, this method mutates the receiver directly and
+// is intended for VM hot paths where avoiding value copies is critical.
+func (z *MultiGas) SaturatingIncrementInto(kind ResourceKind, gas uint64) {
+ if v, c := bits.Add64(z.gas[kind], gas, 0); c != 0 {
+ z.gas[kind] = ^uint64(0)
+ } else {
+ z.gas[kind] = v
+ }
+
+ if t, c := bits.Add64(z.total, gas, 0); c != 0 {
+ z.total = ^uint64(0)
+ } else {
+ z.total = t
+ }
+}
+
+// SingleGas returns the single-dimensional total gas.
+func (z MultiGas) SingleGas() uint64 {
+ return z.total
+}
+
+func (z MultiGas) IsZero() bool {
+ return z.total == 0 && z.refund == 0 && z.gas == [NumResourceKind]uint64{}
+}
+
+// multiGasJSON is an auxiliary type for JSON marshaling/unmarshaling of MultiGas.
+type multiGasJSON struct {
+ Unknown hexutil.Uint64 `json:"unknown"`
+ Computation hexutil.Uint64 `json:"computation"`
+ HistoryGrowth hexutil.Uint64 `json:"historyGrowth"`
+ StorageAccess hexutil.Uint64 `json:"storageAccess"`
+ StorageGrowth hexutil.Uint64 `json:"storageGrowth"`
+ L1Calldata hexutil.Uint64 `json:"l1Calldata"`
+ L2Calldata hexutil.Uint64 `json:"l2Calldata"`
+ WasmComputation hexutil.Uint64 `json:"wasmComputation"`
+ Refund hexutil.Uint64 `json:"refund"`
+ Total hexutil.Uint64 `json:"total"`
+}
+
+// MarshalJSON implements json.Marshaler for MultiGas.
+func (z MultiGas) MarshalJSON() ([]byte, error) {
+ return json.Marshal(multiGasJSON{
+ Unknown: hexutil.Uint64(z.gas[ResourceKindUnknown]),
+ Computation: hexutil.Uint64(z.gas[ResourceKindComputation]),
+ HistoryGrowth: hexutil.Uint64(z.gas[ResourceKindHistoryGrowth]),
+ StorageAccess: hexutil.Uint64(z.gas[ResourceKindStorageAccess]),
+ StorageGrowth: hexutil.Uint64(z.gas[ResourceKindStorageGrowth]),
+ L1Calldata: hexutil.Uint64(z.gas[ResourceKindL1Calldata]),
+ L2Calldata: hexutil.Uint64(z.gas[ResourceKindL2Calldata]),
+ WasmComputation: hexutil.Uint64(z.gas[ResourceKindWasmComputation]),
+ Refund: hexutil.Uint64(z.refund),
+ Total: hexutil.Uint64(z.total),
+ })
+}
+
+// UnmarshalJSON implements json.Unmarshaler for MultiGas.
+func (z *MultiGas) UnmarshalJSON(data []byte) error {
+ var j multiGasJSON
+ if err := json.Unmarshal(data, &j); err != nil {
+ return err
+ }
+ *z = ZeroGas()
+ z.gas[ResourceKindUnknown] = uint64(j.Unknown)
+ z.gas[ResourceKindComputation] = uint64(j.Computation)
+ z.gas[ResourceKindHistoryGrowth] = uint64(j.HistoryGrowth)
+ z.gas[ResourceKindStorageAccess] = uint64(j.StorageAccess)
+ z.gas[ResourceKindStorageGrowth] = uint64(j.StorageGrowth)
+ z.gas[ResourceKindL1Calldata] = uint64(j.L1Calldata)
+ z.gas[ResourceKindL2Calldata] = uint64(j.L2Calldata)
+ z.gas[ResourceKindWasmComputation] = uint64(j.WasmComputation)
+ z.refund = uint64(j.Refund)
+ z.total = uint64(j.Total)
+ return nil
+}
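+
+// For illustration, a MultiGas holding 21000 computation gas marshals to JSON
+// roughly as follows (all fields are hex-encoded via hexutil.Uint64):
+//
+//	{"unknown":"0x0","computation":"0x5208","historyGrowth":"0x0","storageAccess":"0x0",
+//	 "storageGrowth":"0x0","l1Calldata":"0x0","l2Calldata":"0x0","wasmComputation":"0x0",
+//	 "refund":"0x0","total":"0x5208"}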
+
+//
+//// IntrinsicMultiGas returns the intrinsic gas as a multi-gas.
+//func IntrinsicMultiGas(data []byte, accessList types.AccessList, authList []types.SetCodeAuthorization, isContractCreation, isHomestead, isEIP2028, isEIP3860 bool) (MultiGas, error) {
+// // Set the starting gas for the raw transaction
+// var gas MultiGas
+// if isContractCreation && isHomestead {
+// gas.SaturatingIncrementInto(ResourceKindComputation, params.TxGasContractCreation)
+// } else {
+// gas.SaturatingIncrementInto(ResourceKindComputation, params.TxGas)
+// }
+// dataLen := uint64(len(data))
+// // Bump the required gas by the amount of transactional data
+// if dataLen > 0 {
+// // Zero and non-zero bytes are priced differently
+// z := uint64(bytes.Count(data, []byte{0}))
+// nz := dataLen - z
+//
+// // Make sure we don't exceed uint64 for all data combinations
+// nonZeroGas := params.TxDataNonZeroGasFrontier
+// if isEIP2028 {
+// nonZeroGas = params.TxDataNonZeroGasEIP2028
+// }
+// if (math.MaxUint64-gas.SingleGas())/nonZeroGas < nz {
+// return ZeroGas(), vm.ErrGasUintOverflow
+// }
+// gas.SaturatingIncrementInto(ResourceKindL2Calldata, nz*nonZeroGas)
+//
+// if (math.MaxUint64-gas.SingleGas())/params.TxDataZeroGas < z {
+// return ZeroGas(), vm.ErrGasUintOverflow
+// }
+// gas.SaturatingIncrementInto(ResourceKindL2Calldata, z*params.TxDataZeroGas)
+//
+// if isContractCreation && isEIP3860 {
+// lenWords := toWordSize(dataLen)
+// if (math.MaxUint64-gas.SingleGas())/params.InitCodeWordGas < lenWords {
+// return ZeroGas(), vm.ErrGasUintOverflow
+// }
+// gas.SaturatingIncrementInto(ResourceKindComputation, lenWords*params.InitCodeWordGas)
+// }
+// }
+// if lenAccessList != nil {
+// gas.SaturatingIncrementInto(ResourceKindStorageAccess, lenAccessList*params.TxAccessListAddressGas)
+// gas.SaturatingIncrementInto(ResourceKindStorageAccess, uint64(accessList.StorageKeys())*params.TxAccessListStorageKeyGas)
+// }
+// if authList != nil {
+// gas.SaturatingIncrementInto(ResourceKindStorageGrowth, uint64(len(authList))*params.CallNewAccountGas)
+// }
+// return gas, nil
+//}
+
+// IntrinsicMultiGas returns the intrinsic gas as a multi-gas. (TODO: move to arb package)
+func IntrinsicMultiGas(data []byte, accessListLen, storageKeysLen uint64, isContractCreation bool, isEIP2, isEIP2028, isEIP3860, isEIP7623, isAATxn bool, authorizationsLen uint64) (MultiGas, uint64, bool) {
+ dataLen := uint64(len(data))
+ dataNonZeroLen := uint64(0)
+ for _, byt := range data {
+ if byt != 0 {
+ dataNonZeroLen++
+ }
+ }
+
+ gas := ZeroGas()
+ // Set the starting gas for the raw transaction
+ if isContractCreation && isEIP2 {
+ gas.SaturatingIncrementInto(ResourceKindComputation, params.TxGasContractCreation)
+ } else if isAATxn {
+ gas.SaturatingIncrementInto(ResourceKindComputation, params.TxAAGas)
+ } else {
+ gas.SaturatingIncrementInto(ResourceKindComputation, params.TxGas)
+ }
+ floorGas7623 := params.TxGas
+
+ // Bump the required gas by the amount of transactional data
+ if dataLen > 0 {
+ // Zero and non-zero bytes are priced differently
+ nz := dataNonZeroLen
+ // Make sure we don't exceed uint64 for all data combinations
+ nonZeroGas := params.TxDataNonZeroGasFrontier
+ if isEIP2028 {
+ nonZeroGas = params.TxDataNonZeroGasEIP2028
+ }
+
+ if (math.MaxUint64-gas.SingleGas())/nonZeroGas < nz {
+ return ZeroGas(), 0, true
+ }
+ gas.SaturatingIncrementInto(ResourceKindL2Calldata, nz*nonZeroGas)
+
+ z := dataLen - nz
+
+ if (math.MaxUint64-gas.SingleGas())/params.TxDataZeroGas < z {
+ return ZeroGas(), 0, true
+ }
+ gas.SaturatingIncrementInto(ResourceKindL2Calldata, z*params.TxDataZeroGas)
+
+ if isContractCreation && isEIP3860 {
+ lenWords := toWordSize(dataLen)
+ if (math.MaxUint64-gas.SingleGas())/params.InitCodeWordGas < lenWords {
+ return ZeroGas(), 0, true
+ }
+ gas.SaturatingIncrementInto(ResourceKindComputation, lenWords*params.InitCodeWordGas)
+ }
+
+ if isEIP7623 {
+ tokenLen := dataLen + 3*nz
+ dataGas, overflow := math.SafeMul(tokenLen, params.TxTotalCostFloorPerToken)
+ if overflow {
+ return ZeroGas(), 0, true
+ }
+ floorGas7623, overflow = math.SafeAdd(floorGas7623, dataGas)
+ if overflow {
+ return ZeroGas(), 0, true
+ }
+ }
+ }
+ if accessListLen > 0 {
+ gas.SaturatingIncrementInto(ResourceKindStorageAccess, accessListLen*params.TxAccessListAddressGas)
+ gas.SaturatingIncrementInto(ResourceKindStorageAccess, storageKeysLen*params.TxAccessListStorageKeyGas)
+ }
+
+ if authorizationsLen > 0 {
+ gas.SaturatingIncrementInto(ResourceKindStorageGrowth, authorizationsLen*params.CallNewAccountGas)
+ }
+
+ return gas, floorGas7623, false
+}
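+
+// Illustrative call (a sketch, not part of this change): a plain value transfer
+// with no calldata, access list, or authorizations.
+//
+//	mg, floorGas, overflow := IntrinsicMultiGas(nil, 0, 0,
+//		false /*create*/, true /*eip2*/, true /*eip2028*/, true /*eip3860*/, true /*eip7623*/, false /*aa*/, 0)
+//	// overflow == false; mg.SingleGas() == params.TxGas; floorGas == params.TxGas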
+
+func toWordSize(size uint64) uint64 {
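+	// Round size up to whole 32-byte EVM words, e.g. 1 -> 1, 32 -> 1, 33 -> 2.
+	// The guard below avoids overflowing size+31 near the top of the uint64 range.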
+ if size > math.MaxUint64-31 {
+ return math.MaxUint64/32 + 1
+ }
+ return (size + 31) / 32
+}
+
+func (z MultiGas) String() string {
+ s := "mG:\n\t"
+ for i := 0; i < int(NumResourceKind); i++ {
+ s += fmt.Sprintf("%s: %d, ", ResourceKind(i).String(), z.gas[ResourceKind(i)])
+ if i%4 == 0 && i > 0 {
+ s += "\n\t"
+ }
+ }
+ s += fmt.Sprintf("Total: %d, Refund: %d", z.total, z.refund)
+ return s
+}
diff --git a/arb/osver/arbos_versions.go b/arb/osver/arbos_versions.go
new file mode 100644
index 00000000000..1458af3f223
--- /dev/null
+++ b/arb/osver/arbos_versions.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package osver
+
+const (
+ ArbosVersion_0 = uint64(0)
+ ArbosVersion_1 = uint64(1)
+ ArbosVersion_2 = uint64(2)
+ ArbosVersion_3 = uint64(3)
+ ArbosVersion_4 = uint64(4)
+ ArbosVersion_5 = uint64(5)
+ ArbosVersion_6 = uint64(6)
+ ArbosVersion_7 = uint64(7)
+ ArbosVersion_8 = uint64(8)
+ ArbosVersion_9 = uint64(9)
+ ArbosVersion_10 = uint64(10)
+ ArbosVersion_11 = uint64(11)
+ ArbosVersion_20 = uint64(20)
+ ArbosVersion_30 = uint64(30)
+ ArbosVersion_31 = uint64(31)
+ ArbosVersion_32 = uint64(32)
+ ArbosVersion_40 = uint64(40)
+ ArbosVersion_41 = uint64(41)
+ ArbosVersion_50 = uint64(50)
+ ArbosVersion_51 = uint64(51)
+)
+
+const ArbosVersion_FixRedeemGas = ArbosVersion_11
+const ArbosVersion_Stylus = ArbosVersion_30
+const ArbosVersion_StylusFixes = ArbosVersion_31
+const ArbosVersion_StylusChargingFixes = ArbosVersion_32
+const MaxArbosVersionSupported = ArbosVersion_51
+const MaxDebugArbosVersionSupported = ArbosVersion_51
+const ArbosVersion_Dia = ArbosVersion_50
+
+const ArbosVersion_MultiConstraintFix = ArbosVersion_51
+
+func IsStylus(arbosVersion uint64) bool {
+ return arbosVersion >= ArbosVersion_Stylus
+}
diff --git a/arb/receipt/arb_receipt.go b/arb/receipt/arb_receipt.go
new file mode 100644
index 00000000000..be82dd64085
--- /dev/null
+++ b/arb/receipt/arb_receipt.go
@@ -0,0 +1 @@
+package receipt
diff --git a/arb/targets/targets.go b/arb/targets/targets.go
new file mode 100644
index 00000000000..927504f67c6
--- /dev/null
+++ b/arb/targets/targets.go
@@ -0,0 +1,28 @@
+package targets
+
+import (
+ "bytes"
+ "os"
+
+ "github.com/erigontech/erigon/common/length"
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/db/kv/dbcfg"
+ "github.com/erigontech/erigon/db/kv/memdb"
+)
+
+var CodePrefix = []byte("c") // CodePrefix + code hash -> account code
+
+func NewMemoryDatabase() kv.RwDB {
+ tmp := os.TempDir()
+
+ return memdb.New(nil, tmp, dbcfg.ChainDB)
+}
+
+// IsCodeKey reports whether the given byte slice is the key of contract code;
+// if so, it also returns the raw code hash.
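+// A code key is CodePrefix ("c") followed by the 32-byte code hash, so a valid
+// code key has len(key) == 1+32 and the returned hash is key[1:].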
+func IsCodeKey(key []byte) (bool, []byte) {
+ if bytes.HasPrefix(key, CodePrefix) && len(key) == length.Hash+len(CodePrefix) {
+ return true, key[len(CodePrefix):]
+ }
+ return false, nil
+}
diff --git a/arb/tx_timebosted.go b/arb/tx_timebosted.go
new file mode 100644
index 00000000000..c19b536b347
--- /dev/null
+++ b/arb/tx_timebosted.go
@@ -0,0 +1,11 @@
+package arb
+
+type NoTimeBoosted bool
+
+func (tx *NoTimeBoosted) IsTimeBoosted() *bool {
+ return nil
+}
+
+func (tx *NoTimeBoosted) SetTimeboosted(_ *bool) {
+
+}
diff --git a/arb/txn/addresses.go b/arb/txn/addresses.go
new file mode 100644
index 00000000000..e82af72c3a7
--- /dev/null
+++ b/arb/txn/addresses.go
@@ -0,0 +1,24 @@
+package txn
+
+import "github.com/erigontech/erigon/common"
+
+var ArbosAddress = common.HexToAddress("0xa4b05")
+var ArbosStateAddress = common.HexToAddress("0xA4B05FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
+var ArbSysAddress = common.HexToAddress("0x64")
+var ArbInfoAddress = common.HexToAddress("0x65")
+var ArbAddressTableAddress = common.HexToAddress("0x66")
+var ArbBLSAddress = common.HexToAddress("0x67")
+var ArbFunctionTableAddress = common.HexToAddress("0x68")
+var ArbosTestAddress = common.HexToAddress("0x69")
+var ArbGasInfoAddress = common.HexToAddress("0x6c")
+var ArbOwnerPublicAddress = common.HexToAddress("0x6b")
+var ArbAggregatorAddress = common.HexToAddress("0x6d")
+var ArbRetryableTxAddress = common.HexToAddress("0x6e")
+var ArbStatisticsAddress = common.HexToAddress("0x6f")
+var ArbOwnerAddress = common.HexToAddress("0x70")
+var ArbWasmAddress = common.HexToAddress("0x71")
+var ArbWasmCacheAddress = common.HexToAddress("0x72")
+var ArbNativeTokenManagerAddress = common.HexToAddress("0x73")
+var NodeInterfaceAddress = common.HexToAddress("0xc8")
+var NodeInterfaceDebugAddress = common.HexToAddress("0xc9")
+var ArbDebugAddress = common.HexToAddress("0xff")
diff --git a/arb/txn/arb_tx.go b/arb/txn/arb_tx.go
new file mode 100644
index 00000000000..d7a9dcfc4bd
--- /dev/null
+++ b/arb/txn/arb_tx.go
@@ -0,0 +1,661 @@
+package txn
+
+/*
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "sync/atomic"
+ "time"
+
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/common"
+ cmath "github.com/erigontech/erigon/common/math"
+ "github.com/erigontech/erigon/execution/rlp"
+)
+
+var (
+ ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
+ errShortTypedTx = errors.New("typed transaction too short")
+ errInvalidYParity = errors.New("'yParity' field must be 0 or 1")
+ errVYParityMismatch = errors.New("'v' and 'yParity' fields do not match")
+ errVYParityMissing = errors.New("missing 'yParity' or 'v' field in transaction")
+)
+
+// getPooledBuffer retrieves a buffer from the pool and creates a byte slice of the
+// requested size from it.
+//
+// The caller should return the *bytes.Buffer object back into encodeBufferPool after use!
+// The returned byte slice must not be used after returning the buffer.
+func getPooledBuffer(size uint64) ([]byte, *bytes.Buffer, error) {
+ if size > math.MaxInt {
+ return nil, nil, fmt.Errorf("can't get buffer of size %d", size)
+ }
+ buf := types.EncodeBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ buf.Grow(int(size))
+ b := buf.Bytes()[:int(size)]
+ return b, buf, nil
+}
+
+// ArbTx is an Arbitrum transaction.
+type ArbTx struct {
+ types.BlobTxWrapper
+ inner types.Transaction // Consensus contents of a transaction
+ // sidecar *BlobTxSidecar
+ time time.Time // Time first seen locally (spam avoidance)
+
+ // Arbitrum cache: must be atomically accessed
+ CalldataUnits uint64
+
+ // caches
+ hash atomic.Value
+ size atomic.Value
+ from atomic.Value
+}
+
+// NewArbTx creates a new transaction.
+func NewArbTx(inner types.Transaction) *ArbTx {
+ tx := new(ArbTx)
+ tx.setDecoded(inner.Unwrap(), 0)
+ return tx
+}
+
+// EncodeRLP implements rlp.Encoder
+// func (tx *ArbTx) EncodeRLP(w io.Writer) error {
+// if tx.Type() == LegacyTxType {
+// return rlp.Encode(w, tx.inner)
+// }
+// // It's an EIP-2718 typed TX envelope.
+// buf := encodeBufferPool.Get().(*bytes.Buffer)
+// defer encodeBufferPool.Put(buf)
+// buf.Reset()
+// if err := tx.encodeTyped(buf); err != nil {
+// return err
+// }
+// return rlp.Encode(w, buf.Bytes())
+// }
+
+// encodeTyped writes the canonical encoding of a typed transaction to w.
+func (tx *ArbTx) encodeTyped(w *bytes.Buffer) error {
+ w.WriteByte(tx.Type())
+ return tx.inner.EncodeRLP(w)
+}
+
+func (tx *ArbTx) AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules) (*types.Message, error) {
+ msg, err := tx.Tx.AsMessage(s, baseFee, rules)
+ if err == nil {
+ msg.Tx = tx
+ }
+ return msg, err
+}
+
+// MarshalBinary returns the canonical encoding of the transaction.
+// For legacy transactions, it returns the RLP encoding. For EIP-2718 typed
+// transactions, it returns the type and payload.
+// func (tx *ArbTx) MarshalBinary() ([]byte, error) {
+// if tx.Type() == LegacyTxType {
+// return rlp.EncodeToBytes(tx.inner)
+// }
+// var buf bytes.Buffer
+// err := tx.encodeTyped(&buf)
+// return buf.Bytes(), err
+// }
+
+// DecodeRLP implements rlp.Decoder
+func (tx *ArbTx) DecodeRLP(s *rlp.Stream) error {
+ kind, size, err := s.Kind()
+ switch {
+ case err != nil:
+ return err
+ case kind == rlp.List:
+ // It's a legacy transaction.
+ var inner types.LegacyTx
+ err := s.Decode(&inner)
+ if err == nil {
+ tx.setDecoded(&inner, rlp.ListSize(size))
+ }
+ return err
+ case kind == rlp.Byte:
+ return errShortTypedTx
+ default:
+ //b, buf, err := getPooledBuffer(size)
+ //if err != nil {
+ // return err
+ //}
+ //defer encodeBufferPool.Put(buf)
+ //s.
+
+ // It's an EIP-2718 typed TX envelope.
+ // First read the tx payload bytes into a temporary buffer.
+ b, err := s.Bytes()
+ if err != nil {
+ return err
+ }
+ // Now decode the inner transaction.
+ inner, err := tx.decodeTyped(b, true)
+ if err == nil {
+ tx.setDecoded(inner, size)
+ }
+ return err
+ }
+}
+
+// UnmarshalBinary decodes the canonical encoding of transactions.
+// It supports legacy RLP transactions and EIP-2718 typed transactions.
+func (tx *ArbTx) UnmarshalBinary(b []byte) error {
+ if len(b) > 0 && b[0] > 0x7f {
+ // It's a legacy transaction.
+ var data types.LegacyTx
+ err := rlp.DecodeBytes(b, &data)
+ if err != nil {
+ return err
+ }
+ tx.setDecoded(&data, uint64(len(b)))
+ return nil
+ }
+ // It's an EIP-2718 typed transaction envelope.
+ inner, err := tx.decodeTyped(b, false)
+ if err != nil {
+ return err
+ }
+ tx.setDecoded(inner, uint64(len(b)))
+ return nil
+}
+
+// decodeTyped decodes a typed transaction from the canonical format.
+func (tx *ArbTx) decodeTyped(b []byte, arbParsing bool) (types.Transaction, error) {
+ if len(b) <= 1 {
+ return nil, errShortTypedTx
+ }
+ var inner types.Transaction
+ if arbParsing {
+ switch b[0] {
+ case ArbitrumDepositTxType:
+ inner = new(ArbitrumDepositTx)
+ case ArbitrumInternalTxType:
+ inner = new(ArbitrumInternalTx)
+ case ArbitrumUnsignedTxType:
+ inner = new(ArbitrumUnsignedTx)
+ case ArbitrumContractTxType:
+ inner = new(ArbitrumContractTx)
+ case ArbitrumRetryTxType:
+ inner = new(ArbitrumRetryTx)
+ case ArbitrumSubmitRetryableTxType:
+ inner = new(ArbitrumSubmitRetryableTx)
+ case ArbitrumLegacyTxType:
+ inner = new(ArbitrumLegacyTxData)
+ default:
+ arbParsing = false
+ }
+ }
+ if !arbParsing {
+ switch b[0] {
+ case types.AccessListTxType:
+ inner = new(types.AccessListTx)
+ case types.DynamicFeeTxType:
+ inner = new(types.DynamicFeeTransaction)
+ case types.BlobTxType:
+ inner = new(types.BlobTx)
+ default:
+ return nil, types.ErrTxTypeNotSupported
+ }
+ }
+ s := rlp.NewStream(bytes.NewReader(b[1:]), uint64(len(b)-1))
+ err := inner.DecodeRLP(s)
+ return inner, err
+}
+
+// setDecoded sets the inner transaction and size after decoding.
+func (tx *ArbTx) setDecoded(inner types.Transaction, size uint64) {
+ tx.inner = inner
+ tx.time = time.Now()
+ if size > 0 {
+ tx.size.Store(size)
+ }
+}
+
+// Protected says whether the transaction is replay-protected.
+func (tx *ArbTx) Protected() bool {
+ switch tx := tx.inner.(type) {
+ case *types.LegacyTx:
+ return !tx.V.IsZero() && types.IsProtectedV(&tx.V)
+ default:
+ return true
+ }
+}
+
+// Type returns the transaction type.
+func (tx *ArbTx) Type() uint8 {
+ return tx.inner.Type()
+ //return tx.inner.txType()
+}
+
+func (tx *ArbTx) GetInner() types.Transaction {
+ return tx.inner
+}
+
+// ChainId returns the EIP155 chain ID of the transaction. The return value will always be
+// non-nil. For legacy transactions which are not replay-protected, the return value is
+// zero.
+func (tx *ArbTx) ChainId() *big.Int {
+ return tx.inner.GetChainID().ToBig()
+}
+
+// Data returns the input data of the transaction.
+func (tx *ArbTx) Data() []byte { return tx.inner.GetData() }
+
+// AccessList returns the access list of the transaction.
+func (tx *ArbTx) AccessList() types.AccessList { return tx.inner.GetAccessList() }
+
+// Gas returns the gas limit of the transaction.
+func (tx *ArbTx) Gas() uint64 { return tx.inner.GetGasLimit() }
+
+// GasPrice returns the gas price of the transaction.
+// TODO same as .GasFeeCap()?
+func (tx *ArbTx) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.GetFeeCap().ToBig()) }
+
+// GasTipCap returns the gasTipCap per gas of the transaction.
+func (tx *ArbTx) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.GetTipCap().ToBig()) }
+
+// GasFeeCap returns the fee cap per gas of the transaction.
+func (tx *ArbTx) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.GetFeeCap().ToBig()) }
+
+// Value returns the ether amount of the transaction.
+func (tx *ArbTx) Value() *big.Int { return new(big.Int).Set(tx.inner.GetValue().ToBig()) }
+
+// Nonce returns the sender account nonce of the transaction.
+func (tx *ArbTx) Nonce() uint64 { return tx.inner.GetNonce() }
+
+// To returns the recipient address of the transaction.
+// For contract-creation transactions, To returns nil.
+func (tx *ArbTx) To() *common.Address {
+ return copyAddressPtr(tx.inner.GetTo())
+}
+
+// Cost returns (gas * gasPrice) + (blobGas * blobGasPrice) + value.
+func (tx *ArbTx) Cost() *big.Int {
+ total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas()))
+ if tx.Type() == types.BlobTxType {
+ total.Add(total, new(big.Int).Mul(tx.BlobGasFeeCap(), new(big.Int).SetUint64(tx.BlobGas())))
+ }
+ total.Add(total, tx.Value())
+ return total
+}
+
+// GasFeeCapCmp compares the fee cap of two transactions.
+func (tx *ArbTx) GasFeeCapCmp(other *ArbTx) int {
+ return tx.inner.GetFeeCap().ToBig().Cmp(other.inner.GetFeeCap().ToBig())
+}
+
+// GasFeeCapIntCmp compares the fee cap of the transaction against the given fee cap.
+func (tx *ArbTx) GasFeeCapIntCmp(other *big.Int) int {
+ return tx.inner.GetFeeCap().ToBig().Cmp(other)
+}
+
+// GasTipCapCmp compares the gasTipCap of two transactions.
+func (tx *ArbTx) GasTipCapCmp(other *ArbTx) int {
+ return tx.inner.GetTipCap().Cmp(other.inner.GetTipCap())
+}
+
+// GasTipCapIntCmp compares the gasTipCap of the transaction against the given gasTipCap.
+func (tx *ArbTx) GasTipCapIntCmp(other *big.Int) int {
+ return tx.inner.GetTipCap().ToBig().Cmp(other)
+}
+
+// EffectiveGasTip returns the effective miner gasTipCap for the given base fee.
+// Note: if the effective gasTipCap is negative, this method returns both the
+// actual negative value _and_ ErrGasFeeCapTooLow.
+func (tx *ArbTx) EffectiveGasTip(baseFee *big.Int) (*big.Int, error) {
+ if baseFee == nil {
+ return tx.GasTipCap(), nil
+ }
+ var err error
+ gasFeeCap := tx.GasFeeCap()
+ if gasFeeCap.Cmp(baseFee) == -1 {
+ err = ErrGasFeeCapTooLow
+ }
+ minn := tx.GasTipCap()
+ gasCap := gasFeeCap.Sub(gasFeeCap, baseFee)
+ if minn.Cmp(gasCap) > 0 {
+ minn = gasCap
+ }
+ return minn, err
+}
+
+// EffectiveGasTipValue is identical to EffectiveGasTip, but does not return an
+// error in case the effective gasTipCap is negative
+func (tx *ArbTx) EffectiveGasTipValue(baseFee *big.Int) *big.Int {
+ effectiveTip, _ := tx.EffectiveGasTip(baseFee)
+ return effectiveTip
+}
+
+// EffectiveGasTipCmp compares the effective gasTipCap of two transactions assuming the given base fee.
+func (tx *ArbTx) EffectiveGasTipCmp(other *ArbTx, baseFee *big.Int) int {
+ if baseFee == nil {
+ return tx.GasTipCapCmp(other)
+ }
+ return tx.EffectiveGasTipValue(baseFee).Cmp(other.EffectiveGasTipValue(baseFee))
+}
+
+// EffectiveGasTipIntCmp compares the effective gasTipCap of a transaction to the given gasTipCap.
+func (tx *ArbTx) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) int {
+ if baseFee == nil {
+ return tx.GasTipCapIntCmp(other)
+ }
+ return tx.EffectiveGasTipValue(baseFee).Cmp(other)
+}
+
+// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise.
+func (tx *ArbTx) BlobGas() uint64 {
+ if blobtx, ok := tx.inner.(*types.BlobTx); ok {
+ return blobtx.GetBlobGas()
+ }
+ return 0
+}
+
+// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise.
+func (tx *ArbTx) BlobGasFeeCap() *big.Int {
+ if blobtx, ok := tx.inner.(*types.BlobTx); ok {
+ return blobtx.GetFeeCap().ToBig()
+ }
+ return nil
+}
+
+// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
+func (tx *ArbTx) BlobHashes() []common.Hash {
+ if blobtx, ok := tx.inner.(*types.BlobTx); ok {
+ return blobtx.GetBlobHashes()
+ }
+ return nil
+}
+
+// BlobTxSidecar returns the sidecar of a blob transaction, nil otherwise.
+func (tx *ArbTx) BlobTxSidecar() *types.BlobTxWrapper {
+ //if blobtx, ok := tx.inner.(*BlobTx); ok {
+ // //return blobtx.Get
+ //}
+ return &tx.BlobTxWrapper
+}
+
+// BlobGasFeeCapCmp compares the blob fee cap of two transactions.
+func (tx *ArbTx) BlobGasFeeCapCmp(other *ArbTx) int {
+ return tx.BlobGasFeeCap().Cmp(other.BlobGasFeeCap())
+}
+
+// BlobGasFeeCapIntCmp compares the blob fee cap of the transaction against the given blob fee cap.
+func (tx *ArbTx) BlobGasFeeCapIntCmp(other *big.Int) int {
+ return tx.BlobGasFeeCap().Cmp(other)
+}
+
+//// WithoutBlobTxSidecar returns a copy of tx with the blob sidecar removed.
+//func (tx *ArbTx) WithoutBlobTxSidecar() *ArbTx {
+// blobtx, ok := tx.inner.(*BlobTx)
+// if !ok {
+// return tx
+// }
+// cpy := &ArbTx{
+// inner: blobtx.withoutSidecar(),
+// time: tx.time,
+// }
+// // Note: tx.size cache not carried over because the sidecar is included in size!
+// if h := tx.hash.Load(); h != nil {
+// cpy.hash.Store(h)
+// }
+// if f := tx.from.Load(); f != nil {
+// cpy.from.Store(f)
+// }
+// return cpy
+//}
+
+// BlobTxSidecar contains the blobs of a blob transaction.
+// type BlobTxSidecar struct {
+// Blobs []kzg4844.Blob // Blobs needed by the blob pool
+// Commitments []kzg4844.KZGCommitment // Commitments needed by the blob pool
+// Proofs []kzg4844.KZGProof // Proofs needed by the blob pool
+// }
+
+// // BlobHashes computes the blob hashes of the given blobs.
+// func (sc *BlobTxSidecar) BlobHashes() []common.Hash {
+// hasher := sha256.New()
+// h := make([]common.Hash, len(sc.Commitments))
+// for i := range sc.Blobs {
+// h[i] = kzg4844.CalcBlobHashV1(hasher, &sc.Commitments[i])
+// }
+// return h
+// }
+
+// // encodedSize computes the RLP size of the sidecar elements. This does NOT return the
+// // encoded size of the BlobTxSidecar, it's just a helper for tx.Size().
+// func (sc *BlobTxSidecar) encodedSize() uint64 {
+// var blobs, commitments, proofs uint64
+// for i := range sc.Blobs {
+// blobs += rlp.BytesSize(sc.Blobs[i][:])
+// }
+// for i := range sc.Commitments {
+// commitments += rlp.BytesSize(sc.Commitments[i][:])
+// }
+// for i := range sc.Proofs {
+// proofs += rlp.BytesSize(sc.Proofs[i][:])
+// }
+// return rlp.ListSize(blobs) + rlp.ListSize(commitments) + rlp.ListSize(proofs)
+// }
+
+// WithBlobTxSidecar returns a copy of tx with the blob sidecar added.
+// TODO figure out how to add the sidecar
+func (tx *ArbTx) WithBlobTxSidecar(sideCar *types.BlobTxWrapper) *ArbTx {
+ //blobtx, ok := tx.inner.(*BlobTx)
+ //if !ok {
+ // return tx
+ //}
+ cpy := &ArbTx{
+ inner: tx.inner,
+ //inner: blobtx.withSidecar(sideCar),
+ // sidecar: sideCar,
+ time: tx.time,
+ }
+ // Note: tx.size cache not carried over because the sidecar is included in size!
+ if h := tx.hash.Load(); h != nil {
+ cpy.hash.Store(h)
+ }
+ if f := tx.from.Load(); f != nil {
+ cpy.from.Store(f)
+ }
+ return cpy
+}
+
+// SetTime sets the decoding time of a transaction. This is used by tests to set
+// arbitrary times and by persistent transaction pools when loading old txs from
+// disk.
+func (tx *ArbTx) SetTime(t time.Time) {
+ tx.time = t
+}
+
+// Time returns the time when the transaction was first seen on the network. It
+// is a heuristic to prefer mining older txs over newer ones, all other things being equal.
+func (tx *ArbTx) Time() time.Time {
+ return tx.time
+}
+
+// Hash returns the transaction hash.
+func (tx *ArbTx) Hash() common.Hash {
+ if hash := tx.hash.Load(); hash != nil {
+ return hash.(common.Hash)
+ }
+
+ var h common.Hash
+ if tx.Type() == types.LegacyTxType {
+ h = rlp.RlpHash(tx.inner)
+ } else if tx.Type() == ArbitrumLegacyTxType {
+ h = tx.inner.(*ArbitrumLegacyTxData).HashOverride
+ } else {
+ h = types.PrefixedRlpHash(tx.Type(), tx.inner)
+ }
+ tx.hash.Store(h)
+ return h
+}
+
+// Size returns the true encoded storage size of the transaction, either by encoding
+// and returning it, or returning a previously cached value.
+// func (tx *ArbTx) Size() uint64 {
+// if size := tx.size.Load(); size != nil {
+// return size.(uint64)
+// }
+
+// // Cache miss, encode and cache.
+// // Note we rely on the assumption that all tx.inner values are RLP-encoded!
+// c := writeCounter(0)
+// rlp.Encode(&c, &tx.inner)
+// size := uint64(c)
+
+// // For blob transactions, add the size of the blob content and the outer list of the
+// // tx + sidecar encoding.
+// if sc := tx.BlobTxSidecar(); sc != nil {
+// size += rlp.ListSize(sc.encodedSize())
+// }
+
+// // For typed transactions, the encoding also includes the leading type byte.
+// if tx.Type() != LegacyTxType {
+// size += 1
+// }
+
+// tx.size.Store(size)
+// return size
+// }
+
+// WithSignature returns a new transaction with the given signature.
+// This signature needs to be in the [R || S || V] format where V is 0 or 1.
+// func (tx *ArbTx) WithSignature(signer Signer, sig []byte) (*ArbTx, error) {
+// r, s, v, err := signer.SignatureValues(tx, sig)
+// if err != nil {
+// return nil, err
+// }
+// if r == nil || s == nil || v == nil {
+// return nil, fmt.Errorf("%w: r: %s, s: %s, v: %s", ErrInvalidSig, r, s, v)
+// }
+// cpy := tx.inner.copy()
+// cpy.setSignatureValues(signer.ChainID(), v, r, s)
+// return &ArbTx{inner: cpy, time: tx.time}, nil
+// }
+
+// ArbTxs implements DerivableList for transactions.
+type ArbTxs []*ArbTx
+
+func WrapArbTransactions(txs ArbTxs) types.Transactions {
+ txns := make([]types.Transaction, len(txs))
+ for i := 0; i < len(txs); i++ {
+ txns[i] = types.Transaction(txs[i])
+ }
+ return txns
+}
+
+// Len returns the length of s.
+func (s ArbTxs) Len() int { return len(s) }
+
+// EncodeIndex encodes the i'th transaction to w. Note that this does not check for errors
+// because we assume that *ArbTx will only ever contain valid txs that were either
+// constructed by decoding or via public API in this package.
+func (s ArbTxs) EncodeIndex(i int, w *bytes.Buffer) {
+ tx := s[i]
+
+ switch tx.Type() {
+ // case ArbitrumLegacyTxType:
+ // arbData := tx.inner.(*ArbitrumLegacyTxData) //
+ // arbData.EncodeOnlyLegacyInto(w)
+ case ArbitrumLegacyTxType, types.LegacyTxType:
+ rlp.Encode(w, tx.inner)
+ default:
+ tx.encodeTyped(w)
+ }
+}
+
+// TxDifference returns a new set which is the difference between a and b.
+// func TxDifference(a, b ArbTxs) ArbTxs {
+// keep := make(ArbTxs, 0, len(a))
+
+// remove := make(map[common.Hash]struct{})
+// for _, tx := range b {
+// remove[tx.Hash()] = struct{}{}
+// }
+
+// for _, tx := range a {
+// if _, ok := remove[tx.Hash()]; !ok {
+// keep = append(keep, tx)
+// }
+// }
+
+// return keep
+// }
+
+// HashDifference returns a new set which is the difference between a and b.
+func HashDifference(a, b []common.Hash) []common.Hash {
+ keep := make([]common.Hash, 0, len(a))
+
+ remove := make(map[common.Hash]struct{})
+ for _, hash := range b {
+ remove[hash] = struct{}{}
+ }
+
+ for _, hash := range a {
+ if _, ok := remove[hash]; !ok {
+ keep = append(keep, hash)
+ }
+ }
+
+ return keep
+}
+
+// TxByNonce implements the sort interface to allow sorting a list of transactions
+// by their nonces. This is usually only useful for sorting transactions from a
+// single account, otherwise a nonce comparison doesn't make much sense.
+// type TxByNonce ArbTxs
+
+// func (s TxByNonce) Len() int { return len(s) }
+// func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() }
+// func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// copyAddressPtr copies an address.
+func copyAddressPtr(a *common.Address) *common.Address {
+ if a == nil {
+ return nil
+ }
+ cpy := *a
+ return &cpy
+}
+
+// // TransactionToMessage converts a transaction into a Message.
+func TransactionToMessage(tx types.Transaction, s ArbitrumSigner, baseFee *big.Int, runmode types.MessageRunMode) (msg *types.Message, err error) {
+ // tx.AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules)
+ msg = &types.Message{
+ TxRunMode: runmode,
+ Tx: tx,
+ }
+ msg.SetNonce(tx.GetNonce())
+ msg.SetGasLimit(tx.GetGasLimit())
+ msg.SetGasPrice(tx.GetFeeCap())
+ msg.SetFeeCap(tx.GetFeeCap())
+ msg.SetTip(tx.GetTipCap())
+ msg.SetTo(tx.GetTo())
+ msg.SetAmount(tx.GetValue())
+ msg.SetData(tx.GetData())
+ msg.SetAccessList(tx.GetAccessList())
+ msg.SetSkipAccountCheck(false)
+ msg.SetBlobHashes(tx.GetBlobHashes())
+
+ // If baseFee provided, set gasPrice to effectiveGasPrice.
+ if baseFee != nil {
+ var gasPrice uint256.Int
+
+ gasPrice.SetFromBig(cmath.BigMin(msg.GasPrice().ToBig().Add(msg.TipCap().ToBig(), baseFee), msg.FeeCap().ToBig()))
+ msg.SetGasPrice(&gasPrice)
+ }
+ sender, err := s.Sender(tx)
+ msg.SetFrom(&sender)
+ return msg, err
+}
+*/
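The commented-out TransactionToMessage above applies the usual EIP-1559 effective-gas-price rule when a base fee is supplied: the charged price is min(tipCap + baseFee, feeCap). A small arithmetic sketch of just that rule with math/big, independent of the erigon types:

```go
package main

import (
	"fmt"
	"math/big"
)

// effectiveGasPrice reproduces the rule used when baseFee != nil:
// min(tipCap + baseFee, feeCap).
func effectiveGasPrice(tipCap, feeCap, baseFee *big.Int) *big.Int {
	p := new(big.Int).Add(tipCap, baseFee)
	if p.Cmp(feeCap) > 0 {
		p.Set(feeCap)
	}
	return p
}

func main() {
	tip, feeCap := big.NewInt(2), big.NewInt(100)
	fmt.Println(effectiveGasPrice(tip, feeCap, big.NewInt(97))) // 99
	fmt.Println(effectiveGasPrice(tip, feeCap, big.NewInt(99))) // capped at 100
}
```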
diff --git a/arb/txn/arb_types.go b/arb/txn/arb_types.go
new file mode 100644
index 00000000000..b89e4c9907b
--- /dev/null
+++ b/arb/txn/arb_types.go
@@ -0,0 +1,2591 @@
+package txn
+
+/*
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/common/length"
+ "github.com/erigontech/erigon/common/log/v3"
+ cmath "github.com/erigontech/erigon/common/math"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/holiman/uint256"
+)
+
+// SkipAccountChecks returns true if nonce checks should be skipped for this transaction type.
+// This also disables requiring that the sender is an EOA and not a contract.
+func (tx *ArbTx) SkipAccountChecks() bool {
+ // return tx.inner.skipAccountChecks()
+ return skipAccountChecks[tx.Type()]
+}
+
+var fallbackErrorMsg = "missing trie node 0000000000000000000000000000000000000000000000000000000000000000 (path ) "
+var fallbackErrorCode = -32000
+
+func SetFallbackError(msg string, code int) {
+ fallbackErrorMsg = msg
+ fallbackErrorCode = code
+ log.Debug("setting fallback error", "msg", msg, "code", code)
+}
+
+type fallbackError struct{}
+
+func (f fallbackError) ErrorCode() int { return fallbackErrorCode }
+func (f fallbackError) Error() string { return fallbackErrorMsg }
+
+var ErrUseFallback = fallbackError{}
+
+type FallbackClient interface {
+ CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
+}
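The fallback error acts as a sentinel: a handler that cannot answer locally returns ErrUseFallback, and the caller is expected to replay the request through a FallbackClient. A hedged sketch of that control flow with a stub client (the eth_getBalance method name and the local closure are illustrative, not taken from this code):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// errUseFallback stands in for the ErrUseFallback sentinel above.
var errUseFallback = errors.New("use fallback")

// fallbackClient mirrors the FallbackClient interface shape.
type fallbackClient interface {
	CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
}

type stubClient struct{}

func (stubClient) CallContext(_ context.Context, result interface{}, method string, _ ...interface{}) error {
	// Pretend the remote answered; a real client would unmarshal into result.
	if s, ok := result.(*string); ok {
		*s = "answer from fallback for " + method
	}
	return nil
}

// getBalance shows the intended control flow: try locally, and on the
// sentinel error re-issue the call through the fallback client.
func getBalance(ctx context.Context, local func() (string, error), fb fallbackClient) (string, error) {
	if v, err := local(); !errors.Is(err, errUseFallback) {
		return v, err
	}
	var v string
	err := fb.CallContext(ctx, &v, "eth_getBalance")
	return v, err
}

func main() {
	local := func() (string, error) { return "", errUseFallback }
	v, _ := getBalance(context.Background(), local, stubClient{})
	fmt.Println(v)
}
```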
+
+// Transaction types.
+const (
+ ArbitrumDepositTxType byte = 0x64
+ ArbitrumUnsignedTxType byte = 0x65
+ ArbitrumContractTxType byte = 0x66
+ ArbitrumRetryTxType byte = 0x68
+ ArbitrumSubmitRetryableTxType byte = 0x69
+ ArbitrumInternalTxType byte = 0x6A
+ ArbitrumLegacyTxType byte = 0x78
+)
+
+var bigZero = big.NewInt(0)
+var uintZero = uint256.NewInt(0)
+
+var skipAccountChecks = [...]bool{
+ ArbitrumDepositTxType: true,
+ ArbitrumRetryTxType: true,
+ ArbitrumSubmitRetryableTxType: true,
+ ArbitrumInternalTxType: true,
+ ArbitrumContractTxType: true,
+ ArbitrumUnsignedTxType: false,
+}
+
+func init() {
+	types.RegisterTransaction(ArbitrumDepositTxType, createArbitrumDepositTxn)
+ types.RegisterTransaction(ArbitrumRetryTxType, createArbitrumRetryTx)
+ types.RegisterTransaction(ArbitrumSubmitRetryableTxType, createArbitrumSubmitRetryableTx)
+ types.RegisterTransaction(ArbitrumInternalTxType, createArbitrumInternalTx)
+ types.RegisterTransaction(ArbitrumContractTxType, createArbitrumContractTx)
+ types.RegisterTransaction(ArbitrumUnsignedTxType, createArbitrumUnsignedTx)
+ types.RegisterTransaction(ArbitrumLegacyTxType, createArbitrumLegacyTx)
+}
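The init block maps each Arbitrum type byte to a constructor so that a typed-envelope decoder can instantiate the right concrete struct from the first payload byte. A small sketch of the same dispatch pattern using a local registry map rather than the real types.RegisterTransaction API:

```go
package main

import (
	"errors"
	"fmt"
)

// Type bytes copied from the constants above.
const (
	arbitrumDepositTxType byte = 0x64
	arbitrumRetryTxType   byte = 0x68
)

type transaction interface{ Type() byte }

type depositTx struct{}

func (depositTx) Type() byte { return arbitrumDepositTxType }

type retryTx struct{}

func (retryTx) Type() byte { return arbitrumRetryTxType }

// registry mirrors the RegisterTransaction pattern: type byte -> constructor.
var registry = map[byte]func() transaction{
	arbitrumDepositTxType: func() transaction { return &depositTx{} },
	arbitrumRetryTxType:   func() transaction { return &retryTx{} },
}

// newByType is what a typed-envelope decoder would call after reading the
// first byte of the payload.
func newByType(t byte) (transaction, error) {
	ctor, ok := registry[t]
	if !ok {
		return nil, errors.New("unknown transaction type")
	}
	return ctor(), nil
}

func main() {
	tx, _ := newByType(0x68)
	fmt.Printf("0x%x\n", tx.Type()) // 0x68
}
```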
+
+// func (tx *LegacyTx) skipAccountChecks() bool { return false }
+// func (tx *AccessListTx) skipAccountChecks() bool { return false }
+// func (tx *DynamicFeeTransaction) skipAccountChecks() bool { return false }
+// func (tx *ArbitrumUnsignedTx) skipAccountChecks() bool { return false }
+// func (tx *ArbitrumContractTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumRetryTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumSubmitRetryableTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumDepositTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumInternalTx) skipAccountChecks() bool { return true }
+
+func createArbitrumUnsignedTx() types.Transaction {
+ return &ArbitrumUnsignedTx{}
+}
+
+type ArbitrumUnsignedTx struct {
+ NoTimeBoosted
+ ChainId *big.Int
+ From common.Address
+
+ Nonce uint64 // nonce of sender account
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+}
+
+func (tx *ArbitrumUnsignedTx) copy() types.Transaction {
+ cpy := &ArbitrumUnsignedTx{
+ ChainId: new(big.Int),
+ Nonce: tx.Nonce,
+ GasFeeCap: new(big.Int),
+ Gas: tx.Gas,
+ From: tx.From,
+ To: nil,
+ Value: new(big.Int),
+ Data: common.Copy(tx.Data),
+ }
+ if tx.ChainId != nil {
+ cpy.ChainId.Set(tx.ChainId)
+ }
+ if tx.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(tx.GasFeeCap)
+ }
+ if tx.To != nil {
+ tmp := *tx.To
+ cpy.To = &tmp
+ }
+ if tx.Value != nil {
+ cpy.Value.Set(tx.Value)
+ }
+ return cpy
+}
+
+func (tx *ArbitrumUnsignedTx) Type() byte { return ArbitrumUnsignedTxType }
+func (tx *ArbitrumUnsignedTx) GetChainID() *uint256.Int { return uint256.MustFromBig(tx.ChainId) }
+func (tx *ArbitrumUnsignedTx) GetNonce() uint64 { return tx.Nonce }
+func (tx *ArbitrumUnsignedTx) GetPrice() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumUnsignedTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumUnsignedTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumUnsignedTx) GetGasLimit() uint64 { return tx.Gas }
+func (tx *ArbitrumUnsignedTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumUnsignedTx) GetValue() *uint256.Int { return uint256.MustFromBig(tx.Value) }
+func (tx *ArbitrumUnsignedTx) GetTo() *common.Address { return tx.To }
+func (tx *ArbitrumUnsignedTx) GetData() []byte { return tx.Data }
+func (tx *ArbitrumUnsignedTx) GetAccessList() types.AccessList { return nil }
+func (tx *ArbitrumUnsignedTx) GetFeeCap() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumUnsignedTx) GetAuthorizations() []types.Authorization { return nil }
+
+func (tx *ArbitrumUnsignedTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return tx.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+
+func (tx *ArbitrumUnsignedTx) AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules) (*types.Message, error) {
+ msg := &types.Message{
+ Tx: tx,
+ }
+ msg.SetGasPrice(tx.GetPrice())
+ msg.SetTip(tx.GetTipCap())
+ msg.SetFeeCap(tx.GetFeeCap())
+ msg.SetGasLimit(tx.GetGasLimit())
+ msg.SetNonce(tx.GetNonce())
+ msg.SetAccessList(tx.GetAccessList())
+ msg.SetFrom(&tx.From)
+ msg.SetTo(tx.GetTo())
+ msg.SetData(tx.GetData())
+ msg.SetAmount(tx.GetValue())
+ msg.SetCheckNonce(!skipAccountChecks[tx.Type()])
+
+ return msg, nil
+}
+
+func (tx *ArbitrumUnsignedTx) WithSignature(signer types.Signer, sig []byte) (types.Transaction, error) {
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) Hash() common.Hash {
+ return types.PrefixedRlpHash(ArbitrumUnsignedTxType, []interface{}{
+ tx.ChainId,
+ tx.From,
+ tx.Nonce,
+ tx.GasFeeCap,
+ tx.Gas,
+ tx.To,
+ tx.Value,
+ tx.Data,
+ })
+}
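Hash follows the standard typed-transaction convention suggested by PrefixedRlpHash: keccak256(typeByte || rlp(fields)). A sketch of that shape using golang.org/x/crypto/sha3 for Keccak-256 and a placeholder payload, since only the prefixing is being illustrated, not the field encoding:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// prefixedHash mirrors the shape of types.PrefixedRlpHash: hash the type
// byte followed by the RLP payload. rlpPayload is assumed to already be the
// RLP list of the fields hashed above.
func prefixedHash(txType byte, rlpPayload []byte) [32]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte{txType})
	h.Write(rlpPayload)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	// 0xc0 is the RLP encoding of an empty list, used only as a placeholder.
	fmt.Printf("%x\n", prefixedHash(0x65, []byte{0xc0}))
}
```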
+
+func (tx *ArbitrumUnsignedTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumUnsignedTx) payloadSize() (payloadSize int, nonceLen, gasLen int) {
+ // ChainId
+ payloadSize++
+ payloadSize += rlp.BigIntLenExcludingHead(tx.ChainId)
+
+ // Nonce
+ payloadSize++
+ nonceLen = rlp.IntLenExcludingHead(tx.Nonce)
+ payloadSize += nonceLen
+
+ // From (20 bytes)
+ payloadSize++
+ payloadSize += 20
+
+ // GasFeeCap
+ payloadSize++
+ payloadSize += rlp.BigIntLenExcludingHead(tx.GasFeeCap)
+
+ // Gas
+ payloadSize++
+ gasLen = rlp.IntLenExcludingHead(tx.Gas)
+ payloadSize += gasLen
+
+ // To (20 bytes if non-nil)
+ payloadSize++
+ if tx.To != nil {
+ payloadSize += 20
+ }
+
+ // Value
+ payloadSize++
+ payloadSize += rlp.BigIntLenExcludingHead(tx.Value)
+
+ // Data (includes its own header)
+ payloadSize += rlp.StringLen(tx.Data)
+
+ return payloadSize, nonceLen, gasLen
+}
+
+func (tx *ArbitrumUnsignedTx) encodePayload(w io.Writer, b []byte, payloadSize, nonceLen, gasLen int) error {
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.From[:]); err != nil {
+ return err
+ }
+
+ if tx.Nonce > 0 && tx.Nonce < 128 {
+ b[0] = byte(tx.Nonce)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ binary.BigEndian.PutUint64(b[1:], tx.Nonce)
+ b[8-nonceLen] = 128 + byte(nonceLen)
+ if _, err := w.Write(b[8-nonceLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ if err := rlp.EncodeBigInt(tx.GasFeeCap, w, b); err != nil {
+ return err
+ }
+
+ if tx.Gas > 0 && tx.Gas < 128 {
+ b[0] = byte(tx.Gas)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ binary.BigEndian.PutUint64(b[1:], tx.Gas)
+ b[8-gasLen] = 128 + byte(gasLen)
+ if _, err := w.Write(b[8-gasLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ if tx.To == nil {
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write((*tx.To)[:]); err != nil {
+ return err
+ }
+ }
+
+ if err := rlp.EncodeBigInt(tx.Value, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeString(tx.Data, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
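encodePayload writes RLP string headers by hand: a fixed 20-byte address is the single header byte 0x80+20 followed by the bytes, and an integer in [1,127] is written as itself while larger (or zero) values get a 0x80+len header over their big-endian bytes with leading zeros stripped. A standalone sketch of those two header rules (plain RLP arithmetic, no erigon helpers):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeAddress writes a fixed 20-byte string exactly like the encoder above:
// one header byte 0x80+20, then the 20 bytes.
func writeAddress(w *bytes.Buffer, addr [20]byte) {
	w.WriteByte(0x80 + 20)
	w.Write(addr[:])
}

// writeUint uses the same branch as the encoder above: values 1..127 are a
// single byte, everything else gets a 0x80+len header over its big-endian
// bytes (zero encodes as the empty string 0x80).
func writeUint(w *bytes.Buffer, v uint64) {
	if v > 0 && v < 128 {
		w.WriteByte(byte(v))
		return
	}
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], v)
	i := 0
	for i < 7 && b[i] == 0 {
		i++
	}
	n := 8 - i
	if v == 0 {
		n = 0
	}
	w.WriteByte(0x80 + byte(n))
	w.Write(b[8-n:])
}

func main() {
	var buf bytes.Buffer
	writeUint(&buf, 0)    // 80
	writeUint(&buf, 5)    // 05
	writeUint(&buf, 1000) // 82 03 e8
	writeAddress(&buf, [20]byte{0xde, 0xad})
	fmt.Printf("%x\n", buf.Bytes())
}
```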
+
+func (tx *ArbitrumUnsignedTx) EncodingSize() int {
+ payloadSize, _, _ := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumUnsignedTx) EncodeRLP(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := tx.payloadSize()
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumUnsignedTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, nonceLen, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
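EncodeRLP adds two layers around the field list: the inner bytes are typeByte || rlp(list), and because a typed transaction travels as an opaque byte string inside a containing list, that whole envelope is prefixed again with an RLP string header sized from envelopeSize. A sketch of the size arithmetic (the 300-byte payload and its 3-byte list prefix are made-up numbers for illustration):

```go
package main

import "fmt"

// stringPrefixLen returns the size of the RLP string header for a payload of
// n bytes, matching how the envelope prefix above is sized: 1 byte for short
// strings (<56 bytes), otherwise 1 byte plus the length-of-length.
func stringPrefixLen(n int) int {
	if n < 56 {
		return 1
	}
	l := 0
	for ; n > 0; n >>= 8 {
		l++
	}
	return 1 + l
}

func main() {
	payloadSize := 300                       // rlp list body of the tx fields
	listPrefix := 1 + 2                      // 0xf9 plus two length bytes for 300
	envelope := 1 + listPrefix + payloadSize // type byte + list header + body
	total := stringPrefixLen(envelope) + envelope
	fmt.Println(envelope, total) // 304 307
}
```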
+
+func (tx *ArbitrumUnsignedTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ copy(tx.From[:], b)
+
+ // Decode Nonce (uint64)
+ if tx.Nonce, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Nonce: %w", err)
+ }
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ tx.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if tx.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode To (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ tx.To = new(common.Address)
+ copy(tx.To[:], b)
+ } else {
+ tx.To = nil
+ }
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ tx.Value = new(big.Int).SetBytes(b)
+
+ // Decode Data ([]byte)
+ if tx.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumUnsignedTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumUnsignedTx) MarshalBinary(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := tx.payloadSize()
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumUnsignedTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, nonceLen, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumUnsignedTx) Sender(signer types.Signer) (common.Address, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) CachedSender() (common.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumUnsignedTx) GetSender() (common.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumUnsignedTx) SetSender(address common.Address) {
+ tx.From = address
+}
+
+func (tx *ArbitrumUnsignedTx) IsContractDeploy() bool {
+ return tx.To == nil
+}
+
+func (tx *ArbitrumUnsignedTx) Unwrap() types.Transaction {
+ //TODO implement me
+ panic("implement me")
+}
+
+// func (tx *ArbitrumUnsignedTx) gas() uint64 { }
+// func (tx *ArbitrumUnsignedTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumUnsignedTx) gasTipCap() *big.Int { return bigZero }
+// func (tx *ArbitrumUnsignedTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumUnsignedTx) value() *big.Int { return tx.Value }
+// func (tx *ArbitrumUnsignedTx) nonce() uint64 { }
+// func (tx *ArbitrumUnsignedTx) to() *common.Address { return tx.To }
+
+func (tx *ArbitrumUnsignedTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+
+//func (tx *ArbitrumUnsignedTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
+func createArbitrumContractTx() types.Transaction {
+ return &ArbitrumContractTx{}
+}
+
+type ArbitrumContractTx struct {
+ NoTimeBoosted
+ ChainId *big.Int
+ RequestId common.Hash
+ From common.Address
+
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+}
+
+func (tx *ArbitrumContractTx) copy() *ArbitrumContractTx {
+ cpy := &ArbitrumContractTx{
+ ChainId: new(big.Int),
+ RequestId: tx.RequestId,
+ GasFeeCap: new(big.Int),
+ Gas: tx.Gas,
+ From: tx.From,
+ To: nil,
+ Value: new(big.Int),
+ Data: common.CopyBytes(tx.Data),
+ }
+ if tx.ChainId != nil {
+ cpy.ChainId.Set(tx.ChainId)
+ }
+ if tx.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(tx.GasFeeCap)
+ }
+ if tx.To != nil {
+ tmp := *tx.To
+ cpy.To = &tmp
+ }
+ if tx.Value != nil {
+ cpy.Value.Set(tx.Value)
+ }
+ return cpy
+}
+func (tx *ArbitrumContractTx) Type() byte { return ArbitrumContractTxType }
+func (tx *ArbitrumContractTx) GetChainID() *uint256.Int { return uint256.MustFromBig(tx.ChainId) }
+func (tx *ArbitrumContractTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumContractTx) GetPrice() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumContractTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumContractTx) GetFeeCap() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumContractTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumContractTx) GetGasLimit() uint64 { return tx.Gas }
+func (tx *ArbitrumContractTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumContractTx) GetData() []byte { return tx.Data }
+func (tx *ArbitrumContractTx) GetValue() *uint256.Int { return uint256.MustFromBig(tx.Value) }
+func (tx *ArbitrumContractTx) GetTo() *common.Address { return tx.To }
+func (tx *ArbitrumContractTx) GetAccessList() types.AccessList { return nil }
+func (tx *ArbitrumContractTx) GetAuthorizations() []types.Authorization { return nil }
+
+func (tx *ArbitrumContractTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return tx.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+func (tx *ArbitrumContractTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumContractTx) AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules) (*types.Message, error) {
+ msg := &types.Message{
+ Tx: tx,
+ }
+ msg.SetGasPrice(tx.GetPrice())
+ msg.SetTip(tx.GetTipCap())
+ msg.SetFeeCap(tx.GetFeeCap())
+ msg.SetGasLimit(tx.GetGasLimit())
+ msg.SetNonce(tx.GetNonce())
+ msg.SetAccessList(tx.GetAccessList())
+ msg.SetFrom(&tx.From)
+ msg.SetTo(tx.GetTo())
+ msg.SetData(tx.GetData())
+ msg.SetAmount(tx.GetValue())
+ msg.SetCheckNonce(!skipAccountChecks[tx.Type()])
+
+ // If baseFee provided, set gasPrice to effectiveGasPrice.
+ if baseFee != nil {
+ gp, of := uint256.FromBig(cmath.BigMin(msg.GasPrice().ToBig().Add(msg.TipCap().ToBig(), baseFee), msg.FeeCap().ToBig()))
+ if of {
+ return nil, fmt.Errorf("gas price overflow happened")
+ }
+ msg.SetGasPrice(gp)
+ }
+	return msg, nil
+}
+
+func (tx *ArbitrumContractTx) WithSignature(signer types.Signer, sig []byte) (types.Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumContractTx) Hash() common.Hash {
+ //TODO implement me
+ return types.PrefixedRlpHash(ArbitrumContractTxType, []interface{}{
+ tx.ChainId,
+ tx.RequestId,
+ tx.From,
+ tx.GasFeeCap,
+ tx.Gas,
+ tx.To,
+ tx.Value,
+ tx.Data,
+ })
+}
+
+func (tx *ArbitrumContractTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumContractTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumContractTx) payloadSize() (payloadSize int, gasLen int) {
+ // 1. ChainId (big.Int): 1 header byte + content length.
+ payloadSize++ // header for ChainId
+ payloadSize += rlp.BigIntLenExcludingHead(tx.ChainId)
+
+ // 2. RequestId (common.Hash, fixed 32 bytes): header + 32 bytes.
+ payloadSize++ // header for RequestId
+ payloadSize += 32
+
+ // 3. From (common.Address, fixed 20 bytes): header + 20 bytes.
+ payloadSize++ // header for From
+ payloadSize += 20
+
+ // 4. GasFeeCap (big.Int): header + content length.
+ payloadSize++ // header for GasFeeCap
+ payloadSize += rlp.BigIntLenExcludingHead(tx.GasFeeCap)
+
+ // 5. Gas (uint64): header + computed length.
+ payloadSize++ // header for Gas
+ gasLen = rlp.IntLenExcludingHead(tx.Gas)
+ payloadSize += gasLen
+
+ // 6. To (*common.Address): header always; if non-nil then add 20 bytes.
+ payloadSize++ // header for To
+ if tx.To != nil {
+ payloadSize += 20
+ }
+
+ // 7. Value (big.Int): header + content length.
+ payloadSize++ // header for Value
+ payloadSize += rlp.BigIntLenExcludingHead(tx.Value)
+
+ // 8. Data ([]byte): rlp.StringLen returns full encoded length (header + data).
+ payloadSize += rlp.StringLen(tx.Data)
+
+ return payloadSize, gasLen
+}
+
+func (tx *ArbitrumContractTx) encodePayload(w io.Writer, b []byte, payloadSize, gasLen int) error {
+ // Write the RLP list prefix for the payload.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // 1. ChainId (big.Int)
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // 2. RequestId (common.Hash, 32 bytes)
+ // Write header for fixed length 32: 0x80 + 32.
+ b[0] = 128 + 32
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.RequestId[:]); err != nil {
+ return err
+ }
+
+ // 3. From (common.Address, 20 bytes)
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.From[:]); err != nil {
+ return err
+ }
+
+ // 4. GasFeeCap (big.Int)
+ if err := rlp.EncodeBigInt(tx.GasFeeCap, w, b); err != nil {
+ return err
+ }
+
+ // 5. Gas (uint64)
+ // If Gas is less than 128, it is encoded as a single byte.
+ if tx.Gas > 0 && tx.Gas < 128 {
+ b[0] = byte(tx.Gas)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+		// Otherwise, encode as big-endian. Write into b[1:9],
+ // then set the header at position 8 - gasLen.
+ binary.BigEndian.PutUint64(b[1:], tx.Gas)
+ b[8-gasLen] = 128 + byte(gasLen)
+ if _, err := w.Write(b[8-gasLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ // 6. To (*common.Address)
+ if tx.To == nil {
+ // nil is encoded as an empty byte string.
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ // Write header for 20-byte string and then the address bytes.
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write((*tx.To)[:]); err != nil {
+ return err
+ }
+ }
+
+ // 7. Value (big.Int)
+ if err := rlp.EncodeBigInt(tx.Value, w, b); err != nil {
+ return err
+ }
+
+ // 8. Data ([]byte)
+ if err := rlp.EncodeString(tx.Data, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumContractTx) EncodingSize() int {
+ payloadSize, _ := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumContractTx) EncodeRLP(w io.Writer) error {
+ payloadSize, gasLen := tx.payloadSize()
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumContractTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumContractTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode RequestId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RequestId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for RequestId: %d", len(b))
+ }
+ copy(tx.RequestId[:], b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ copy(tx.From[:], b)
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ tx.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if tx.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode To (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ tx.To = new(common.Address)
+ copy(tx.To[:], b)
+ } else {
+ tx.To = nil
+ }
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ tx.Value = new(big.Int).SetBytes(b)
+
+ // Decode Data ([]byte)
+ if tx.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumContractTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumContractTx) MarshalBinary(w io.Writer) error {
+ payloadSize, gasLen := tx.payloadSize()
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumContractTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumContractTx) Sender(signer types.Signer) (common.Address, error) {
+ panic("implement me")
+}
+
+func (tx *ArbitrumContractTx) CachedSender() (common.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumContractTx) GetSender() (common.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumContractTx) SetSender(address common.Address) {
+ tx.From = address
+}
+
+func (tx *ArbitrumContractTx) IsContractDeploy() bool {
+ return tx.To == nil
+}
+
+func (tx *ArbitrumContractTx) Unwrap() types.Transaction {
+ return tx
+}
+
+// func (tx *ArbitrumContractTx) ChainID() *big.Int { return tx.ChainId }
+// func (tx *ArbitrumContractTx) accessList() types.AccessList { return nil }
+// func (tx *ArbitrumContractTx) data() []byte { return tx.Data }
+// func (tx *ArbitrumContractTx) gas() uint64 { return tx.Gas }
+// func (tx *ArbitrumContractTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumContractTx) gasTipCap() *big.Int { return bigZero }
+// func (tx *ArbitrumContractTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumContractTx) value() *big.Int { return tx.Value }
+// func (tx *ArbitrumContractTx) nonce() uint64 { return 0 }
+// func (tx *ArbitrumContractTx) to() *common.Address { return tx.To }
+func (tx *ArbitrumContractTx) encode(b *bytes.Buffer) error {
+ return rlp.Encode(b, tx)
+}
+func (tx *ArbitrumContractTx) decode(input []byte) error {
+ return rlp.DecodeBytes(input, tx)
+}
+
+// func (tx *ArbitrumContractTx) rawSignatureValues() (v, r, s *big.Int) {
+// return bigZero, bigZero, bigZero
+// }
+func (tx *ArbitrumContractTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+
+//func (tx *ArbitrumContractTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
+func createArbitrumRetryTx() types.Transaction {
+ return &ArbitrumRetryTx{}
+}
+
+type ArbitrumRetryTx struct {
+ ChainId *big.Int
+ Nonce uint64
+ From common.Address
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+ TicketId common.Hash
+ RefundTo common.Address
+ MaxRefund *big.Int // the maximum refund sent to RefundTo (the rest goes to From)
+ SubmissionFeeRefund *big.Int // the submission fee to refund if successful (capped by MaxRefund)
+ Timeboosted *bool
+}
+
+func (tx *ArbitrumRetryTx) copy() *ArbitrumRetryTx {
+ cpy := &ArbitrumRetryTx{
+ ChainId: new(big.Int),
+ Nonce: tx.Nonce,
+ GasFeeCap: new(big.Int),
+ Gas: tx.Gas,
+ From: tx.From,
+ To: nil,
+ Value: new(big.Int),
+ Data: common.CopyBytes(tx.Data),
+ TicketId: tx.TicketId,
+ RefundTo: tx.RefundTo,
+ MaxRefund: new(big.Int),
+ SubmissionFeeRefund: new(big.Int),
+ Timeboosted: tx.Timeboosted,
+ }
+ if tx.ChainId != nil {
+ cpy.ChainId.Set(tx.ChainId)
+ }
+ if tx.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(tx.GasFeeCap)
+ }
+ if tx.To != nil {
+ tmp := *tx.To
+ cpy.To = &tmp
+ }
+ if tx.Value != nil {
+ cpy.Value.Set(tx.Value)
+ }
+ if tx.MaxRefund != nil {
+ cpy.MaxRefund.Set(tx.MaxRefund)
+ }
+ if tx.SubmissionFeeRefund != nil {
+ cpy.SubmissionFeeRefund.Set(tx.SubmissionFeeRefund)
+ }
+ return cpy
+}
+
+func (tx *ArbitrumRetryTx) Type() byte { return ArbitrumRetryTxType }
+func (tx *ArbitrumRetryTx) GetChainID() *uint256.Int { return uint256.MustFromBig(tx.ChainId) }
+func (tx *ArbitrumRetryTx) GetNonce() uint64 { return tx.Nonce }
+func (tx *ArbitrumRetryTx) GetPrice() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumRetryTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumRetryTx) GetFeeCap() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumRetryTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumRetryTx) GetGasLimit() uint64 { return tx.Gas }
+func (tx *ArbitrumRetryTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumRetryTx) GetData() []byte { return tx.Data }
+func (tx *ArbitrumRetryTx) GetValue() *uint256.Int { return uint256.MustFromBig(tx.Value) }
+func (tx *ArbitrumRetryTx) GetTo() *common.Address { return tx.To }
+func (tx *ArbitrumRetryTx) GetAccessList() types.AccessList { return nil }
+func (tx *ArbitrumRetryTx) GetAuthorizations() []types.Authorization { return nil }
+
+func (tx *ArbitrumRetryTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return tx.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+func (tx *ArbitrumRetryTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumRetryTx) AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules) (*types.Message, error) {
+ msg := &types.Message{
+ Tx: tx,
+ }
+ msg.SetGasPrice(tx.GetPrice())
+ msg.SetTip(tx.GetTipCap())
+ msg.SetFeeCap(tx.GetFeeCap())
+ msg.SetGasLimit(tx.GetGasLimit())
+ msg.SetNonce(tx.GetNonce())
+ msg.SetAccessList(tx.GetAccessList())
+ msg.SetFrom(&tx.From)
+ msg.SetTo(tx.GetTo())
+ msg.SetData(tx.GetData())
+ msg.SetAmount(tx.GetValue())
+ msg.SetCheckNonce(!skipAccountChecks[tx.Type()])
+
+ // If baseFee provided, set gasPrice to effectiveGasPrice.
+ if baseFee != nil {
+ gp, of := uint256.FromBig(cmath.BigMin(msg.GasPrice().ToBig().Add(msg.TipCap().ToBig(), baseFee), msg.FeeCap().ToBig()))
+ if of {
+ return nil, fmt.Errorf("gas price overflow happened")
+ }
+ msg.SetGasPrice(gp)
+ }
+
+ return msg, nil
+}
+
+func (tx *ArbitrumRetryTx) WithSignature(signer types.Signer, sig []byte) (types.Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumRetryTx) Hash() common.Hash {
+ //TODO implement me
+ return types.PrefixedRlpHash(ArbitrumRetryTxType, []interface{}{
+ tx.ChainId,
+ tx.Nonce,
+ tx.From,
+ tx.GasFeeCap,
+ tx.Gas,
+ tx.To,
+ tx.Value,
+ tx.Data,
+ tx.TicketId,
+ tx.RefundTo,
+ tx.MaxRefund,
+ tx.SubmissionFeeRefund,
+ })
+}
+
+func (tx *ArbitrumRetryTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumRetryTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumRetryTx) encodePayload(w io.Writer, b []byte, payloadSize, nonceLen, gasLen int, hashingOnly bool) error {
+ // Write the RLP list prefix.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // ChainId (big.Int)
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // Nonce (uint64)
+ if tx.Nonce > 0 && tx.Nonce < 128 {
+ b[0] = byte(tx.Nonce)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ binary.BigEndian.PutUint64(b[1:], tx.Nonce)
+ b[8-nonceLen] = 128 + byte(nonceLen)
+ if _, err := w.Write(b[8-nonceLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ // From (common.Address, 20 bytes)
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.From[:]); err != nil {
+ return err
+ }
+
+ // GasFeeCap (big.Int)
+ if err := rlp.EncodeBigInt(tx.GasFeeCap, w, b); err != nil {
+ return err
+ }
+
+ // Gas (uint64)
+ if err := rlp.EncodeInt(tx.Gas, w, b); err != nil {
+ return err
+ }
+
+ // To (optional common.Address, 20 bytes if non-nil)
+ if tx.To == nil {
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write((*tx.To)[:]); err != nil {
+ return err
+ }
+ }
+
+ // Value (big.Int)
+ if err := rlp.EncodeBigInt(tx.Value, w, b); err != nil {
+ return err
+ }
+
+ // Data ([]byte)
+ if err := rlp.EncodeString(tx.Data, w, b); err != nil {
+ return err
+ }
+
+ // TicketId (common.Hash, 32 bytes)
+ b[0] = 128 + 32
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.TicketId[:]); err != nil {
+ return err
+ }
+
+ // RefundTo (common.Address, 20 bytes)
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.RefundTo[:]); err != nil {
+ return err
+ }
+
+ // MaxRefund (big.Int)
+ if err := rlp.EncodeBigInt(tx.MaxRefund, w, b); err != nil {
+ return err
+ }
+
+ // SubmissionFeeRefund (big.Int)
+ if err := rlp.EncodeBigInt(tx.SubmissionFeeRefund, w, b); err != nil {
+ return err
+ }
+
+ if tx.Timeboosted != nil && !hashingOnly {
+ if err := rlp.EncodeBool(*tx.Timeboosted, w, b); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumRetryTx) payloadSize(hashingOnly bool) (payloadSize int, nonceLen, gasLen int) {
+ // ChainId (big.Int)
+ payloadSize++ // header
+ payloadSize += rlp.BigIntLenExcludingHead(tx.ChainId)
+
+ // Nonce (uint64)
+ payloadSize++ // header
+ nonceLen = rlp.IntLenExcludingHead(tx.Nonce)
+ payloadSize += nonceLen
+
+ // From (common.Address, 20 bytes)
+ payloadSize++ // header
+ payloadSize += 20
+
+ // GasFeeCap (big.Int)
+ payloadSize++ // header
+ payloadSize += rlp.BigIntLenExcludingHead(tx.GasFeeCap)
+
+ // Gas (uint64)
+ payloadSize++ // header
+ gasLen = rlp.IntLenExcludingHead(tx.Gas)
+ payloadSize += gasLen
+
+ // To (optional common.Address, 20 bytes if non-nil)
+ payloadSize++ // header
+ if tx.To != nil {
+ payloadSize += 20
+ }
+
+ // Value (big.Int)
+ payloadSize++ // header
+ payloadSize += rlp.BigIntLenExcludingHead(tx.Value)
+
+ // Data ([]byte) — rlp.StringLen returns the full encoded length (header + data)
+ payloadSize += rlp.StringLen(tx.Data)
+
+ // TicketId (common.Hash, 32 bytes)
+ payloadSize++ // header
+ payloadSize += 32
+
+ // RefundTo (common.Address, 20 bytes)
+ payloadSize++ // header
+ payloadSize += 20
+
+ // MaxRefund (big.Int)
+ payloadSize++ // header
+ payloadSize += rlp.BigIntLenExcludingHead(tx.MaxRefund)
+
+ // SubmissionFeeRefund (big.Int)
+ payloadSize++ // header
+ payloadSize += rlp.BigIntLenExcludingHead(tx.SubmissionFeeRefund)
+
+ if tx.Timeboosted != nil && !hashingOnly {
+ // Timeboosted (bool)
+ payloadSize++
+ payloadSize += rlp.BoolLen()
+ }
+
+ return payloadSize, nonceLen, gasLen
+}
+
+func (tx *ArbitrumRetryTx) EncodingSize() int {
+ payloadSize, _, _ := tx.payloadSize(false)
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumRetryTx) EncodeRLP(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := tx.payloadSize(false)
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumRetryTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, nonceLen, gasLen, false); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumRetryTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin list decoding.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode Nonce (uint64)
+ if tx.Nonce, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Nonce: %w", err)
+ }
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ copy(tx.From[:], b)
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ tx.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if tx.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode To (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ tx.To = new(common.Address)
+ copy(tx.To[:], b)
+ }
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ tx.Value = new(big.Int).SetBytes(b)
+
+ // Decode Data ([]byte)
+ if tx.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ // Decode TicketId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read TicketId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for TicketId: %d", len(b))
+ }
+ copy(tx.TicketId[:], b)
+
+ // Decode RefundTo (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RefundTo: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for RefundTo: %d", len(b))
+ }
+ copy(tx.RefundTo[:], b)
+
+ // Decode MaxRefund (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read MaxRefund: %w", err)
+ }
+ tx.MaxRefund = new(big.Int).SetBytes(b)
+
+ // Decode SubmissionFeeRefund (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read SubmissionFeeRefund: %w", err)
+ }
+ tx.SubmissionFeeRefund = new(big.Int).SetBytes(b)
+
+ if s.MoreDataInList() {
+ boolVal, err := s.Bool()
+ if err != nil {
+ return err
+ }
+ tx.Timeboosted = &boolVal
+ }
+ return s.ListEnd()
+}
+
+func (t *ArbitrumRetryTx) MarshalBinary(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := t.payloadSize(false)
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumRetryTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := t.encodePayload(w, b[:], payloadSize, nonceLen, gasLen, false); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumRetryTx) MarshalBinaryForHashing(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := tx.payloadSize(true)
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumRetryTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, nonceLen, gasLen, true); err != nil {
+ return err
+ }
+ return nil
+}
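Timeboosted is treated as an optional trailing list element: payloadSize(true) and MarshalBinaryForHashing drop it so the hash does not change if the flag is set later, while DecodeRLP reads it only when MoreDataInList reports a leftover element. A toy sketch of that optional-tail pattern, using plain slices instead of an RLP stream:

```go
package main

import "fmt"

// record models the pattern above: required fields plus one optional trailing
// flag that is present on the wire but deliberately left out of the hashing
// encoding so the hash stays stable.
type record struct {
	Nonce       uint64
	Timeboosted *bool
}

// fields returns the values to encode; hashingOnly drops the optional tail,
// mirroring payloadSize(true) / MarshalBinaryForHashing.
func (r record) fields(hashingOnly bool) []interface{} {
	out := []interface{}{r.Nonce}
	if r.Timeboosted != nil && !hashingOnly {
		out = append(out, *r.Timeboosted)
	}
	return out
}

// decode mirrors the MoreDataInList branch: the optional flag is read only if
// the list still has an element left after the required fields.
func decode(fields []interface{}) record {
	r := record{Nonce: fields[0].(uint64)}
	if len(fields) > 1 {
		b := fields[1].(bool)
		r.Timeboosted = &b
	}
	return r
}

func main() {
	t := true
	r := record{Nonce: 7, Timeboosted: &t}
	fmt.Println(len(r.fields(false)), len(r.fields(true)))   // 2 1
	fmt.Println(decode(r.fields(false)).Timeboosted != nil) // true
}
```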
+
+func (tx *ArbitrumRetryTx) Sender(signer types.Signer) (common.Address, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumRetryTx) CachedSender() (common.Address, bool) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumRetryTx) GetSender() (common.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumRetryTx) SetSender(address common.Address) {
+ tx.From = address
+}
+
+func (tx *ArbitrumRetryTx) IsContractDeploy() bool {
+ return tx.To == nil
+}
+
+func (tx *ArbitrumRetryTx) Unwrap() types.Transaction {
+ return tx
+}
+
+func (tx *ArbitrumRetryTx) IsTimeBoosted() *bool {
+ return tx.Timeboosted
+}
+
+func (tx *ArbitrumRetryTx) SetTimeboosted(val *bool) {
+ tx.Timeboosted = val
+}
+
+// func (tx *ArbitrumRetryTx) chainID() *big.Int { return tx.ChainId }
+// func (tx *ArbitrumRetryTx) accessList() types.AccessList { return nil }
+// func (tx *ArbitrumRetryTx) data() []byte { return tx.Data }
+// func (tx *ArbitrumRetryTx) gas() uint64 { return tx.Gas }
+// func (tx *ArbitrumRetryTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumRetryTx) gasTipCap() *big.Int { return bigZero }
+// func (tx *ArbitrumRetryTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumRetryTx) value() *big.Int { return tx.Value }
+// func (tx *ArbitrumRetryTx) nonce() uint64 { return tx.Nonce }
+// func (tx *ArbitrumRetryTx) to() *common.Address { return tx.To }
+func (tx *ArbitrumRetryTx) encode(b *bytes.Buffer) error {
+ return rlp.Encode(b, tx)
+}
+func (tx *ArbitrumRetryTx) decode(input []byte) error {
+ return rlp.DecodeBytes(input, tx)
+}
+
+func (tx *ArbitrumRetryTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+
+//func (tx *ArbitrumRetryTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
+func createArbitrumSubmitRetryableTx() types.Transaction {
+ return &ArbitrumSubmitRetryableTx{}
+}
+
+type ArbitrumSubmitRetryableTx struct {
+ NoTimeBoosted
+ ChainId *big.Int
+ RequestId common.Hash
+ From common.Address
+ L1BaseFee *big.Int
+
+ DepositValue *big.Int
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit for the retryable tx, actual gas spending is EffectiveGasUsed
+ RetryTo *common.Address `rlp:"nil"` // nil means contract creation
+ RetryValue *big.Int // wei amount
+ Beneficiary common.Address
+ MaxSubmissionFee *big.Int
+ FeeRefundAddr common.Address
+ RetryData []byte // contract invocation input data
+ EffectiveGasUsed uint64
+}
+
+func (tx *ArbitrumSubmitRetryableTx) copy() *ArbitrumSubmitRetryableTx {
+ cpy := &ArbitrumSubmitRetryableTx{
+ ChainId: new(big.Int),
+ RequestId: tx.RequestId,
+ DepositValue: new(big.Int),
+ L1BaseFee: new(big.Int),
+ GasFeeCap: new(big.Int),
+ Gas: tx.Gas,
+ From: tx.From,
+ RetryTo: tx.RetryTo,
+ RetryValue: new(big.Int),
+ Beneficiary: tx.Beneficiary,
+ MaxSubmissionFee: new(big.Int),
+ FeeRefundAddr: tx.FeeRefundAddr,
+ RetryData: common.CopyBytes(tx.RetryData),
+ EffectiveGasUsed: tx.EffectiveGasUsed,
+ }
+ if tx.ChainId != nil {
+ cpy.ChainId.Set(tx.ChainId)
+ }
+ if tx.DepositValue != nil {
+ cpy.DepositValue.Set(tx.DepositValue)
+ }
+ if tx.L1BaseFee != nil {
+ cpy.L1BaseFee.Set(tx.L1BaseFee)
+ }
+ if tx.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(tx.GasFeeCap)
+ }
+ if tx.RetryTo != nil {
+ tmp := *tx.RetryTo
+ cpy.RetryTo = &tmp
+ }
+ if tx.RetryValue != nil {
+ cpy.RetryValue.Set(tx.RetryValue)
+ }
+ if tx.MaxSubmissionFee != nil {
+ cpy.MaxSubmissionFee.Set(tx.MaxSubmissionFee)
+ }
+
+ return cpy
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Type() byte { return ArbitrumSubmitRetryableTxType }
+func (tx *ArbitrumSubmitRetryableTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumSubmitRetryableTx) GetGasLimit() uint64 { return tx.Gas }
+func (tx *ArbitrumSubmitRetryableTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumSubmitRetryableTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumSubmitRetryableTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumSubmitRetryableTx) GetValue() *uint256.Int { return uintZero }
+func (tx *ArbitrumSubmitRetryableTx) GetTo() *common.Address { return &ArbRetryableTxAddress }
+func (tx *ArbitrumSubmitRetryableTx) GetAccessList() types.AccessList { return nil }
+func (tx *ArbitrumSubmitRetryableTx) GetAuthorizations() []types.Authorization { return nil }
+func (tx *ArbitrumSubmitRetryableTx) GetChainID() *uint256.Int {
+ return uint256.MustFromBig(tx.ChainId)
+}
+func (tx *ArbitrumSubmitRetryableTx) GetPrice() *uint256.Int {
+ return uint256.MustFromBig(tx.GasFeeCap)
+}
+func (tx *ArbitrumSubmitRetryableTx) GetFeeCap() *uint256.Int {
+ return uint256.MustFromBig(tx.GasFeeCap)
+}
+
+func (tx *ArbitrumSubmitRetryableTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return tx.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+
+func (tx *ArbitrumSubmitRetryableTx) GetData() []byte {
+ var retryTo common.Address
+ if tx.RetryTo != nil {
+ retryTo = *tx.RetryTo
+ }
+ data := make([]byte, 0)
+ data = append(data, tx.RequestId.Bytes()...)
+	data = append(data, cmath.U256Bytes(tx.L1BaseFee)...)
+	data = append(data, cmath.U256Bytes(tx.DepositValue)...)
+	data = append(data, cmath.U256Bytes(tx.RetryValue)...)
+	data = append(data, cmath.U256Bytes(tx.GasFeeCap)...)
+	data = append(data, cmath.U256Bytes(new(big.Int).SetUint64(tx.Gas))...)
+	data = append(data, cmath.U256Bytes(tx.MaxSubmissionFee)...)
+	data = append(data, make([]byte, 12)...)
+	data = append(data, tx.FeeRefundAddr.Bytes()...)
+	data = append(data, make([]byte, 12)...)
+	data = append(data, tx.Beneficiary.Bytes()...)
+	data = append(data, make([]byte, 12)...)
+	data = append(data, retryTo.Bytes()...)
+	offset := len(data) + 32
+	data = append(data, cmath.U256Bytes(big.NewInt(int64(offset)))...)
+	data = append(data, cmath.U256Bytes(big.NewInt(int64(len(tx.RetryData))))...)
+ data = append(data, tx.RetryData...)
+ extra := len(tx.RetryData) % 32
+ if extra > 0 {
+ data = append(data, make([]byte, 32-extra)...)
+ }
+ data = append(hexutil.MustDecode("0xc9f95d32"), data...)
+ return data
+}
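GetData hand-packs the calldata for the retryable-submission call: a 4-byte selector, each static argument as a 32-byte word (addresses left-padded with 12 zero bytes), and finally the dynamic RetryData as an offset word, a length word, and the bytes padded up to a 32-byte boundary. A sketch of the dynamic-tail arithmetic only (generic ABI packing, not an erigon helper; the ten-word head below just mirrors the static words appended above):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// word returns a 32-byte big-endian word holding v, the same shape as the
// U256Bytes calls above.
func word(v uint64) []byte {
	var w [32]byte
	binary.BigEndian.PutUint64(w[24:], v)
	return w[:]
}

// packDynamicTail appends the ABI tail for a single dynamic bytes argument:
// offset word, length word, data, then zero padding to a 32-byte boundary.
// The offset is measured from the start of the arguments (just after the
// selector) and includes the offset slot itself, matching offset above.
func packDynamicTail(head []byte, data []byte) []byte {
	out := append([]byte{}, head...)
	offset := uint64(len(head) + 32)
	out = append(out, word(offset)...)
	out = append(out, word(uint64(len(data)))...)
	out = append(out, data...)
	if extra := len(data) % 32; extra > 0 {
		out = append(out, make([]byte, 32-extra)...)
	}
	return out
}

func main() {
	head := make([]byte, 10*32) // the static 32-byte words (ten of them here)
	tail := packDynamicTail(head, []byte("retry-data"))
	fmt.Println(len(tail)) // 10*32 + offset word + length word + padded data = 416
}
```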
+
+func (tx *ArbitrumSubmitRetryableTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumSubmitRetryableTx) payloadSize(hashingOnly bool) (payloadSize int, gasLen int) {
+ size := 0
+ size++
+ size += rlp.BigIntLenExcludingHead(tx.ChainId)
+ size++
+ size += length.Hash
+ size++
+ size += length.Addr
+ size++
+ size += rlp.BigIntLenExcludingHead(tx.L1BaseFee)
+ size++
+ size += rlp.BigIntLenExcludingHead(tx.DepositValue)
+ size++
+ size += rlp.BigIntLenExcludingHead(tx.GasFeeCap)
+ size++
+ gasLen = rlp.IntLenExcludingHead(tx.Gas)
+ size += gasLen
+ size++
+ if tx.RetryTo != nil {
+ size += length.Addr
+ }
+ size++
+ size += rlp.BigIntLenExcludingHead(tx.RetryValue)
+ size++
+ size += length.Addr
+ size++
+ size += rlp.BigIntLenExcludingHead(tx.MaxSubmissionFee)
+ size++
+ size += length.Addr
+ size += rlp.StringLen(tx.RetryData)
+
+ if hashingOnly {
+ return size, gasLen
+ }
+ // effective gas used is only included in non-hashing RLP encodings
+ size++
+ size += rlp.IntLenExcludingHead(tx.EffectiveGasUsed)
+
+ return size, gasLen
+}
+
+func (tx *ArbitrumSubmitRetryableTx) encodePayload(w io.Writer, b []byte, payloadSize int, hashingOnly bool) error {
+ // Write the RLP list prefix.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // ChainId (big.Int)
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // RequestId (common.Hash, 32 bytes)
+ b[0] = 128 + length.Hash
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.RequestId[:]); err != nil {
+ return err
+ }
+
+ // From (common.Address, 20 bytes)
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.From[:]); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeBigInt(tx.L1BaseFee, w, b); err != nil {
+ return err
+ }
+ if err := rlp.EncodeBigInt(tx.DepositValue, w, b); err != nil {
+ return err
+ }
+ if err := rlp.EncodeBigInt(tx.GasFeeCap, w, b); err != nil {
+ return err
+ }
+ if err := rlp.EncodeInt(tx.Gas, w, b); err != nil {
+ return err
+ }
+
+ // RetryTo (pointer to common.Address, 20 bytes if non-nil; otherwise RLP nil)
+ if tx.RetryTo == nil {
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write((*tx.RetryTo)[:]); err != nil {
+ return err
+ }
+ }
+ if err := rlp.EncodeBigInt(tx.RetryValue, w, b); err != nil {
+ return err
+ }
+ // Beneficiary (common.Address, 20 bytes)
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.Beneficiary[:]); err != nil {
+ return err
+ }
+ if err := rlp.EncodeBigInt(tx.MaxSubmissionFee, w, b); err != nil {
+ return err
+ }
+
+ // FeeRefundAddr (common.Address, 20 bytes)
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.FeeRefundAddr[:]); err != nil {
+ return err
+ }
+ if err := rlp.EncodeString(tx.RetryData, w, b); err != nil {
+ return err
+ }
+
+ if hashingOnly {
+ return nil
+ }
+ if err := rlp.EncodeInt(tx.EffectiveGasUsed, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules) (*types.Message, error) {
+ msg := &types.Message{
+ Tx: tx,
+ EffectiveGas: tx.EffectiveGasUsed,
+ }
+ msg.SetGasPrice(tx.GetPrice())
+ msg.SetTip(tx.GetTipCap())
+ msg.SetFeeCap(tx.GetFeeCap())
+ msg.SetGasLimit(tx.GetGasLimit())
+ msg.SetNonce(tx.GetNonce())
+ msg.SetAccessList(tx.GetAccessList())
+ msg.SetFrom(&tx.From)
+ msg.SetTo(tx.GetTo())
+ msg.SetData(tx.GetData())
+ msg.SetAmount(tx.GetValue())
+ msg.SetCheckNonce(!skipAccountChecks[tx.Type()])
+
+	// If baseFee provided, set gasPrice to effectiveGasPrice.
+	if baseFee != nil {
+		gp, of := uint256.FromBig(cmath.BigMin(msg.GasPrice().ToBig().Add(msg.TipCap().ToBig(), baseFee), msg.FeeCap().ToBig()))
+		if of {
+			return nil, fmt.Errorf("gas price overflow happened")
+		}
+		msg.SetGasPrice(gp)
+	}
+ // if !rules.IsCancun {
+ // return msg, errors.New("BlobTx transactions require Cancun")
+ // }
+ // if baseFee != nil {
+ // overflow := msg.gasPrice.SetFromBig(baseFee)
+ // if overflow {
+ // return msg, errors.New("gasPrice higher than 2^256-1")
+ // }
+ // }
+ // msg.gasPrice.Add(&msg.gasPrice, stx.Tip)
+ // if msg.gasPrice.Gt(stx.FeeCap) {
+ // msg.gasPrice.Set(stx.FeeCap)
+ // }
+ // var err error
+ // msg.from, err = d.Sender(s)
+ // msg.maxFeePerBlobGas = *stx.MaxFeePerBlobGas
+ // msg.blobHashes = stx.BlobVersionedHashes
+ return msg, nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) WithSignature(signer types.Signer, sig []byte) (types.Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Hash() common.Hash {
+ return types.PrefixedRlpHash(ArbitrumSubmitRetryableTxType, []interface{}{
+ tx.ChainId,
+ tx.RequestId,
+ tx.From,
+ tx.L1BaseFee,
+ tx.DepositValue,
+ tx.GasFeeCap,
+ tx.Gas,
+ tx.RetryTo,
+ tx.RetryValue,
+ tx.Beneficiary,
+ tx.MaxSubmissionFee,
+ tx.FeeRefundAddr,
+ tx.RetryData,
+ })
+}
+
+func (tx *ArbitrumSubmitRetryableTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) EncodingSize() int {
+ payloadSize, _ := tx.payloadSize(false)
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumSubmitRetryableTx) EncodeRLP(w io.Writer) error {
+ hashingOnly := false
+
+ payloadSize, _ := tx.payloadSize(hashingOnly)
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+
+	// envelope
+	if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumSubmitRetryableTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, hashingOnly); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode RequestId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RequestId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for RequestId: %d", len(b))
+ }
+ copy(tx.RequestId[:], b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ copy(tx.From[:], b)
+
+ // Decode L1BaseFee (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read L1BaseFee: %w", err)
+ }
+ tx.L1BaseFee = new(big.Int).SetBytes(b)
+
+ // Decode DepositValue (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read DepositValue: %w", err)
+ }
+ tx.DepositValue = new(big.Int).SetBytes(b)
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ tx.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if tx.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode RetryTo (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RetryTo: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for RetryTo: %d", len(b))
+ }
+ tx.RetryTo = new(common.Address)
+ copy(tx.RetryTo[:], b)
+ } else {
+ tx.RetryTo = nil
+ }
+
+ // Decode RetryValue (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RetryValue: %w", err)
+ }
+ tx.RetryValue = new(big.Int).SetBytes(b)
+
+ // Decode Beneficiary (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Beneficiary: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for Beneficiary: %d", len(b))
+ }
+ copy(tx.Beneficiary[:], b)
+
+ // Decode MaxSubmissionFee (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read MaxSubmissionFee: %w", err)
+ }
+ tx.MaxSubmissionFee = new(big.Int).SetBytes(b)
+
+ // Decode FeeRefundAddr (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read FeeRefundAddr: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for FeeRefundAddr: %d", len(b))
+ }
+ copy(tx.FeeRefundAddr[:], b)
+
+ // Decode RetryData ([]byte)
+ if tx.RetryData, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RetryData: %w", err)
+ }
+
+ if s.MoreDataInList() {
+ if tx.EffectiveGasUsed, err = s.Uint(); err != nil {
+			return fmt.Errorf("read EffectiveGasUsed: %w", err)
+ }
+ }
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumSubmitRetryableTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) MarshalBinary(w io.Writer) error {
+ hashingOnly := false
+ payloadSize, _ := tx.payloadSize(hashingOnly)
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumSubmitRetryableTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, hashingOnly); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) MarshalBinaryForHashing(w io.Writer) error {
+ hashingOnly := true
+
+ payloadSize, _ := tx.payloadSize(hashingOnly)
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumSubmitRetryableTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, hashingOnly); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Sender(signer types.Signer) (common.Address, error) {
+ panic("cannot sign ArbitrumSubmitRetryableTx")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) CachedSender() (common.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumSubmitRetryableTx) GetSender() (common.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumSubmitRetryableTx) SetSender(address common.Address) {
+ tx.From = address
+}
+
+func (tx *ArbitrumSubmitRetryableTx) IsContractDeploy() bool {
+ return tx.RetryTo == nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Unwrap() types.Transaction {
+ return tx
+}
+
+// func (tx *ArbitrumSubmitRetryableTx) chainID() *big.Int { return tx.ChainId }
+// func (tx *ArbitrumSubmitRetryableTx) accessList() types.AccessList { return nil }
+// func (tx *ArbitrumSubmitRetryableTx) gas() uint64 { return tx.Gas }
+// func (tx *ArbitrumSubmitRetryableTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumSubmitRetryableTx) gasTipCap() *big.Int { return big.NewInt(0) }
+// func (tx *ArbitrumSubmitRetryableTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumSubmitRetryableTx) value() *big.Int { return common.Big0 }
+// func (tx *ArbitrumSubmitRetryableTx) nonce() uint64 { return 0 }
+// func (tx *ArbitrumSubmitRetryableTx) to() *common.Address { return &ArbRetryableTxAddress }
+func (tx *ArbitrumSubmitRetryableTx) encode(b *bytes.Buffer) error {
+ return rlp.Encode(b, tx)
+}
+func (tx *ArbitrumSubmitRetryableTx) decode(input []byte) error {
+ return rlp.DecodeBytes(input, tx)
+}
+
+//func (tx *ArbitrumSubmitRetryableTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+//
+//func (tx *ArbitrumSubmitRetryableTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
+func createArbitrumDespoitTxn() types.Transaction {
+ return &ArbitrumDepositTx{}
+}
+
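+// ArbitrumDepositTx mirrors Nitro's deposit transaction (type 0x64) for
+// L1->L2 value transfers: it carries only the chain id, the originating L1
+// request id, the funded account and the amount. It has no gas or nonce
+// fields and is never signed; the sender is taken directly from From.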
+type ArbitrumDepositTx struct {
+ NoTimeBoosted
+ ChainId *big.Int
+ L1RequestId common.Hash
+ From common.Address
+ To common.Address
+ Value *big.Int
+}
+
+func (tx *ArbitrumDepositTx) copy() *ArbitrumDepositTx {
+ dtx := &ArbitrumDepositTx{
+ ChainId: new(big.Int),
+ L1RequestId: tx.L1RequestId,
+ From: tx.From,
+ To: tx.To,
+ Value: new(big.Int),
+ }
+ if tx.ChainId != nil {
+ dtx.ChainId.Set(tx.ChainId)
+ }
+ if tx.Value != nil {
+ dtx.Value.Set(tx.Value)
+ }
+ return dtx
+}
+
+func (tx *ArbitrumDepositTx) Type() byte { return ArbitrumDepositTxType }
+func (tx *ArbitrumDepositTx) GetChainID() *uint256.Int { return uint256.MustFromBig(tx.ChainId) }
+func (tx *ArbitrumDepositTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumDepositTx) GetPrice() *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) GetFeeCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumDepositTx) GetGasLimit() uint64 { return 0 }
+func (tx *ArbitrumDepositTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumDepositTx) GetData() []byte { return nil }
+func (tx *ArbitrumDepositTx) GetValue() *uint256.Int { return uint256.MustFromBig(tx.Value) }
+func (tx *ArbitrumDepositTx) GetTo() *common.Address { return &tx.To }
+func (tx *ArbitrumDepositTx) GetAccessList() types.AccessList { return nil }
+func (tx *ArbitrumDepositTx) GetAuthorizations() []types.Authorization { return nil }
+
+func (tx *ArbitrumDepositTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumDepositTx) AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules) (*types.Message, error) {
+ msg := &types.Message{
+ Tx: tx,
+ }
+ msg.SetGasPrice(tx.GetPrice())
+ msg.SetTip(tx.GetTipCap())
+ msg.SetFeeCap(tx.GetFeeCap())
+ msg.SetGasLimit(tx.GetGasLimit())
+ msg.SetNonce(tx.GetNonce())
+ msg.SetAccessList(tx.GetAccessList())
+ msg.SetFrom(&tx.From)
+ msg.SetTo(tx.GetTo())
+ msg.SetData(tx.GetData())
+ msg.SetAmount(tx.GetValue())
+ msg.SetCheckNonce(!skipAccountChecks[tx.Type()])
+
+ // if msg.feeCap.IsZero() {
+ // msg.feeCap.Set(uint256.NewInt(0x5f5e100))
+ // }
+ // if !rules.IsCancun {
+ // return msg, errors.New("BlobTx transactions require Cancun")
+ // }
+ // if baseFee != nil {
+ // overflow := msg.gasPrice.SetFromBig(baseFee)
+ // if overflow {
+ // return msg, errors.New("gasPrice higher than 2^256-1")
+ // }
+ // }
+ // msg.gasPrice.Add(&msg.gasPrice, tx.GetTipCap())
+ // if msg.gasPrice.Gt(tx.GetFeeCap()) {
+ // msg.gasPrice.Set(tx.GetFeeCap())
+ // }
+ // var err error
+ // msg.from, err = d.Sender(s)
+ // msg.maxFeePerBlobGas = *stx.MaxFeePerBlobGas
+ // msg.blobHashes = stx.BlobVersionedHashes
+ return msg, nil
+}
+
+func (tx *ArbitrumDepositTx) WithSignature(signer types.Signer, sig []byte) (types.Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumDepositTx) Hash() common.Hash {
+ return types.PrefixedRlpHash(ArbitrumDepositTxType, []interface{}{
+ tx.ChainId,
+ tx.L1RequestId,
+ tx.From,
+ tx.To,
+ tx.Value,
+ })
+}
+
+func (tx *ArbitrumDepositTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumDepositTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumDepositTx) EncodingSize() int {
+ payloadSize := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumDepositTx) EncodeRLP(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumDepositTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) payloadSize() int {
+ size := 0
+
+ // ChainId: header + length of big.Int (excluding header)
+ size++ // header for ChainId
+ size += rlp.BigIntLenExcludingHead(tx.ChainId)
+
+ // L1RequestId: header + 32 bytes
+ size++ // header for L1RequestId
+ size += 32
+
+ // From: header + 20 bytes
+ size++ // header for From
+ size += 20
+
+ // To: header + 20 bytes
+ size++ // header for To
+ size += 20
+
+ // Value: header + length of big.Int (excluding header)
+ size++ // header for Value
+ size += rlp.BigIntLenExcludingHead(tx.Value)
+
+ return size
+}
+
+func (tx *ArbitrumDepositTx) encodePayload(w io.Writer, b []byte, payloadSize int) error {
+ // Write the RLP list prefix.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // Encode ChainId.
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // Encode L1RequestId (common.Hash, 32 bytes).
+ b[0] = 128 + 32
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.L1RequestId[:]); err != nil {
+ return err
+ }
+
+ // Encode From (common.Address, 20 bytes).
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.From[:]); err != nil {
+ return err
+ }
+
+ // Encode To (common.Address, 20 bytes).
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.To[:]); err != nil {
+ return err
+ }
+
+ // Encode Value.
+ if err := rlp.EncodeBigInt(tx.Value, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode L1RequestId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read L1RequestId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for L1RequestId: %d", len(b))
+ }
+ copy(tx.L1RequestId[:], b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ copy(tx.From[:], b)
+
+ // Decode To (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ copy(tx.To[:], b)
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ tx.Value = new(big.Int).SetBytes(b)
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumDepositTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) MarshalBinary(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumDepositTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) Sender(signer types.Signer) (common.Address, error) {
+ panic("implement me")
+}
+func (tx *ArbitrumDepositTx) CachedSender() (common.Address, bool) { return tx.From, true }
+func (tx *ArbitrumDepositTx) GetSender() (common.Address, bool) { return tx.From, true }
+func (tx *ArbitrumDepositTx) SetSender(address common.Address) { tx.From = address }
+func (tx *ArbitrumDepositTx) IsContractDeploy() bool { return false }
+func (tx *ArbitrumDepositTx) Unwrap() types.Transaction { return tx }
+func (tx *ArbitrumDepositTx) encode(b *bytes.Buffer) error { return rlp.Encode(b, tx) }
+func (tx *ArbitrumDepositTx) decode(input []byte) error { return rlp.DecodeBytes(input, tx) }
+
+//func (tx *ArbitrumDepositTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// return dst.Set(bigZero)
+//}
+
+func createArbitrumInternalTx() types.Transaction {
+ return &ArbitrumInternalTx{}
+}
+
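+// ArbitrumInternalTx (type 0x6a) is a transaction issued by ArbOS itself:
+// the sender is always ArbosAddress, the target is ArbosAddress, and the
+// only payload is the chain id plus ArbOS-internal calldata.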
+type ArbitrumInternalTx struct {
+ NoTimeBoosted
+ ChainId *uint256.Int
+ Data []byte
+}
+
+func (t *ArbitrumInternalTx) copy() *ArbitrumInternalTx {
+ cpy := &ArbitrumInternalTx{
+ ChainId: t.ChainId.Clone(),
+ Data: common.CopyBytes(t.Data),
+ }
+ return cpy
+}
+
+func (tx *ArbitrumInternalTx) Type() byte { return ArbitrumInternalTxType }
+func (tx *ArbitrumInternalTx) GetChainID() *uint256.Int { return tx.ChainId }
+func (tx *ArbitrumInternalTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumInternalTx) GetPrice() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetFeeCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumInternalTx) GetGasLimit() uint64 { return 0 }
+func (tx *ArbitrumInternalTx) GetBlobGas() uint64 { return 0 } // todo
+func (tx *ArbitrumInternalTx) GetData() []byte { return tx.Data }
+func (tx *ArbitrumInternalTx) GetValue() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetTo() *common.Address { return &ArbosAddress }
+func (tx *ArbitrumInternalTx) GetAccessList() types.AccessList { return nil }
+func (tx *ArbitrumInternalTx) GetAuthorizations() []types.Authorization { return nil }
+
+func (tx *ArbitrumInternalTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumInternalTx) AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules) (*types.Message, error) {
+ msg := &types.Message{
+ Tx: tx,
+ }
+ msg.SetGasPrice(tx.GetPrice())
+ msg.SetTip(tx.GetTipCap())
+ msg.SetFeeCap(tx.GetFeeCap())
+ msg.SetGasLimit(tx.GetGasLimit())
+ msg.SetNonce(tx.GetNonce())
+ msg.SetAccessList(tx.GetAccessList())
+ msg.SetFrom(&ArbosAddress)
+ msg.SetTo(tx.GetTo())
+ msg.SetData(tx.GetData())
+ msg.SetAmount(tx.GetValue())
+ msg.SetCheckNonce(!skipAccountChecks[tx.Type()])
+
+ if baseFee != nil {
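+ // effective gas price = min(tipCap + baseFee, feeCap), computed in big.Int and converted back to uint256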
+ gp, of := uint256.FromBig(cmath.BigMin(msg.GasPrice().ToBig().Add(msg.TipCap().ToBig(), baseFee), msg.FeeCap().ToBig()))
+ if of {
+ return nil, fmt.Errorf("gas price overflow")
+ }
+ msg.SetGasPrice(gp)
+ }
+ // if msg.feeCap.IsZero() {
+ // msg.gasLimit = baseFee.Uint64()
+ // msg.feeCap.Set(uint256.NewInt(0x5f5e100))
+ // }
+ // if baseFee != nil {
+ // overflow := msg.gasPrice.SetFromBig(baseFee)
+ // if overflow {
+ // return msg, errors.New("gasPrice higher than 2^256-1")
+ // }
+ // }
+ // if msg.feeCap.IsZero() {
+ // msg.gasLimit = baseFee.Uint64()
+ // }
+ // msg.gasPrice.Add(&msg.gasPrice, tx.GetTipCap())
+ // if msg.gasPrice.Gt(tx.GetFeeCap()) {
+ // msg.gasPrice.Set(tx.GetFeeCap())
+ // }
+ return msg, nil
+}
+
+func (tx *ArbitrumInternalTx) WithSignature(signer types.Signer, sig []byte) (types.Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumInternalTx) Hash() common.Hash {
+ return types.PrefixedRlpHash(ArbitrumInternalTxType, []interface{}{
+ tx.ChainId,
+ tx.Data,
+ })
+}
+
+func (tx *ArbitrumInternalTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumInternalTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumInternalTx) EncodingSize() int {
+ payloadSize := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumInternalTx) payloadSize() int {
+ size := 0
+
+ // ChainId: add 1 byte for header and the length of ChainId (excluding header)
+ size++
+ size += rlp.Uint256LenExcludingHead(tx.ChainId)
+
+ // Data: rlp.StringLen returns the full encoded length (header + payload)
+ size += rlp.StringLen(tx.Data)
+
+ return size
+}
+
+func (tx *ArbitrumInternalTx) encodePayload(w io.Writer, b []byte, payloadSize int) error {
+ // Write the RLP list prefix
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // Encode ChainId
+ if err := rlp.EncodeUint256(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // Encode Data
+ if err := rlp.EncodeString(tx.Data, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) EncodeRLP(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+ // encode TxType
+ b[0] = ArbitrumInternalTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) DecodeRLP(s *rlp.Stream) error {
+ _, err := s.List()
+ if err != nil {
+ return err
+ }
+ var b []byte
+ if b, err = s.Uint256Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(uint256.Int).SetBytes(b)
+ if tx.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumInternalTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) MarshalBinary(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+ b := types.NewEncodingBuf()
+ defer types.PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumInternalTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) Sender(signer types.Signer) (common.Address, error) {
+ panic("not supported in ArbitrumInternalTx")
+}
+
+func (tx *ArbitrumInternalTx) CachedSender() (common.Address, bool) {
+ return ArbosAddress, true
+}
+
+func (tx *ArbitrumInternalTx) GetSender() (common.Address, bool) {
+ return ArbosAddress, true
+}
+
+// not supported in ArbitrumInternalTx
+func (tx *ArbitrumInternalTx) SetSender(address common.Address) {}
+
+func (tx *ArbitrumInternalTx) IsContractDeploy() bool {
+ return false
+}
+
+func (tx *ArbitrumInternalTx) Unwrap() types.Transaction {
+ return tx
+}
+
+func (tx *ArbitrumInternalTx) encode(b *bytes.Buffer) error {
+ return rlp.Encode(b, tx)
+}
+func (tx *ArbitrumInternalTx) decode(input []byte) error {
+ return rlp.DecodeBytes(input, tx)
+}
+
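+// HeaderInfo packs the Arbitrum-specific block metadata into a standard
+// header: SendRoot travels in the 32-byte Extra field, while SendCount,
+// L1BlockNumber and ArbOSFormatVersion are big-endian encoded into the
+// first 24 bytes of MixDigest.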
+type HeaderInfo struct {
+ SendRoot common.Hash
+ SendCount uint64
+ L1BlockNumber uint64
+ ArbOSFormatVersion uint64
+}
+
+func (info HeaderInfo) extra() []byte {
+ return info.SendRoot[:]
+}
+
+func (info HeaderInfo) mixDigest() [32]byte {
+ mixDigest := common.Hash{}
+ binary.BigEndian.PutUint64(mixDigest[:8], info.SendCount)
+ binary.BigEndian.PutUint64(mixDigest[8:16], info.L1BlockNumber)
+ binary.BigEndian.PutUint64(mixDigest[16:24], info.ArbOSFormatVersion)
+ return mixDigest
+}
+
+func (info HeaderInfo) UpdateHeaderWithInfo(header *types.Header) {
+ header.MixDigest = info.mixDigest()
+ header.Extra = info.extra()
+}
+
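+// DeserializeHeaderExtraInformation is the inverse of UpdateHeaderWithInfo.
+// Headers that cannot carry ArbOS metadata (missing/zero base fee, Extra not
+// 32 bytes long, or difficulty != 1) yield a zero HeaderInfo.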
+func DeserializeHeaderExtraInformation(header *types.Header) HeaderInfo {
+ if header == nil || header.BaseFee == nil || header.BaseFee.Sign() == 0 || len(header.Extra) != 32 || header.Difficulty.Cmp(common.Big1) != 0 {
+ // imported blocks have no base fee
+ // The genesis block doesn't have an ArbOS encoded extra field
+ return HeaderInfo{}
+ }
+ extra := HeaderInfo{}
+ copy(extra.SendRoot[:], header.Extra)
+ extra.SendCount = binary.BigEndian.Uint64(header.MixDigest[:8])
+ extra.L1BlockNumber = binary.BigEndian.Uint64(header.MixDigest[8:16])
+ extra.ArbOSFormatVersion = binary.BigEndian.Uint64(header.MixDigest[16:24])
+ return extra
+}
+
+func GetArbOSVersion(header *types.Header, chain *chain.Config) uint64 {
+ if !chain.IsArbitrum() {
+ return 0
+ }
+ extraInfo := DeserializeHeaderExtraInformation(header)
+ return extraInfo.ArbOSFormatVersion
+}
+
+
+*/
diff --git a/arb/txn/arb_types_test.go b/arb/txn/arb_types_test.go
new file mode 100644
index 00000000000..2507438efeb
--- /dev/null
+++ b/arb/txn/arb_types_test.go
@@ -0,0 +1,181 @@
+package txn
+
+import (
+ "bytes"
+ "math/big"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/stretchr/testify/require"
+)
+
+func TestArbitrumInternalTx(t *testing.T) {
+ rawInitial := [][]byte{
+ common.FromHex("6af88a83066eeeb8846bf6a42d000000000000000000000000000000000000000000000000000000005bd57bd900000000000000000000000000000000000000000000000000000000003f28db00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000064e4f6d4"),
+ common.FromHex("0x6af88a83066eeeb8846bf6a42d00000000000000000000000000000000000000000000000000000000064cb523000000000000000000000000000000000000000000000000000000000049996b00000000000000000000000000000000000000000000000000000000001f09350000000000000000000000000000000000000000000000000000000000000000"),
+ }
+
+ expectedHashes := []common.Hash{
+ common.HexToHash("0x1ac8d67d5c4be184b3822f9ef97102789394f4bc75a0f528d5e14debef6e184c"),
+ common.HexToHash("0x3d78fd6ddbac46955b91777c1fc698b011b7c4a2a84d07a0b0b1a11f34ccf817"),
+ }
+
+ for ri, raw := range rawInitial {
+ var tx ArbitrumInternalTx
+ if err := rlp.DecodeBytes(raw[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+
+ var b bytes.Buffer
+ require.Equal(t, tx.Hash(), expectedHashes[ri])
+ // re-encode and check the bytes match the original raw encoding
+ require.NoError(t, tx.MarshalBinary(&b))
+ require.Equal(t, raw, b.Bytes())
+ }
+}
+
+func TestArbitrumUnsignedTx(t *testing.T) {
+ rawInitial := [][]byte{
+ common.FromHex("0x65f85e83066eee9462182981bf35cdf00dbecdb9bbc00be33138a4dc0184a0eebb008301bdd494000000000000000000000000000000000000006401a425e1606300000000000000000000000051072981bf35cdf00dbecdb9bbc00be3313893cb"),
+ }
+
+ expectedHashes := []common.Hash{
+ common.HexToHash("0x8b7e4e0a2a31d2889200dc6c91c12833208d2f7847eabf0c21e9b15f86a8a8aa"),
+ }
+
+ for ri, raw := range rawInitial {
+ var tx ArbitrumUnsignedTx
+ if err := rlp.DecodeBytes(raw[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+
+ var b bytes.Buffer
+ require.Equal(t, tx.Hash(), expectedHashes[ri])
+ // re-encode and check the bytes match the original raw encoding
+ require.NoError(t, tx.MarshalBinary(&b))
+ require.Equal(t, raw, b.Bytes())
+ }
+}
+
+func TestArbitrumSubmitRetryableTx(t *testing.T) {
+ rawInitial := common.FromHex("0x69f89f83066eeea0000000000000000000000000000000000000000000000000000000000000000194b8787d8f23e176a5d32135d746b69886e03313be845bd57bd98723e3dbb7b88ab8843b9aca00830186a0943fab184622dc19b6109349b94811493bf2a45362872386f26fc100009411155ca9bbf7be58e27f3309e629c847996b43c88601f6377d4ab89411155ca9bbf7be58e27f3309e629c847996b43c880")
+ var tx ArbitrumSubmitRetryableTx
+ if err := rlp.DecodeBytes(rawInitial[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+ require.Equal(t, tx.Hash(), common.HexToHash("0x13cb79b086a427f3db7ebe6ec2bb90a806a3b0368ecee6020144f352e37dbdf6"))
+
+ var b bytes.Buffer
+
+ // re-encode and check the bytes match the original raw encoding
+ require.NoError(t, tx.MarshalBinary(&b))
+
+ require.Equal(t, rawInitial, b.Bytes())
+}
+
+func TestArbitrumSubmitRetryTx(t *testing.T) {
+ rawInitial := common.FromHex("0x68f88583066eee8094b8787d8f23e176a5d32135d746b69886e03313be8405f5e100830186a0943fab184622dc19b6109349b94811493bf2a45362872386f26fc1000080a013cb79b086a427f3db7ebe6ec2bb90a806a3b0368ecee6020144f352e37dbdf69411155ca9bbf7be58e27f3309e629c847996b43c8860b0e85efeab88601f6377d4ab8")
+ var tx ArbitrumRetryTx
+ if err := rlp.DecodeBytes(rawInitial[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+ require.Equal(t, tx.Hash(), common.HexToHash("0x873c5ee3092c40336006808e249293bf5f4cb3235077a74cac9cafa7cf73cb8b"))
+
+ var b bytes.Buffer
+
+ // re-encode and check the bytes match the original raw encoding
+ require.NoError(t, tx.MarshalBinary(&b))
+
+ require.Equal(t, rawInitial, b.Bytes())
+}
+
+func TestArbitrumDepositTx(t *testing.T) {
+ rawInitial := common.FromHex("0x64f85883066eeea0000000000000000000000000000000000000000000000000000000000000000f9499998aa374dbde60d26433e275ad700b658731749488888aa374dbde60d26433e275ad700b65872063880de0b6b3a7640000")
+ var tx ArbitrumDepositTx
+
+ if err := rlp.DecodeBytes(rawInitial[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+
+ require.Equal(t, tx.Hash(), common.HexToHash("0x733c1300c06ac4ced959e68f16f565ee8918a4e75c9f9e3913bc7a7e939c60db"))
+
+ var b bytes.Buffer
+
+ // re-encode and check the bytes match the original raw encoding
+ require.NoError(t, tx.MarshalBinary(&b))
+
+ require.Equal(t, rawInitial, b.Bytes())
+}
+
+func TestArbitrumSubmitRetryableTxGasUsed(t *testing.T) {
+ gasUsedVals := []uint64{0, 32000}
+
+ for _, gasUsed := range gasUsedVals {
+ two := big.NewInt(2)
+ chainID := big.NewInt(1)
+
+ requestId := common.HexToHash("0x0123")
+ from := common.HexToAddress("0x0000000000000000000000000000000000000001")
+ retryTo := common.HexToAddress("0x0000000000000000000000000000000000000002")
+ beneficiary := common.HexToAddress("0x00000000000000000000000000000000000000B5")
+ feeRefund := common.HexToAddress("0x0000000000000000000000000000000000000003")
+
+ tx := &ArbitrumSubmitRetryableTx{
+ ChainId: chainID,
+ RequestId: requestId,
+ From: from,
+ L1BaseFee: big.NewInt(0),
+ DepositValue: big.NewInt(1000),
+ GasFeeCap: two,
+ Gas: 60000,
+ RetryTo: &retryTo,
+ RetryValue: two,
+ Beneficiary: beneficiary,
+ MaxSubmissionFee: big.NewInt(7),
+ FeeRefundAddr: feeRefund,
+ RetryData: []byte("data"),
+ EffectiveGasUsed: gasUsed,
+ }
+
+ var buf bytes.Buffer
+ require.NoError(t, tx.EncodeRLP(&buf))
+
+ // Decode using the generic RLP transaction decoder
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := types.DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*ArbitrumSubmitRetryableTx)
+ require.True(t, ok, "decoded type should be *ArbitrumSubmitRetryableTx")
+
+ // Field-by-field equality
+ require.EqualValues(t, tx.ChainId, tx2.ChainId)
+ require.EqualValues(t, tx.RequestId, tx2.RequestId)
+ require.EqualValues(t, tx.From, tx2.From)
+ require.EqualValues(t, tx.L1BaseFee, tx2.L1BaseFee)
+ require.EqualValues(t, tx.DepositValue, tx2.DepositValue)
+ require.EqualValues(t, tx.GasFeeCap, tx2.GasFeeCap)
+ require.EqualValues(t, tx.Gas, tx2.Gas)
+ require.EqualValues(t, tx.RetryTo, tx2.RetryTo)
+ require.EqualValues(t, tx.RetryValue, tx2.RetryValue)
+ require.EqualValues(t, tx.Beneficiary, tx2.Beneficiary)
+ require.EqualValues(t, tx.MaxSubmissionFee, tx2.MaxSubmissionFee)
+ require.EqualValues(t, tx.FeeRefundAddr, tx2.FeeRefundAddr)
+ require.EqualValues(t, tx.RetryData, tx2.RetryData)
+ require.EqualValues(t, tx.EffectiveGasUsed, tx2.EffectiveGasUsed)
+
+ // With NoTimeBoosted embedded, this should be false.
+ require.False(t, tx2.IsTimeBoosted())
+ }
+}
+
+func Test_RegisterExternalTypes(t *testing.T) {
+ externalTypes := []byte{ArbitrumDepositTxType, ArbitrumRetryTxType, ArbitrumSubmitRetryableTxType, ArbitrumInternalTxType, ArbitrumContractTxType, ArbitrumUnsignedTxType, ArbitrumLegacyTxType}
+ for _, txType := range externalTypes {
+ require.NotNil(t, types.CreateTransactioByType(txType))
+ }
+}
diff --git a/arb/txn/arbitrum_legacy_tx.go b/arb/txn/arbitrum_legacy_tx.go
new file mode 100644
index 00000000000..b57dc0fb617
--- /dev/null
+++ b/arb/txn/arbitrum_legacy_tx.go
@@ -0,0 +1,326 @@
+package txn
+
+/*
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/holiman/uint256"
+)
+
+func init() {
+ types.RegisterTransaction(ArbitrumLegacyTxType, createArbitrumLegacyTx)
+}
+
+func createArbitrumLegacyTx() types.Transaction {
+ return &ArbitrumLegacyTxData{}
+}
+
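+// ArbitrumLegacyTxData wraps a plain LegacyTx whose canonical hash cannot be
+// recomputed from the remaining fields, so the hash is carried explicitly in
+// HashOverride together with the effective gas price, the L1 block number and
+// an optional sender override used for unsigned transactions.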
+type ArbitrumLegacyTxData struct {
+ *types.LegacyTx
+ HashOverride common.Hash // Hash cannot be locally computed from other fields
+ EffectiveGasPrice uint64
+ L1BlockNumber uint64
+ OverrideSender *common.Address `rlp:"optional,nil"` // only used in unsigned Txs
+}
+
+func NewArbitrumLegacyTx(origTx types.Transaction, hashOverride common.Hash, effectiveGas uint64, l1Block uint64, senderOverride *common.Address) (types.Transaction, error) {
+ if origTx.Type() != types.LegacyTxType {
+ return nil, errors.New("attempt to arbitrum-wrap non-legacy transaction")
+ }
+ inner := ArbitrumLegacyTxData{
+ LegacyTx: origTx.(*types.LegacyTx),
+ HashOverride: hashOverride,
+ EffectiveGasPrice: effectiveGas,
+ L1BlockNumber: l1Block,
+ OverrideSender: senderOverride,
+ }
+ return NewArbTx(&inner), nil
+}
+
+// func (tx *ArbitrumLegacyTxData) copy() *ArbitrumLegacyTxData {
+// legacyCopy := tx.LegacyTx.copy()
+// var sender *common.Address
+// if tx.Sender != nil {
+// sender = new(common.Address)
+// *sender = *tx.Sender()
+// }
+// return &ArbitrumLegacyTxData{
+// LegacyTx: *legacyCopy,
+// HashOverride: tx.HashOverride,
+// EffectiveGasPrice: tx.EffectiveGasPrice,
+// L1BlockNumber: tx.L1BlockNumber,
+// OverrideSender: sender,
+// }
+// }
+
+func (tx *ArbitrumLegacyTxData) Type() byte { return ArbitrumLegacyTxType }
+
+func (tx *ArbitrumLegacyTxData) Unwrap() types.Transaction {
+ return tx
+}
+
+func (tx *ArbitrumLegacyTxData) Hash() common.Hash {
+ if tx.HashOverride != (common.Hash{}) {
+ return tx.HashOverride
+ }
+ return tx.LegacyTx.Hash()
+}
+
+func (tx *ArbitrumLegacyTxData) EncodeRLP(w io.Writer) error {
+ if _, err := w.Write([]byte{ArbitrumLegacyTxType}); err != nil {
+ return err
+ }
+
+ legacy := bytes.NewBuffer(nil)
+ if err := tx.LegacyTx.EncodeRLP(legacy); err != nil {
+ return err
+ }
+ legacyBytes := legacy.Bytes()
+
+ payloadSize := rlp.StringLen(legacyBytes) // embedded LegacyTx RLP
+ payloadSize += 1 + 32 // HashOverride (1 byte length + 32 bytes hash)
+ payloadSize += 1 + rlp.IntLenExcludingHead(tx.EffectiveGasPrice) // EffectiveGasPrice
+ payloadSize += 1 + rlp.IntLenExcludingHead(tx.L1BlockNumber) // L1BlockNumber
+
+ if tx.OverrideSender == nil {
+ payloadSize += 1 // empty OverrideSender
+ } else {
+ payloadSize += 1 + 20 // OverrideSender (1 byte length + 20 bytes address)
+ }
+
+ b := make([]byte, 10)
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeString(legacyBytes, w, b); err != nil {
+ return err
+ }
+
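+ // 0x80 + 32: RLP string header for the fixed 32-byte HashOverride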
+ b[0] = 128 + 32
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.HashOverride[:]); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeInt(tx.EffectiveGasPrice, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeInt(tx.L1BlockNumber, w, b); err != nil {
+ return err
+ }
+
+ if tx.OverrideSender == nil {
+ b[0] = 0x80
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.OverrideSender[:]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumLegacyTxData) DecodeRLP(s *rlp.Stream) error {
+ _, err := s.List()
+ if err != nil {
+ return err
+ }
+
+ legacyBytes, err := s.Bytes()
+ if err != nil {
+ return err
+ }
+
+ legacyTx := &types.LegacyTx{}
+ str := rlp.NewStream(bytes.NewReader(legacyBytes), uint64(len(legacyBytes)))
+ if err := legacyTx.DecodeRLP(str); err != nil {
+ return err
+ }
+ tx.LegacyTx = legacyTx
+
+ var hash common.Hash
+ if err := s.Decode(&hash); err != nil {
+ return err
+ }
+ tx.HashOverride = hash
+
+ var effectiveGasPrice uint64
+ if err := s.Decode(&effectiveGasPrice); err != nil {
+ return err
+ }
+ tx.EffectiveGasPrice = effectiveGasPrice
+
+ var l1BlockNumber uint64
+ if err := s.Decode(&l1BlockNumber); err != nil {
+ return err
+ }
+ tx.L1BlockNumber = l1BlockNumber
+
+ var sender common.Address
+ if err := s.Decode(&sender); err != nil {
+ if err.Error() == "rlp: input string too short for common.Address" {
+ tx.OverrideSender = nil
+ } else {
+ return err
+ }
+ } else if sender != (common.Address{}) {
+ tx.OverrideSender = &sender
+ }
+
+ return s.ListEnd()
+}
+
+type arbitrumLegacyTxJSON struct {
+ Type hexutil.Uint64 `json:"type"`
+ Hash common.Hash `json:"hash"`
+ Nonce *hexutil.Uint64 `json:"nonce"`
+ GasPrice *hexutil.Big `json:"gasPrice"`
+ Gas *hexutil.Uint64 `json:"gas"`
+ To *common.Address `json:"to"`
+ Value *hexutil.Big `json:"value"`
+ Data *hexutil.Bytes `json:"input"`
+ V *hexutil.Big `json:"v"`
+ R *hexutil.Big `json:"r"`
+ S *hexutil.Big `json:"s"`
+ HashOverride common.Hash `json:"hashOverride"`
+ EffectiveGasPrice *hexutil.Uint64 `json:"effectiveGasPrice"`
+ L1BlockNumber *hexutil.Uint64 `json:"l1BlockNumber"`
+ OverrideSender *common.Address `json:"overrideSender,omitempty"`
+}
+
+func (tx *ArbitrumLegacyTxData) MarshalJSON() ([]byte, error) {
+ var enc arbitrumLegacyTxJSON
+
+ // These are set for all txn types.
+ enc.Type = hexutil.Uint64(tx.Type())
+ enc.Hash = tx.HashOverride // For ArbitrumLegacyTxData, hash comes from HashOverride
+ enc.Nonce = (*hexutil.Uint64)(&tx.Nonce)
+ enc.Gas = (*hexutil.Uint64)(&tx.GasLimit)
+ enc.GasPrice = (*hexutil.Big)(tx.GasPrice.ToBig())
+ enc.Value = (*hexutil.Big)(tx.Value.ToBig())
+ enc.Data = (*hexutil.Bytes)(&tx.Data)
+ enc.To = tx.To
+ enc.V = (*hexutil.Big)(tx.V.ToBig())
+ enc.R = (*hexutil.Big)(tx.R.ToBig())
+ enc.S = (*hexutil.Big)(tx.S.ToBig())
+
+ // Arbitrum-specific fields
+ enc.HashOverride = tx.HashOverride
+ enc.EffectiveGasPrice = (*hexutil.Uint64)(&tx.EffectiveGasPrice)
+ enc.L1BlockNumber = (*hexutil.Uint64)(&tx.L1BlockNumber)
+ enc.OverrideSender = tx.OverrideSender
+
+ return json.Marshal(&enc)
+}
+
+func (tx *ArbitrumLegacyTxData) UnmarshalJSON(input []byte) error {
+ var dec arbitrumLegacyTxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+
+ // Validate and set common fields
+ if dec.To != nil {
+ tx.To = dec.To
+ }
+ if dec.Nonce == nil {
+ return errors.New("missing required field 'nonce' in transaction")
+ }
+ tx.Nonce = uint64(*dec.Nonce)
+
+ if dec.GasPrice == nil {
+ return errors.New("missing required field 'gasPrice' in transaction")
+ }
+ var overflow bool
+ tx.GasPrice, overflow = uint256.FromBig(dec.GasPrice.ToInt())
+ if overflow {
+ return errors.New("'gasPrice' in transaction does not fit in 256 bits")
+ }
+
+ if dec.Gas == nil {
+ return errors.New("missing required field 'gas' in transaction")
+ }
+ tx.GasLimit = uint64(*dec.Gas)
+
+ if dec.Value == nil {
+ return errors.New("missing required field 'value' in transaction")
+ }
+ tx.Value, overflow = uint256.FromBig(dec.Value.ToInt())
+ if overflow {
+ return errors.New("'value' in transaction does not fit in 256 bits")
+ }
+
+ if dec.Data == nil {
+ return errors.New("missing required field 'input' in transaction")
+ }
+ tx.Data = *dec.Data
+
+ // Decode signature fields
+ if dec.V == nil {
+ return errors.New("missing required field 'v' in transaction")
+ }
+ overflow = tx.V.SetFromBig(dec.V.ToInt())
+ if overflow {
+ return errors.New("dec.V higher than 2^256-1")
+ }
+
+ if dec.R == nil {
+ return errors.New("missing required field 'r' in transaction")
+ }
+ overflow = tx.R.SetFromBig(dec.R.ToInt())
+ if overflow {
+ return errors.New("dec.R higher than 2^256-1")
+ }
+
+ if dec.S == nil {
+ return errors.New("missing required field 's' in transaction")
+ }
+ overflow = tx.S.SetFromBig(dec.S.ToInt())
+ if overflow {
+ return errors.New("dec.S higher than 2^256-1")
+ }
+
+ // Validate signature if present
+ withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero()
+ if withSignature {
+ if err := types.SanityCheckSignature(&tx.V, &tx.R, &tx.S, true); err != nil {
+ return err
+ }
+ }
+
+ // Arbitrum-specific fields
+ tx.HashOverride = dec.HashOverride
+
+ if dec.EffectiveGasPrice != nil {
+ tx.EffectiveGasPrice = uint64(*dec.EffectiveGasPrice)
+ }
+
+ if dec.L1BlockNumber != nil {
+ tx.L1BlockNumber = uint64(*dec.L1BlockNumber)
+ }
+
+ tx.OverrideSender = dec.OverrideSender
+
+ return nil
+}
+
+
+*/
diff --git a/arb/txn/arbitrum_legacy_tx_test.go b/arb/txn/arbitrum_legacy_tx_test.go
new file mode 100644
index 00000000000..f0816a6553f
--- /dev/null
+++ b/arb/txn/arbitrum_legacy_tx_test.go
@@ -0,0 +1,419 @@
+package txn
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+)
+
+func TestArbitrumLegacyTxData_RLPEncodeDecode(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+ senderOverride := common.HexToAddress("0x1234567890123456789012345678901234567890")
+ legacyTx := &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: 42,
+ GasLimit: 50000,
+ To: &to,
+ Value: uint256.NewInt(1000000),
+ Data: []byte{0x01, 0x02, 0x03, 0x04},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(100),
+ S: *uint256.NewInt(200),
+ },
+ GasPrice: uint256.NewInt(20000000000), // 20 gwei
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"),
+ EffectiveGasPrice: 15000000000, // 15 gwei
+ L1BlockNumber: 1234567,
+ OverrideSender: &senderOverride,
+ }
+
+ t.Run("RLP Encode and Decode from bytes", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encodedBytes := buf.Bytes()
+ require.Equal(t, ArbitrumLegacyTxType, encodedBytes[0])
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+ require.Equal(t, arbLegacyTx.Nonce, decodedTx.Nonce)
+ require.Equal(t, arbLegacyTx.GasLimit, decodedTx.GasLimit)
+ require.Equal(t, arbLegacyTx.To, decodedTx.To)
+ require.True(t, arbLegacyTx.Value.Eq(decodedTx.Value))
+ require.Equal(t, arbLegacyTx.Data, decodedTx.Data)
+ require.True(t, arbLegacyTx.V.Eq(&decodedTx.V))
+ require.True(t, arbLegacyTx.R.Eq(&decodedTx.R))
+ require.True(t, arbLegacyTx.S.Eq(&decodedTx.S))
+ require.True(t, arbLegacyTx.GasPrice.Eq(decodedTx.GasPrice))
+ require.Equal(t, arbLegacyTx.HashOverride, decodedTx.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decodedTx.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.OverrideSender, decodedTx.OverrideSender)
+ })
+
+ t.Run("RLP Decode from Stream", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Equal(t, arbLegacyTx.Nonce, decodedTx.Nonce)
+ require.Equal(t, arbLegacyTx.GasLimit, decodedTx.GasLimit)
+ require.Equal(t, arbLegacyTx.To, decodedTx.To)
+ require.True(t, arbLegacyTx.Value.Eq(decodedTx.Value))
+ require.Equal(t, arbLegacyTx.Data, decodedTx.Data)
+ require.True(t, arbLegacyTx.V.Eq(&decodedTx.V))
+ require.True(t, arbLegacyTx.R.Eq(&decodedTx.R))
+ require.True(t, arbLegacyTx.S.Eq(&decodedTx.S))
+ require.True(t, arbLegacyTx.GasPrice.Eq(decodedTx.GasPrice))
+ require.Equal(t, arbLegacyTx.HashOverride, decodedTx.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decodedTx.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.OverrideSender, decodedTx.OverrideSender)
+ })
+
+ t.Run("RLP with nil OverrideSender", func(t *testing.T) {
+ arbLegacyTxNoSender := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xdeadbeef"),
+ EffectiveGasPrice: 25000000000,
+ L1BlockNumber: 999999,
+ OverrideSender: nil,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTxNoSender.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+ require.Nil(t, decodedTx.OverrideSender)
+ require.Equal(t, arbLegacyTxNoSender.HashOverride, decodedTx.HashOverride)
+ require.Equal(t, arbLegacyTxNoSender.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTxNoSender.L1BlockNumber, decodedTx.L1BlockNumber)
+ })
+
+ t.Run("Type byte verification", func(t *testing.T) {
+ require.Equal(t, ArbitrumLegacyTxType, arbLegacyTx.Type())
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ bytes := buf.Bytes()
+ require.Greater(t, len(bytes), 0)
+ require.Equal(t, ArbitrumLegacyTxType, bytes[0])
+ })
+
+ t.Run("LegacyTx embedding verification", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.NotNil(t, decodedTx.LegacyTx)
+ require.Equal(t, legacyTx.Nonce, decodedTx.LegacyTx.Nonce)
+ require.True(t, legacyTx.GasPrice.Eq(decodedTx.LegacyTx.GasPrice))
+ })
+}
+
+func TestArbitrumLegacyTxData_ComplexScenarios(t *testing.T) {
+ t.Run("Contract creation transaction", func(t *testing.T) {
+ legacyTx := &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: 1,
+ GasLimit: 1000000,
+ To: nil, // Contract creation
+ Value: uint256.NewInt(0),
+ Data: []byte{0x60, 0x80, 0x60, 0x40},
+ V: *uint256.NewInt(27),
+ R: *uint256.NewInt(1),
+ S: *uint256.NewInt(2),
+ },
+ GasPrice: uint256.NewInt(1000000000),
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"),
+ EffectiveGasPrice: 900000000,
+ L1BlockNumber: 100,
+ OverrideSender: nil,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Nil(t, decodedTx.To)
+ require.Equal(t, arbLegacyTx.Data, decodedTx.Data)
+ })
+
+ t.Run("Large values", func(t *testing.T) {
+ maxUint256 := new(uint256.Int)
+ maxUint256.SetAllOne()
+
+ to := common.HexToAddress("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
+ legacyTx := &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: ^uint64(0),
+ GasLimit: ^uint64(0),
+ To: &to,
+ Value: maxUint256,
+ Data: make([]byte, 1000),
+ V: *maxUint256,
+ R: *maxUint256,
+ S: *maxUint256,
+ },
+ GasPrice: maxUint256,
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"),
+ EffectiveGasPrice: ^uint64(0),
+ L1BlockNumber: ^uint64(0),
+ OverrideSender: &to,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Equal(t, arbLegacyTx.Nonce, decodedTx.Nonce)
+ require.Equal(t, arbLegacyTx.GasLimit, decodedTx.GasLimit)
+ require.True(t, arbLegacyTx.Value.Eq(decodedTx.Value))
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decodedTx.L1BlockNumber)
+ })
+}
+
+func TestArbitrumLegacyTxData_TypeByteHandling(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+ legacyTx := &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: 100,
+ GasLimit: 21000,
+ To: &to,
+ Value: uint256.NewInt(1000000),
+ Data: []byte{0x12, 0x34},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(1),
+ S: *uint256.NewInt(2),
+ },
+ GasPrice: uint256.NewInt(30000000000),
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xabcdef"),
+ EffectiveGasPrice: 25000000000,
+ L1BlockNumber: 999999,
+ OverrideSender: nil,
+ }
+
+ t.Run("EncodeRLP writes type byte first", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encoded := buf.Bytes()
+ require.Greater(t, len(encoded), 1)
+ require.Equal(t, ArbitrumLegacyTxType, encoded[0])
+
+ decoded := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ stream := rlp.NewStream(bytes.NewReader(encoded[1:]), uint64(len(encoded)-1))
+ err = decoded.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Equal(t, arbLegacyTx.HashOverride, decoded.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decoded.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decoded.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.Nonce, decoded.Nonce)
+ })
+
+ t.Run("Round-trip with type byte", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encoded := buf.Bytes()
+ require.Equal(t, ArbitrumLegacyTxType, encoded[0])
+
+ // Decode skipping type byte
+ decoded := &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ stream := rlp.NewStream(bytes.NewReader(encoded[1:]), uint64(len(encoded)-1))
+ err = decoded.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ // Re-encode and compare
+ var buf2 bytes.Buffer
+ err = decoded.EncodeRLP(&buf2)
+ require.NoError(t, err)
+
+ require.Equal(t, encoded, buf2.Bytes())
+ })
+}
+
+func TestArbitrumLegacyTxData_ArbTxIntegration(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+
+ legacyTx := &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: 10,
+ GasLimit: 21000,
+ To: &to,
+ Value: uint256.NewInt(1000),
+ Data: []byte{},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(1000),
+ S: *uint256.NewInt(2000),
+ },
+ GasPrice: uint256.NewInt(10000000000),
+ }
+
+ arbLegacyTxData := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"),
+ EffectiveGasPrice: 9000000000,
+ L1BlockNumber: 500000,
+ OverrideSender: nil,
+ }
+
+ arbTx := NewArbTx(arbLegacyTxData)
+ require.Equal(t, ArbitrumLegacyTxType, arbTx.Type())
+
+ // Encode using the inner transaction's EncodeRLP (which includes type byte)
+ var buf bytes.Buffer
+ err := arbLegacyTxData.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encodedBytes := buf.Bytes()
+
+ // Verify first byte is the type
+ require.Equal(t, ArbitrumLegacyTxType, encodedBytes[0])
+
+ // Decode using ArbTx's decodeTyped (skip the type byte)
+ newArbTx := &ArbTx{}
+ decoded, err := newArbTx.decodeTyped(encodedBytes, true)
+ require.NoError(t, err)
+
+ decodedArbLegacy, ok := decoded.(*ArbitrumLegacyTxData)
+ require.True(t, ok, "Decoded transaction should be ArbitrumLegacyTxData")
+
+ // Verify all fields
+ require.Equal(t, arbLegacyTxData.HashOverride, decodedArbLegacy.HashOverride)
+ require.Equal(t, arbLegacyTxData.EffectiveGasPrice, decodedArbLegacy.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTxData.L1BlockNumber, decodedArbLegacy.L1BlockNumber)
+ require.Equal(t, arbLegacyTxData.Nonce, decodedArbLegacy.Nonce)
+ require.Equal(t, arbLegacyTxData.GasLimit, decodedArbLegacy.GasLimit)
+}
+
+func TestArbitrumLegacyTxData_TypeBasedDecodingPattern(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+ legacyTx := &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: 42,
+ GasLimit: 50000,
+ To: &to,
+ Value: uint256.NewInt(1000000),
+ Data: []byte{0x01, 0x02, 0x03, 0x04},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(100),
+ S: *uint256.NewInt(200),
+ },
+ GasPrice: uint256.NewInt(20000000000),
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"),
+ EffectiveGasPrice: 15000000000,
+ L1BlockNumber: 1234567,
+ OverrideSender: nil,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encoded := buf.Bytes()
+ require.Greater(t, len(encoded), 0)
+
+ txType := encoded[0]
+ require.Equal(t, ArbitrumLegacyTxType, txType)
+
+ var decodedTx types.Transaction
+ switch txType {
+ case ArbitrumLegacyTxType:
+ decodedTx = &ArbitrumLegacyTxData{
+ LegacyTx: &types.LegacyTx{},
+ }
+ default:
+ t.Fatalf("Unknown transaction type: 0x%x", txType)
+ }
+
+ stream := rlp.NewStream(bytes.NewReader(encoded[1:]), uint64(len(encoded)-1))
+ err = decodedTx.(*ArbitrumLegacyTxData).DecodeRLP(stream)
+ require.NoError(t, err)
+
+ decoded := decodedTx.(*ArbitrumLegacyTxData)
+ require.Equal(t, arbLegacyTx.HashOverride, decoded.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decoded.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decoded.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.Nonce, decoded.Nonce)
+}
diff --git a/arb/txn/marshalling.go b/arb/txn/marshalling.go
new file mode 100644
index 00000000000..200972ff03c
--- /dev/null
+++ b/arb/txn/marshalling.go
@@ -0,0 +1,100 @@
+package txn
+
+/*
+import (
+ "encoding/json"
+
+ "github.com/erigontech/erigon/execution/types"
+)
+
+func init() {
+ types.UnmarshalExtTxnFunc = ArbUnmarshalJSON
+}
+
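+// ArbUnmarshalJSON is the hook installed as types.UnmarshalExtTxnFunc: it
+// decodes the JSON payload into the concrete Arbitrum transaction type that
+// matches txType and returns it; unknown types fall through to nil, nil.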
+func ArbUnmarshalJSON(txType byte, input []byte) (types.Transaction, error) {
+ switch txType {
+ case ArbitrumDepositTxType:
+ tx := new(ArbitrumDepositTx)
+ if err := tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ return tx, nil
+ case ArbitrumInternalTxType:
+ tx := new(ArbitrumInternalTx)
+ if err := tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ return tx, nil
+ case ArbitrumUnsignedTxType:
+ tx := new(ArbitrumUnsignedTx)
+ if err := tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ return tx, nil
+ case ArbitrumContractTxType:
+ tx := new(ArbitrumContractTx)
+ if err := tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ return tx, nil
+ case ArbitrumRetryTxType:
+ tx := new(ArbitrumRetryTx)
+ if err := tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ return tx, nil
+ case ArbitrumSubmitRetryableTxType:
+ tx := new(ArbitrumSubmitRetryableTx)
+ if err := tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ return tx, nil
+ case ArbitrumLegacyTxType:
+ tx := new(ArbitrumLegacyTxData)
+ if err := tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ return tx, nil
+ }
+ return nil, nil
+}
+
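+// The UnmarshalJSON methods below are placeholders: they only check that the
+// input parses as types.TxJSON and do not yet populate the transaction fields.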
+func (tx *ArbitrumContractTx) UnmarshalJSON(input []byte) error {
+ var dec types.TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+func (tx *ArbitrumRetryTx) UnmarshalJSON(input []byte) error {
+ var dec types.TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+func (tx *ArbitrumSubmitRetryableTx) UnmarshalJSON(input []byte) error {
+ var dec types.TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) UnmarshalJSON(input []byte) error {
+ var dec types.TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumUnsignedTx) UnmarshalJSON(input []byte) error {
+ var dec types.TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) UnmarshalJSON(input []byte) error {
+ var dec types.TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+*/
diff --git a/arb/txn/signer.go b/arb/txn/signer.go
new file mode 100644
index 00000000000..46dcb790d97
--- /dev/null
+++ b/arb/txn/signer.go
@@ -0,0 +1,64 @@
+package txn
+
+/*
+import (
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/holiman/uint256"
+)
+
+type ArbitrumSigner struct {
+ types.Signer
+}
+
+func NewArbitrumSigner(signer types.Signer) ArbitrumSigner {
+ return ArbitrumSigner{Signer: signer}
+}
+
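+// Sender short-circuits for the Arbitrum system transaction types, whose
+// sender is recorded in the From field (ArbosAddress for internal txs), and
+// only falls back to signature recovery for wrapped legacy transactions.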
+func (s ArbitrumSigner) Sender(tx types.Transaction) (common.Address, error) {
+ switch inner := tx.(type) {
+ case *ArbitrumUnsignedTx:
+ return inner.From, nil
+ case *ArbitrumContractTx:
+ return inner.From, nil
+ case *ArbitrumDepositTx:
+ return inner.From, nil
+ case *ArbitrumInternalTx:
+ return ArbosAddress, nil
+ case *ArbitrumRetryTx:
+ return inner.From, nil
+ case *ArbitrumSubmitRetryableTx:
+ return inner.From, nil
+ case *ArbitrumLegacyTxData:
+ if inner.OverrideSender != nil {
+ return *inner.OverrideSender, nil
+ }
+ if inner.LegacyTx.V.IsZero() && inner.LegacyTx.R.IsZero() && inner.LegacyTx.S.IsZero() {
+ return common.Address{}, nil
+ }
+ return s.Signer.Sender(inner.LegacyTx)
+ default:
+ return s.Signer.Sender(tx)
+ }
+}
+
+func (s ArbitrumSigner) Equal(s2 ArbitrumSigner) bool {
+ // x, ok := s2.(ArbitrumSigner)
+ return s2.Signer.Equal(s.Signer)
+}
+
+func (s ArbitrumSigner) SignatureValues(tx types.Transaction, sig []byte) (R, S, V *uint256.Int, err error) {
+ switch tx.(type) {
+ case *ArbitrumUnsignedTx, *ArbitrumContractTx, *ArbitrumDepositTx,
+ *ArbitrumInternalTx, *ArbitrumRetryTx, *ArbitrumSubmitRetryableTx:
+
+ return nil, nil, nil, nil
+ case *ArbitrumLegacyTxData:
+ legacyData := tx.(*ArbitrumLegacyTxData)
+ fakeTx := NewArbTx(legacyData.LegacyTx)
+ return s.Signer.SignatureValues(fakeTx, sig)
+ default:
+ return s.Signer.SignatureValues(tx, sig)
+ }
+}
+*/
diff --git a/arb/txn/timeboosted_tx_rlp_test.go b/arb/txn/timeboosted_tx_rlp_test.go
new file mode 100644
index 00000000000..e126d6d633c
--- /dev/null
+++ b/arb/txn/timeboosted_tx_rlp_test.go
@@ -0,0 +1,311 @@
+package txn
+
+import (
+ "bytes"
+ "math/big"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+)
+
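+// The tests below round-trip each transaction type through its RLP encoding
+// and check that the Arbitrum-specific Timeboosted flag survives the trip.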
+func Test_LegacyTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []bool{true, false}
+ for i := 0; i < 2; i++ {
+ two := uint256.NewInt(2)
+ ltx := types.NewTransaction(4, common.HexToAddress("0x2"), two, 21000, two, []byte("data"))
+ ltx.Timeboosted = timeboostedVals[i]
+
+ buf := bytes.NewBuffer(nil)
+ err := ltx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ var ltx2 types.LegacyTx
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), uint64(buf.Len()))
+ err = ltx2.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.EqualValues(t, ltx.Timeboosted, ltx2.Timeboosted)
+ require.EqualValues(t, ltx.GasLimit, ltx2.GasLimit)
+ require.EqualValues(t, ltx.GasPrice.Bytes(), ltx2.GasPrice.Bytes())
+ require.EqualValues(t, ltx.Value.Bytes(), ltx2.Value.Bytes())
+ require.EqualValues(t, ltx.Data, ltx2.Data)
+ require.EqualValues(t, ltx.To, ltx2.To)
+
+ require.EqualValues(t, timeboostedVals[i], ltx2.IsTimeBoosted())
+ }
+}
+
+func Test_DynamicFeeTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []bool{true, false}
+ for i := 0; i < 2; i++ {
+ two := uint256.NewInt(2)
+ three := uint256.NewInt(3)
+ chainID := uint256.NewInt(1)
+ accessList := types.AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+
+ tx := &types.DynamicFeeTransaction{
+ CommonTx: types.CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ ChainID: chainID,
+ TipCap: two,
+ FeeCap: three,
+ AccessList: accessList,
+ Timeboosted: timeboostedVals[i],
+ }
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := types.DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*types.DynamicFeeTransaction)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.TipCap.Bytes(), tx2.TipCap.Bytes())
+ require.EqualValues(t, tx.FeeCap.Bytes(), tx2.FeeCap.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+ require.EqualValues(t, timeboostedVals[i], tx2.IsTimeBoosted())
+ }
+}
+
+func Test_AccessListTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []bool{true, false}
+ for i := 0; i < 2; i++ {
+ two := uint256.NewInt(2)
+ chainID := uint256.NewInt(1)
+ accessList := types.AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+
+ tx := &types.AccessListTx{
+ LegacyTx: types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ GasPrice: two,
+ },
+ ChainID: chainID,
+ AccessList: accessList,
+ Timeboosted: timeboostedVals[i],
+ }
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := types.DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*types.AccessListTx)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.GasPrice.Bytes(), tx2.GasPrice.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+ require.EqualValues(t, timeboostedVals[i], tx2.IsTimeBoosted())
+ }
+}
+
+func Test_BlobTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []bool{true, false}
+ for i := 0; i < 2; i++ {
+ two := uint256.NewInt(2)
+ three := uint256.NewInt(3)
+ chainID := uint256.NewInt(1)
+ maxFeePerBlobGas := uint256.NewInt(5)
+ accessList := types.AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+ blobHashes := []common.Hash{common.HexToHash("0x01"), common.HexToHash("0x02")}
+
+ tx := &types.BlobTx{
+ DynamicFeeTransaction: types.DynamicFeeTransaction{
+ CommonTx: types.CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ ChainID: chainID,
+ TipCap: two,
+ FeeCap: three,
+ AccessList: accessList,
+ Timeboosted: timeboostedVals[i],
+ },
+ MaxFeePerBlobGas: maxFeePerBlobGas,
+ BlobVersionedHashes: blobHashes,
+ }
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := types.DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*types.BlobTx)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.TipCap.Bytes(), tx2.TipCap.Bytes())
+ require.EqualValues(t, tx.FeeCap.Bytes(), tx2.FeeCap.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, tx.MaxFeePerBlobGas.Bytes(), tx2.MaxFeePerBlobGas.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+ require.EqualValues(t, len(tx.BlobVersionedHashes), len(tx2.BlobVersionedHashes))
+ require.EqualValues(t, timeboostedVals[i], tx.IsTimeBoosted())
+ }
+}
+
+func Test_SetCodeTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []bool{true, false}
+ for i := 0; i < 2; i++ {
+ two := uint256.NewInt(2)
+ three := uint256.NewInt(3)
+ chainID := uint256.NewInt(1)
+ accessList := types.AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+
+ auth := types.Authorization{
+ ChainID: *chainID,
+ Address: common.HexToAddress("0x3"),
+ Nonce: 1,
+ }
+
+ tx := &types.SetCodeTransaction{
+ DynamicFeeTransaction: types.DynamicFeeTransaction{
+ CommonTx: types.CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ ChainID: chainID,
+ TipCap: two,
+ FeeCap: three,
+ AccessList: accessList,
+ Timeboosted: timeboostedVals[i],
+ },
+ Authorizations: []types.Authorization{auth},
+ }
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := types.DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*types.SetCodeTransaction)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.TipCap.Bytes(), tx2.TipCap.Bytes())
+ require.EqualValues(t, tx.FeeCap.Bytes(), tx2.FeeCap.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+ require.EqualValues(t, len(tx.Authorizations), len(tx2.Authorizations))
+ require.EqualValues(t, timeboostedVals[i], tx.IsTimeBoosted())
+ }
+}
+
+func Test_ArbRetryTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []bool{true, false}
+ for i := 0; i < 2; i++ {
+ two := big.NewInt(2)
+ chainID := big.NewInt(1)
+ ticketId := common.HexToHash("0x123")
+ toAddr := common.HexToAddress("0x2")
+
+ tx := &ArbitrumRetryTx{
+ ChainId: chainID,
+ Nonce: 4,
+ From: common.HexToAddress("0x1"),
+ GasFeeCap: two,
+ Gas: 21000,
+ To: &toAddr,
+ Value: two,
+ Data: []byte("data"),
+ TicketId: ticketId,
+ RefundTo: common.HexToAddress("0x3"),
+ MaxRefund: two,
+ SubmissionFeeRefund: two,
+ Timeboosted: timeboostedVals[i],
+ }
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := types.DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*ArbitrumRetryTx)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.Gas, tx2.Gas)
+ require.EqualValues(t, tx.GasFeeCap, tx2.GasFeeCap)
+ require.EqualValues(t, tx.Value, tx2.Value)
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.From, tx2.From)
+ require.EqualValues(t, tx.Nonce, tx2.Nonce)
+ require.EqualValues(t, tx.ChainId, tx2.ChainId)
+ require.EqualValues(t, tx.TicketId, tx2.TicketId)
+ require.EqualValues(t, tx.RefundTo, tx2.RefundTo)
+ require.EqualValues(t, tx.MaxRefund, tx2.MaxRefund)
+ require.EqualValues(t, tx.SubmissionFeeRefund, tx2.SubmissionFeeRefund)
+ require.EqualValues(t, timeboostedVals[i], tx.IsTimeBoosted())
+
+ }
+}
diff --git a/arb/txn/tx_timebosted.go b/arb/txn/tx_timebosted.go
new file mode 100644
index 00000000000..8ff941a1741
--- /dev/null
+++ b/arb/txn/tx_timebosted.go
@@ -0,0 +1,11 @@
+package txn
+
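+// NoTimeBoosted is a no-op implementation of the timeboost accessors for
+// transaction types that carry no timeboost information: IsTimeBoosted
+// reports nil and SetTimeboosted discards its argument.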
+type NoTimeBoosted bool
+
+func (tx *NoTimeBoosted) IsTimeBoosted() *bool {
+ return nil
+}
+
+func (tx *NoTimeBoosted) SetTimeboosted(_ *bool) {
+
+}
diff --git a/cl/beacon/beaconevents/model.go b/cl/beacon/beaconevents/model.go
index d40e6b47104..f8ed85c977f 100644
--- a/cl/beacon/beaconevents/model.go
+++ b/cl/beacon/beaconevents/model.go
@@ -37,6 +37,7 @@ type (
BlsToExecutionChangesData = cltypes.SignedBLSToExecutionChange
ContributionAndProofData = cltypes.SignedContributionAndProof
BlobSidecarData = cltypes.BlobSidecar
+ DataColumnSidecarData = cltypes.DataColumnSidecar
)
+// DataColumnSidecarData includes block_root and slot for SSE events
diff --git a/cl/phase1/forkchoice/forkchoice.go b/cl/phase1/forkchoice/forkchoice.go
index 30932022ce7..4d7cb47a03b 100644
--- a/cl/phase1/forkchoice/forkchoice.go
+++ b/cl/phase1/forkchoice/forkchoice.go
@@ -732,6 +732,15 @@ func (f *ForkChoiceStore) addProposerLookahead(slot uint64, proposerLookahead so
return nil
}
func (f *ForkChoiceStore) GetPendingConsolidations(blockRoot common.Hash) (*solid.ListSSZ[*solid.PendingConsolidation], bool) {
return f.pendingConsolidations.Get(blockRoot)
}
diff --git a/cl/rpc/rpc.go b/cl/rpc/rpc.go
index 51d5278d209..0dab2148c4e 100644
--- a/cl/rpc/rpc.go
+++ b/cl/rpc/rpc.go
@@ -118,13 +118,13 @@ func (b *BeaconRpcP2P) SendColumnSidecarsByRootIdentifierReq(
ctx context.Context,
req *solid.ListSSZ[*cltypes.DataColumnsByRootIdentifier],
) ([]*cltypes.DataColumnSidecar, string, error) {
- filteredReq, pid, _, err := b.columnDataPeers.pickPeerRoundRobin(ctx, req)
- if err != nil {
- return nil, pid, err
- }
+ // filteredReq, pid, _, err := b.columnDataPeers.pickPeerRoundRobin(ctx, req)
+ // if err != nil {
+ // return nil, pid, err
+ // }
var buffer buffer.Buffer
- if err := ssz_snappy.EncodeAndWrite(&buffer, filteredReq); err != nil {
+ if err := ssz_snappy.EncodeAndWrite(&buffer, req); err != nil {
return nil, "", err
}
diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go
index e804f983dcc..02dc70e2768 100644
--- a/cmd/abigen/main.go
+++ b/cmd/abigen/main.go
@@ -103,18 +103,6 @@ func abigen(c *cli.Context) error {
if c.String(pkgFlag.Name) == "" {
utils.Fatalf("No destination package specified (--pkg)")
}
- var lang bind.Lang
- switch c.String(langFlag.Name) {
- case "go":
- lang = bind.LangGo
- case "java":
- lang = bind.LangJava
- case "objc":
- lang = bind.LangObjC
- utils.Fatalf("Objc binding generation is uncompleted")
- default:
- utils.Fatalf("Unsupported destination language \"%s\" (--lang)", c.String(langFlag.Name))
- }
// If the entire solidity code was specified, build and bind based on that
var (
abis []string
@@ -207,7 +195,7 @@ func abigen(c *cli.Context) error {
}
}
// Generate the contract binding
- code, err := bind.Bind(types, abis, bins, sigs, c.String(pkgFlag.Name), lang, libs, aliases)
+ code, err := bind.Bind(types, abis, bins, sigs, c.String(pkgFlag.Name), libs, aliases)
if err != nil {
utils.Fatalf("Failed to generate ABI binding: %v", err)
}
diff --git a/cmd/bumper/internal/schema/schema.go b/cmd/bumper/internal/schema/schema.go
index c8f65171f0b..44f30643b83 100644
--- a/cmd/bumper/internal/schema/schema.go
+++ b/cmd/bumper/internal/schema/schema.go
@@ -6,6 +6,8 @@ import (
"github.com/erigontech/erigon/db/snaptype"
"gopkg.in/yaml.v3"
+ "os"
+ "sort"
)
type TwoVers struct {
diff --git a/cmd/capcli/cli.go b/cmd/capcli/cli.go
index 689dc29a921..f6bc7ae04a3 100644
--- a/cmd/capcli/cli.go
+++ b/cmd/capcli/cli.go
@@ -22,6 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "github.com/erigontech/erigon/db/snaptype"
"io"
"math"
"net/http"
diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go
index 70b74dd20ee..1daa2fac7b0 100644
--- a/cmd/downloader/main.go
+++ b/cmd/downloader/main.go
@@ -64,6 +64,7 @@ import (
"github.com/erigontech/erigon/node/paths"
"github.com/erigontech/erigon/p2p/nat"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
_ "github.com/erigontech/erigon/db/snaptype2" //hack
diff --git a/cmd/erigon/node/node.go b/cmd/erigon/node/node.go
index 25eb8b3a288..aba20ecf051 100644
--- a/cmd/erigon/node/node.go
+++ b/cmd/erigon/node/node.go
@@ -110,6 +110,8 @@ func NewNodConfigUrfave(ctx *cli.Context, debugMux *http.ServeMux, logger log.Lo
logger.Info("Starting Erigon on Gnosis Mainnet...")
case networkname.Chiado:
logger.Info("Starting Erigon on Chiado testnet...")
+ case networkname.ArbiturmSepolia:
+ logger.Info("Starting Erigon on Arbitrum Sepolia testnet...")
case "", networkname.Mainnet:
if !ctx.IsSet(utils.NetworkIdFlag.Name) {
logger.Info("Starting Erigon on Ethereum mainnet...")
diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go
index 53bea9d11ac..96ad8628d18 100644
--- a/cmd/evm/runner.go
+++ b/cmd/evm/runner.go
@@ -325,6 +325,7 @@ func runCmd(ctx *cli.Context) error {
blockContext := evmtypes.BlockContext{
BlockNumber: runtimeConfig.BlockNumber.Uint64(),
Time: runtimeConfig.Time.Uint64(),
+ ArbOSVersion: 0,
}
rules = blockContext.Rules(chainConfig)
}
diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go
index 4491651e3eb..399ff4220e6 100644
--- a/cmd/hack/hack.go
+++ b/cmd/hack/hack.go
@@ -56,6 +56,7 @@ import (
"github.com/erigontech/erigon/node/ethconfig"
"github.com/erigontech/erigon/node/logging"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
)
diff --git a/cmd/hack/tool/tool.go b/cmd/hack/tool/tool.go
index 634095c3c90..4c3f1638856 100644
--- a/cmd/hack/tool/tool.go
+++ b/cmd/hack/tool/tool.go
@@ -19,6 +19,8 @@ package tool
import (
"context"
+ arbparams "github.com/erigontech/erigon/arb/chain/params"
+ "github.com/erigontech/erigon/common"
"github.com/erigontech/erigon/db/kv"
"github.com/erigontech/erigon/db/rawdb"
"github.com/erigontech/erigon/execution/chain"
@@ -33,6 +35,9 @@ func Check(e error) {
func ChainConfig(tx kv.Tx) *chain.Config {
genesisBlockHash, err := rawdb.ReadCanonicalHash(tx, 0)
Check(err)
+ if genesisBlockHash == (common.Hash{}) {
+ return arbparams.ArbitrumOneChainConfig()
+ }
chainConfig, err := rawdb.ReadChainConfig(tx, genesisBlockHash)
Check(err)
return chainConfig
diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go
index bfacb7103da..4351a3f325c 100644
--- a/cmd/integration/commands/flags.go
+++ b/cmd/integration/commands/flags.go
@@ -44,6 +44,8 @@ var (
chain string // Which chain to use (mainnet, sepolia, etc.)
outputCsvFile string
+	l2rpc, l2rpcReceipt string // L2 RPC addresses for Arbitrum block downloading
+
startTxNum uint64
dbWriteMap bool
@@ -138,6 +140,10 @@ func withConcurrentCommitment(cmd *cobra.Command) {
cmd.Flags().BoolVar(&statecfg.ExperimentalConcurrentCommitment, utils.ExperimentalConcurrentCommitmentFlag.Name, utils.ExperimentalConcurrentCommitmentFlag.Value, utils.ExperimentalConcurrentCommitmentFlag.Usage)
}
func withBatchSize(cmd *cobra.Command) {
cmd.Flags().StringVar(&batchSizeStr, "batchSize", cli.BatchSizeFlag.Value, cli.BatchSizeFlag.Usage)
}
@@ -180,6 +186,14 @@ func withOutputCsvFile(cmd *cobra.Command) {
cmd.Flags().StringVar(&outputCsvFile, "output.csv.file", "", "location to output csv data")
}
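+// withL2RPCaddress and withL2RPCReceiptAddress register the L2 RPC endpoint
+// flags used when downloading Arbitrum blocks and their receipts.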
+func withL2RPCaddress(cmd *cobra.Command) {
+ cmd.Flags().StringVar(&l2rpc, cli.L2RPCAddrFlag.Name, "", cli.L2RPCAddrFlag.Usage)
+}
+
+func withL2RPCReceiptAddress(cmd *cobra.Command) {
+ cmd.Flags().StringVar(&l2rpcReceipt, cli.L2RPCReceiptAddrFlag.Name, "", cli.L2RPCReceiptAddrFlag.Usage)
+}
+
func withChaosMonkey(cmd *cobra.Command) {
cmd.Flags().BoolVar(&syncCfg.ChaosMonkey, utils.ChaosMonkeyFlag.Name, utils.ChaosMonkeyFlag.Value, utils.ChaosMonkeyFlag.Usage)
}
diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go
index eb7fcda4818..0a177a41066 100644
--- a/cmd/integration/commands/stages.go
+++ b/cmd/integration/commands/stages.go
@@ -87,6 +87,7 @@ import (
"github.com/erigontech/erigon/polygon/heimdall"
"github.com/erigontech/erigon/polygon/heimdall/poshttp"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
)
@@ -451,6 +452,8 @@ func init() {
withChain(cmdStageHeaders)
withHeimdall(cmdStageHeaders)
withChaosMonkey(cmdStageHeaders)
+ withL2RPCaddress(cmdStageHeaders)
+ withL2RPCReceiptAddress(cmdStageHeaders)
rootCmd.AddCommand(cmdStageHeaders)
withConfig(cmdStageBodies)
@@ -810,11 +813,13 @@ func stageExec(db kv.TemporalRwDB, ctx context.Context, logger log.Logger) error
genesis := readGenesis(chain)
br, _ := blocksIO(db, logger)
+ ethdb.InitialiazeLocalWasmTarget()
+
notifications := shards.NewNotifications(nil)
cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, notifications,
/*stateStream=*/ false,
/*badBlockHalt=*/ true,
- dirs, br, nil, genesis, syncCfg, nil /*experimentalBAL=*/, false)
+ dirs, br, nil, genesis, syncCfg, nil /*experimentalBAL=*/, false, wasmdb.OpenArbitrumWasmDB(ctx, dirs.ArbitrumWasm))
if unwind > 0 {
if err := db.ViewTemporal(ctx, func(tx kv.TemporalTx) error {
@@ -938,8 +943,6 @@ func stageExec(db kv.TemporalRwDB, ctx context.Context, logger log.Logger) error
break
}
}
-
- return nil
}
func stageCustomTrace(db kv.TemporalRwDB, ctx context.Context, logger log.Logger) error {
@@ -1182,6 +1185,7 @@ func newSync(ctx context.Context, db kv.TemporalRwDB, miningConfig *buildercfg.M
) {
dirs, pm := datadir.New(datadirCli), fromdb.PruneMode(db)
+ ethdb.InitialiazeLocalWasmTarget()
vmConfig := &vm.Config{}
genesis := readGenesis(chain)
diff --git a/cmd/integration/commands/state_domains.go b/cmd/integration/commands/state_domains.go
index cb02bade1dc..008ce31331e 100644
--- a/cmd/integration/commands/state_domains.go
+++ b/cmd/integration/commands/state_domains.go
@@ -57,6 +57,7 @@ import (
"github.com/erigontech/erigon/node/ethconfig"
"github.com/erigontech/erigon/node/nodecfg"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
)
diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go
index 01bbdf957ea..6e0807aaa32 100644
--- a/cmd/integration/commands/state_stages.go
+++ b/cmd/integration/commands/state_stages.go
@@ -50,6 +50,7 @@ import (
"github.com/erigontech/erigon/node/nodecfg"
"github.com/erigontech/erigon/node/shards"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
)
@@ -139,7 +140,9 @@ func init() {
withChain(stateStages)
withHeimdall(stateStages)
withWorkers(stateStages)
- withChaosMonkey(stateStages)
+ //withChaosMonkey(stateStages)
+ withL2RPCaddress(stateStages)
+ withL2RPCReceiptAddress(stateStages)
rootCmd.AddCommand(stateStages)
withConfig(loopExecCmd)
@@ -150,6 +153,8 @@ func init() {
withHeimdall(loopExecCmd)
withWorkers(loopExecCmd)
withChaosMonkey(loopExecCmd)
+ withL2RPCaddress(loopExecCmd)
+ withL2RPCReceiptAddress(loopExecCmd)
rootCmd.AddCommand(loopExecCmd)
}
@@ -186,7 +191,7 @@ func syncBySmallSteps(db kv.TemporalRwDB, miningConfig buildercfg.MiningConfig,
}
br, _ := blocksIO(db, logger1)
- execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, notifications, false, true, dirs, br, nil, spec.Genesis, syncCfg, nil, false)
+ execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, notifications, false, true, dirs, br, nil, spec.Genesis, syncCfg, nil, false, wasmdb.OpenArbitrumWasmDB(ctx, dirs.ArbitrumWasm))
execUntilFunc := func(execToBlock uint64) stagedsync.ExecFunc {
return func(badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, doms *execctx.SharedDomains, rwTx kv.TemporalRwTx, logger log.Logger) error {
@@ -376,7 +381,7 @@ func loopExec(db kv.TemporalRwDB, ctx context.Context, unwind uint64, logger log
initialCycle := false
br, _ := blocksIO(db, logger)
notifications := shards.NewNotifications(nil)
- cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, notifications, false, true, dirs, br, nil, spec.Genesis, syncCfg, nil, false)
+ cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, chainConfig, engine, vmConfig, notifications, false, true, dirs, br, nil, spec.Genesis, syncCfg, nil, false, wasmdb.OpenArbitrumWasmDB(ctx, dirs.ArbitrumWasm))
// set block limit of execute stage
sync.MockExecFunc(stages.Execution, func(badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, sd *execctx.SharedDomains, tx kv.TemporalRwTx, logger log.Logger) error {
diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go
index 73c2ee6d687..677721f60c2 100644
--- a/cmd/rpcdaemon/cli/config.go
+++ b/cmd/rpcdaemon/cli/config.go
@@ -415,6 +415,7 @@ func RemoteServices(ctx context.Context, cfg *httpcfg.HttpCfg, logger log.Logger
return nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, errors.New("chain config not found in db. Need start erigon at least once on this db")
}
cfg.Snap.ChainName = cc.ChainName
+ cfg.IsArbitrum = cc.IsArbitrum()
// Configure sapshots
cfg.Sync, err = features.EnableSyncCfg(rawDB, cfg.Sync)
if err != nil {
diff --git a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go
index cc42a6736c4..92e94f19c57 100644
--- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go
+++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go
@@ -98,6 +98,7 @@ type HttpCfg struct {
LogDirVerbosity string
LogDirPath string
+ BatchResponseMaxSize int // Maximum response size
BatchLimit int // Maximum number of requests in a batch
ReturnDataLimit int // Maximum number of bytes returned from calls (like eth_call)
AllowUnprotectedTxs bool // Whether to allow non EIP-155 protected transactions txs over RPC
@@ -106,4 +107,6 @@ type HttpCfg struct {
OtsMaxPageSize uint64
RPCSlowLogThreshold time.Duration
+
+ IsArbitrum bool // Whether the chain is Arbitrum
}
diff --git a/cmd/snapshots/genfromrpc/genfromrpc.go b/cmd/snapshots/genfromrpc/genfromrpc.go
index e4715a40446..e8119cae699 100644
--- a/cmd/snapshots/genfromrpc/genfromrpc.go
+++ b/cmd/snapshots/genfromrpc/genfromrpc.go
@@ -5,10 +5,14 @@ import (
"errors"
"fmt"
"math/big"
+ "strings"
+ "sync/atomic"
"time"
"github.com/holiman/uint256"
"github.com/urfave/cli/v2"
+ "golang.org/x/sync/errgroup"
+ "golang.org/x/time/rate"
"github.com/erigontech/erigon/cmd/utils"
"github.com/erigontech/erigon/common"
@@ -18,8 +22,10 @@ import (
"github.com/erigontech/erigon/db/kv"
"github.com/erigontech/erigon/db/kv/mdbx"
"github.com/erigontech/erigon/db/rawdb"
+ "github.com/erigontech/erigon/execution/stagedsync/stages"
"github.com/erigontech/erigon/execution/types"
"github.com/erigontech/erigon/rpc"
+ turbocli "github.com/erigontech/erigon/turbo/cli"
)
// CLI flags
@@ -41,11 +47,23 @@ var FromBlock = cli.Uint64Flag{
Value: 0,
}
+var NoWrite = cli.BoolFlag{
+ Name: "no-write",
+ Usage: "Avoid writing to the database",
+ Value: false,
+}
+
+var Arbitrum = cli.BoolFlag{
+	Name:  "arbitrum", // decode blocks using the Arbitrum-specific transaction types
+	Usage: "Treat the source chain as Arbitrum and decode its transaction types accordingly",
+ Value: false,
+}
+
var Command = cli.Command{
Action: func(cliCtx *cli.Context) error { return genFromRPc(cliCtx) },
Name: "genfromrpc",
Usage: "genfromrpc utilities",
- Flags: []cli.Flag{&utils.DataDirFlag, &RpcAddr, &Verify, &FromBlock},
+ Flags: []cli.Flag{&utils.DataDirFlag, &RpcAddr, &Verify, &FromBlock, &Arbitrum, &turbocli.L2RPCReceiptAddrFlag},
Description: ``,
}
@@ -81,7 +99,14 @@ type BlockJson struct {
Transactions []map[string]any `json:"transactions"`
}
-// --- Helper functions ---
+// ReceiptJson holds the minimal receipt data needed for timeboosted transactions
+type ReceiptJson struct {
+ Status hexutil.Uint64 `json:"status"`
+ Type string `json:"type"`
+ TransactionHash common.Hash `json:"transactionHash"`
+ Timeboosted *bool `json:"timeboosted,omitempty"`
+ GasUsed *hexutil.Big `json:"gasUsed,omitempty"`
+}
// convertHexToBigInt converts a hex string (with a "0x" prefix) to a *big.Int.
func convertHexToBigInt(hexStr string) *big.Int {
@@ -101,6 +126,14 @@ func getUint256FromField(rawTx map[string]any, field string) *uint256.Int {
return nil
}
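+// isRetryableError reports whether an RPC error looks transient (HTTP 429
+// rate limiting or an "internal server" response) and is worth retrying.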
+func isRetryableError(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := err.Error()
+ return strings.Contains(errStr, "429") || strings.Contains(strings.ToLower(errStr), "internal server")
+}
+
// buildDynamicFeeFields sets the common dynamic fee fields from rawTx.
func buildDynamicFeeFields(tx *types.DynamicFeeTransaction, rawTx map[string]any) {
if chainID := getUint256FromField(rawTx, "chainId"); chainID != nil {
@@ -296,61 +329,573 @@ func makeEip7702Tx(commonTx *types.CommonTx, rawTx map[string]any) types.Transac
}},
}
buildDynamicFeeFields(&tx.DynamicFeeTransaction, rawTx)
+ if rawAuths, ok := rawTx["authorizationList"].([]interface{}); ok {
+ var auths []types.Authorization
+ for _, a := range rawAuths {
+ if auth, ok := a.(map[string]interface{}); ok {
+
+ cid := getUint256FromField(auth, "chainId")
+ yparity := getUint256FromField(auth, "yParity")
+ r := getUint256FromField(auth, "r")
+ s := getUint256FromField(auth, "s")
+ nonce := getUint256FromField(auth, "nonce")
+
+ ja := types.Authorization{
+ Address: common.HexToAddress(auth["address"].(string)),
+ }
+
+ ja.ChainID = *cid
+ ja.YParity = uint8(yparity.Uint64())
+ ja.R.SetFromBig(r.ToBig())
+ ja.S.SetFromBig(s.ToBig())
+ ja.Nonce = nonce.Uint64()
+
+ auths = append(auths, ja)
+ }
+ }
+ tx.Authorizations = auths
+ }
+
// TODO: Add any additional EIP-7702–specific processing here.
return tx
}
-// unMarshalTransactions decodes a slice of raw transactions into types.Transactions.
-func unMarshalTransactions(rawTxs []map[string]any) (types.Transactions, error) {
- var txs types.Transactions
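+// makeArbitrumLegacyTxFunc wraps the common fields into an ArbitrumLegacyTxData,
+// carrying over the gas price, L1 block number, effective gas price, the
+// original hash as an override, and (when known) the sender override.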
+func makeArbitrumLegacyTxFunc(commonTx *types.CommonTx, rawTx map[string]interface{}) types.Transaction {
+ tx := &types.ArbitrumLegacyTxData{LegacyTx: &types.LegacyTx{CommonTx: *commonTx}}
+
+ if gasPriceHex, ok := rawTx["gasPrice"].(string); ok {
+ tx.GasPrice = uint256.MustFromHex(gasPriceHex)
+ }
+ if l1BlockNum, ok := rawTx["l1BlockNumber"].(string); ok {
+ tx.L1BlockNumber = convertHexToBigInt(l1BlockNum).Uint64()
+ // } else {
+ // if l1BlockNum, ok := rawTx["blockNumber"].(string); ok {
+ // tx.L1BlockNumber = convertHexToBigInt(l1BlockNum).Uint64()
+ // }
+ }
+ if effectiveGasPrice, ok := rawTx["effectiveGasPrice"].(string); ok {
+ tx.EffectiveGasPrice = convertHexToBigInt(effectiveGasPrice).Uint64()
+ }
+ // if hashOverride, ok := rawTx["hashOverride"].(string); ok {
+ // tx.HashOverride = common.HexToHash(hashOverride)
+ // }
+ if hashOverride, ok := rawTx["hash"].(string); ok {
+ tx.HashOverride = common.HexToHash(hashOverride)
+ }
+ sender, ok := commonTx.GetSender()
+ if ok {
+ tx.OverrideSender = &sender
+ }
+
+ // return types.NewArbitrumLegacyTx(&types.LegacyTx{CommonTx: *commonTx, GasPrice: tx.GasPrice}, tx.HashOverride, tx.EffectiveGasPrice, tx.L1BlockNumber, tx.OverrideSender)
+ return tx
+}
+
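+// makeRetryableTxFunc builds an ArbitrumSubmitRetryableTx from the raw JSON
+// transaction fields.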
+func makeRetryableTxFunc(commonTx *types.CommonTx, rawTx map[string]interface{}) types.Transaction {
+ tx := &types.ArbitrumSubmitRetryableTx{}
+
+ // Chain ID: required field (hex string)
+ if chainIdHex, ok := rawTx["chainId"].(string); ok {
+ tx.ChainId = convertHexToBigInt(chainIdHex)
+ }
- for _, rawTx := range rawTxs {
- commonTx, err := parseCommonTx(rawTx)
+ // Request ID: expected as a hex string
+ if requestIdHex, ok := rawTx["requestId"].(string); ok {
+ tx.RequestId = common.HexToHash(requestIdHex)
+ }
+
+ // From: expected as a hex string address.
+ if fromHex, ok := rawTx["from"].(string); ok {
+ tx.From = common.HexToAddress(fromHex)
+ }
+
+ // L1BaseFee: expected as a hex string.
+ if l1BaseFeeHex, ok := rawTx["l1BaseFee"].(string); ok {
+ tx.L1BaseFee = convertHexToBigInt(l1BaseFeeHex)
+ }
+
+ // DepositValue: expected as a hex string.
+ if depositValueHex, ok := rawTx["depositValue"].(string); ok {
+ tx.DepositValue = convertHexToBigInt(depositValueHex)
+ }
+
+ // GasFeeCap: expected as a hex string.
+ if gasFeeCapHex, ok := rawTx["maxFeePerGas"].(string); ok {
+ tx.GasFeeCap = convertHexToBigInt(gasFeeCapHex)
+ }
+
+ // Gas limit: taken from the commonTx already parsed.
+ tx.Gas = commonTx.GasLimit
+
+ // RetryTo: expected as a hex string address. If empty, nil indicates contract creation.
+ if retryToHex, ok := rawTx["retryTo"].(string); ok && retryToHex != "" {
+ addr := common.HexToAddress(retryToHex)
+ tx.RetryTo = &addr
+ }
+
+ // RetryValue: expected as a hex string.
+ if retryValueHex, ok := rawTx["retryValue"].(string); ok {
+ tx.RetryValue = convertHexToBigInt(retryValueHex)
+ }
+
+ // Beneficiary: expected as a hex string address.
+ if beneficiaryHex, ok := rawTx["beneficiary"].(string); ok {
+ tx.Beneficiary = common.HexToAddress(beneficiaryHex)
+ }
+
+ // MaxSubmissionFee: expected as a hex string.
+ if maxSubmissionFeeHex, ok := rawTx["maxSubmissionFee"].(string); ok {
+ tx.MaxSubmissionFee = convertHexToBigInt(maxSubmissionFeeHex)
+ }
+
+ // FeeRefundAddr: expected as a hex string address.
+ if feeRefundAddrHex, ok := rawTx["refundTo"].(string); ok {
+ tx.FeeRefundAddr = common.HexToAddress(feeRefundAddrHex)
+ }
+
+ // RetryData: expected as a hex string (with "0x" prefix) that will be decoded to bytes.
+ if retryDataHex, ok := rawTx["retryData"].(string); ok && len(retryDataHex) >= 2 && retryDataHex[:2] == "0x" {
+ tx.RetryData = common.Hex2Bytes(retryDataHex[2:])
+ }
+
+ return tx
+}
+
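+// makeArbitrumRetryTx builds an ArbitrumRetryTx from the common transaction
+// fields and the raw JSON transaction data.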
+func makeArbitrumRetryTx(commonTx *types.CommonTx, rawTx map[string]interface{}) types.Transaction {
+ tx := &types.ArbitrumRetryTx{}
+
+ // ChainId (expected as a hex string, e.g., "0x1")
+ if chainIdHex, ok := rawTx["chainId"].(string); ok {
+ tx.ChainId = convertHexToBigInt(chainIdHex)
+ }
+
+ // Nonce is taken from the common transaction fields.
+ tx.Nonce = commonTx.Nonce
+
+ // From (expected as a hex string address)
+ if fromHex, ok := rawTx["from"].(string); ok {
+ tx.From = common.HexToAddress(fromHex)
+ }
+
+ // GasFeeCap (expected as a hex string)
+ if gasFeeCapHex, ok := rawTx["maxFeePerGas"].(string); ok {
+ tx.GasFeeCap = convertHexToBigInt(gasFeeCapHex)
+ }
+
+ // Gas limit is taken from the common transaction fields.
+ tx.Gas = commonTx.GasLimit
+
+ // To is optional. A non-empty hex string is converted to an address pointer;
+ // if missing or empty, nil indicates contract creation.
+ if toStr, ok := rawTx["to"].(string); ok && toStr != "" {
+ addr := common.HexToAddress(toStr)
+ tx.To = &addr
+ }
+
+ // Value (expected as a hex string)
+ if valueStr, ok := rawTx["value"].(string); ok {
+ tx.Value = convertHexToBigInt(valueStr)
+ }
+
+ // Data is taken from the common transaction fields.
+ tx.Data = commonTx.Data
+
+ // TicketId (expected as a hex string)
+ if ticketIdHex, ok := rawTx["ticketId"].(string); ok {
+ tx.TicketId = common.HexToHash(ticketIdHex)
+ }
+
+ // RefundTo (expected as a hex string address)
+ if refundToHex, ok := rawTx["refundTo"].(string); ok {
+ tx.RefundTo = common.HexToAddress(refundToHex)
+ }
+
+ // MaxRefund (expected as a hex string)
+ if maxRefundHex, ok := rawTx["maxRefund"].(string); ok {
+ tx.MaxRefund = convertHexToBigInt(maxRefundHex)
+ }
+
+ // SubmissionFeeRefund (expected as a hex string)
+ if submissionFeeRefundHex, ok := rawTx["submissionFeeRefund"].(string); ok {
+ tx.SubmissionFeeRefund = convertHexToBigInt(submissionFeeRefundHex)
+ }
+
+ return tx
+}
+
+// makeArbitrumContractTx builds an ArbitrumContractTx from the common transaction fields
+// and the raw JSON transaction data.
+func makeArbitrumContractTx(commonTx *types.CommonTx, rawTx map[string]interface{}) types.Transaction {
+ tx := &types.ArbitrumContractTx{}
+
+ // ChainId (expected as a hex string, e.g. "0x1")
+ if chainIdHex, ok := rawTx["chainId"].(string); ok {
+ tx.ChainId = convertHexToBigInt(chainIdHex)
+ }
+
+ // RequestId (expected as a hex string)
+ if requestIdHex, ok := rawTx["requestId"].(string); ok {
+ tx.RequestId = common.HexToHash(requestIdHex)
+ }
+
+ // From (expected as a hex string address)
+ if fromHex, ok := rawTx["from"].(string); ok {
+ tx.From = common.HexToAddress(fromHex)
+ }
+
+ // GasFeeCap (expected as a hex string)
+ if gasFeeCapHex, ok := rawTx["maxFeePerGas"].(string); ok {
+ tx.GasFeeCap = convertHexToBigInt(gasFeeCapHex)
+ }
+
+ // Gas limit: obtained from the common transaction fields.
+ tx.Gas = commonTx.GasLimit
+
+ // To: if present and non-empty, convert to an address pointer;
+ // if missing or empty, nil indicates contract creation.
+ if toStr, ok := rawTx["to"].(string); ok && toStr != "" {
+ addr := common.HexToAddress(toStr)
+ tx.To = &addr
+ }
+
+ // Value (expected as a hex string)
+ if valueStr, ok := rawTx["value"].(string); ok {
+ tx.Value = convertHexToBigInt(valueStr)
+ }
+
+ // Data: taken from the common transaction fields.
+ tx.Data = commonTx.Data
+
+ return tx
+}
+
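+// makeArbitrumUnsignedTx builds an ArbitrumUnsignedTx, falling back to the
+// legacy gasPrice field when maxFeePerGas is absent.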
+func makeArbitrumUnsignedTx(commonTx *types.CommonTx, rawTx map[string]interface{}) types.Transaction {
+ tx := &types.ArbitrumUnsignedTx{GasFeeCap: big.NewInt(0)}
+
+ // ChainId: expected as a hex string (e.g., "0x1")
+ if chainIdHex, ok := rawTx["chainId"].(string); ok {
+ tx.ChainId = convertHexToBigInt(chainIdHex)
+ }
+
+ // From: expected as a hex string address.
+ if fromHex, ok := rawTx["from"].(string); ok {
+ tx.From = common.HexToAddress(fromHex)
+ }
+
+ // Nonce: already parsed and stored in commonTx.
+ tx.Nonce = commonTx.Nonce
+
+ // GasFeeCap: expected as a hex string.
+ if gasFeeCapHex, ok := rawTx["maxFeePerGas"].(string); ok {
+ tx.GasFeeCap = convertHexToBigInt(gasFeeCapHex)
+ } else if gasFeeCapHex, ok := rawTx["gasPrice"].(string); ok {
+ tx.GasFeeCap = convertHexToBigInt(gasFeeCapHex)
+ }
+
+ // Gas: taken directly from commonTx.
+ tx.Gas = commonTx.GasLimit
+
+ // To: if provided and non-empty, convert to an address pointer.
+ if toStr, ok := rawTx["to"].(string); ok && toStr != "" {
+ addr := common.HexToAddress(toStr)
+ tx.To = &addr
+ }
+
+ // Value: expected as a hex string.
+ if valueStr, ok := rawTx["value"].(string); ok {
+ tx.Value = convertHexToBigInt(valueStr)
+ }
+
+ // Data: taken directly from commonTx.
+ tx.Data = commonTx.Data
+ return tx
+}
+
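+// makeArbitrumDepositTx builds an ArbitrumDepositTx from the raw JSON fields.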
+func makeArbitrumDepositTx(commonTx *types.CommonTx, rawTx map[string]interface{}) types.Transaction {
+ tx := &types.ArbitrumDepositTx{}
+
+ // ChainId: expected as a hex string (e.g., "0x1")
+ if chainIdHex, ok := rawTx["chainId"].(string); ok {
+ tx.ChainId = convertHexToBigInt(chainIdHex)
+ }
+
+ // L1RequestId: expected as a hex string.
+ if l1RequestIdHex, ok := rawTx["requestId"].(string); ok {
+ tx.L1RequestId = common.HexToHash(l1RequestIdHex)
+ }
+
+ // From: expected as a hex string address.
+ if fromHex, ok := rawTx["from"].(string); ok {
+ tx.From = common.HexToAddress(fromHex)
+ }
+
+ // To: expected as a hex string address.
+ if toHex, ok := rawTx["to"].(string); ok {
+ tx.To = common.HexToAddress(toHex)
+ }
+
+ // Value: expected as a hex string.
+ if valueStr, ok := rawTx["value"].(string); ok {
+ tx.Value = convertHexToBigInt(valueStr)
+ }
+
+ return tx
+}
+
+// Transaction types that can have the timeboosted flag set
+// - LegacyTx
+// - AccessListTx
+// - DynamicFeeTx
+// - SetCodeTx
+// - BlobTx
+// - ArbitrumRetryTx
+var timeboostedTxTypes = map[string]bool{
+ "0x0": true,
+ "0x1": true,
+ "0x2": true,
+ "0x3": true,
+ "0x4": true,
+ "0x68": true,
+
+	"0x69": true, // not timeboosted itself, but included to keep the type check simple
+}
+
+// genFromRPc connects to the RPC, fetches blocks starting from the given block,
+// and writes them into the local database.
+func genFromRPc(cliCtx *cli.Context) error {
+ dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name))
+ jsonRpcAddr := cliCtx.String(RpcAddr.Name)
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
+
+ // Connect to RPC.
+ client, err := rpc.Dial(jsonRpcAddr, log.Root())
+ if err != nil {
+ log.Warn("Error connecting to RPC", "err", err)
+ return err
+ }
+ verification := cliCtx.Bool(Verify.Name)
+ isArbitrum := cliCtx.Bool(Arbitrum.Name)
+
+ receiptRpcAddr := cliCtx.String(turbocli.L2RPCReceiptAddrFlag.Name)
+ var receiptClient *rpc.Client
+ if isArbitrum && receiptRpcAddr != "" {
+ receiptClient, err = rpc.Dial(receiptRpcAddr, log.Root())
if err != nil {
- return nil, fmt.Errorf("failed to parse common fields: %w", err)
+ log.Warn("Error connecting to receipt RPC", "err", err, "url", receiptRpcAddr)
+ return err
}
+ log.Info("Connected to receipt RPC", "url", receiptRpcAddr)
+ }
- var tx types.Transaction
- // Determine the transaction type based on the "type" field.
- typeTx, ok := rawTx["type"].(string)
- if !ok {
- return nil, errors.New("missing tx type")
+ db := mdbx.MustOpen(dirs.Chaindata)
+ defer db.Close()
+ var start uint64
+ if from := cliCtx.Uint64(FromBlock.Name); from > 0 {
+ start = from
+ } else {
+ var curBlock uint64
+ err = db.Update(context.Background(), func(tx kv.RwTx) error {
+ curBlock, err = stages.GetStageProgress(tx, stages.Bodies)
+ return err
+ })
+ if err != nil {
+ log.Warn("can't check current block", "err", err)
+ }
+ if curBlock == 0 {
+ // write arb genesis
+ // log.Info("Writing arbitrum sepolia-rollup genesis")
+
+ // gen := chain.ArbSepoliaRollupGenesisBlock()
+
+ // b := core.MustCommitGenesis(gen, db, dirs, log.New())
+ // log.Info("wrote arbitrum sepolia-rollup genesis", "block_hash", b.Hash().String(), "state_root", b.Root().String())
+ } else {
+ start = curBlock + 1
+ }
+ }
+
+ // Query latest block number.
+ var latestBlockHex string
+ if err := client.CallContext(context.Background(), &latestBlockHex, "eth_blockNumber"); err != nil {
+ log.Warn("Error fetching latest block number", "err", err)
+ return err
+ }
+
+ latestBlock := new(big.Int)
+ latestBlock.SetString(latestBlockHex[2:], 16)
+
+ noWrite := cliCtx.Bool(NoWrite.Name)
+
+ _, err = GetAndCommitBlocks(context.Background(), db, nil, client, receiptClient, start, latestBlock.Uint64(), verification, isArbitrum, noWrite, nil)
+ return err
+}
+
+var (
+ receiptQueries = new(atomic.Uint64)
+ prevReceiptTime = new(atomic.Uint64)
+)
+
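+// GetAndCommitBlocks fetches blocks in batches from startBlockNum up to (but not
+// including) endBlockNum and commits them either into the supplied rwTx or, when
+// rwTx is nil, in one db.Update transaction per batch. The optional receiptClient
+// is used to enrich Arbitrum transactions with receipt data, dryRun skips all
+// writes, and f (if non-nil) runs after each committed batch.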
+func GetAndCommitBlocks(ctx context.Context, db kv.RwDB, rwTx kv.RwTx, client, receiptClient *rpc.Client, startBlockNum, endBlockNum uint64, verify, isArbitrum, dryRun bool, f func(tx kv.RwTx, lastBlockNum uint64) error) (lastBlockNum uint64, err error) {
+ var (
+ batchSize = uint64(5)
+		blockRPS, blockBurst     = 5000, 5 // requests per second and number of simultaneous block requests
+		receiptRPS, receiptBurst = 590, 3  // requests per second and number of simultaneous receipt requests
+
+ logInterval = time.Second * 40
+ logEvery = time.NewTicker(logInterval)
+ lastBlockHash common.Hash
+ totalBlocks = endBlockNum - startBlockNum
+ )
+
+ defer logEvery.Stop()
+
+ if receiptClient != nil {
+ receiptClient.SetRequestLimit(rate.Limit(receiptRPS), receiptBurst)
+ }
+ client.SetRequestLimit(rate.Limit(blockRPS), blockBurst)
+
+ for prev := startBlockNum; prev < endBlockNum; {
+ blocks, err := FetchBlocksBatch(client, receiptClient, prev, endBlockNum, batchSize, verify, isArbitrum)
+ if err != nil {
+ log.Warn("Error fetching block batch", "startBlockNum", prev, "err", err)
+ return lastBlockNum, err
}
+		if len(blocks) == 0 {
+			log.Info("No more blocks fetched, exiting", "latestFetchedBlock", lastBlockNum, "hash", lastBlockHash)
+			return lastBlockNum, nil
+		}
+
+ last := blocks[len(blocks)-1]
+ lastBlockNum = last.NumberU64()
+ prev = lastBlockNum + 1
+ lastBlockHash = last.Hash()
+
+ select {
+ case <-logEvery.C:
+ blkSec := float64(prev-startBlockNum) / logInterval.Seconds()
+ log.Info("Progress", "block", prev-1,
+ "blocks ahead", common.PrettyCounter(endBlockNum-prev), "done%", fmt.Sprintf("%.2f", (1-(float64(endBlockNum-prev)/float64(totalBlocks)))*100),
+ "hash", lastBlockHash, "blk/s", fmt.Sprintf("%.2f", blkSec))
+ startBlockNum = prev
- switch typeTx {
- case "0x0": // Legacy
- tx = makeLegacyTx(commonTx, rawTx)
- case "0x1": // Access List
- tx = makeAccessListTx(commonTx, rawTx)
- case "0x2": // EIP-1559
- tx = makeEip1559Tx(commonTx, rawTx)
- case "0x3": // EIP-4844
- tx = makeEip4844Tx(commonTx, rawTx)
- case "0x4": // EIP-7702
- tx = makeEip7702Tx(commonTx, rawTx)
+ prevReceiptTime.Store(uint64(time.Now().Unix()))
+ receiptQueries.Store(0)
default:
- return nil, fmt.Errorf("unknown tx type: %s", typeTx)
}
- txs = append(txs, tx)
+
+ if dryRun {
+ continue
+ }
+
+ if rwTx != nil {
+ err = commitUpdate(rwTx, blocks)
+ if err != nil {
+ return 0, err
+ }
+ if f != nil {
+ err = f(rwTx, lastBlockNum)
+ }
+
+ } else {
+ err = db.Update(ctx, func(tx kv.RwTx) error {
+ if err := commitUpdate(tx, blocks); err != nil {
+ return err
+ }
+ if f != nil {
+ err = f(tx, lastBlockNum)
+ }
+ return err
+ })
+ }
+ if err != nil {
+ return 0, err
+ }
}
- return txs, nil
+ return lastBlockNum, nil
+}
+
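+// commitUpdate writes headers, raw bodies, total difficulty and canonical hashes
+// for the given blocks, then advances the canonical tx numbers and the
+// Headers/Bodies/BlockHashes/Senders stage progress to the last written block.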
+func commitUpdate(tx kv.RwTx, blocks []*types.Block) error {
+ var latest *types.Block
+ var blockNum uint64
+ var firstBlockNum uint64
+ for _, blk := range blocks {
+ blockNum = blk.NumberU64()
+ if firstBlockNum == 0 {
+ firstBlockNum = blockNum
+ }
+
+ //if err := rawdb.WriteBlock(tx, blk); err != nil {
+ if err := rawdb.WriteHeader(tx, blk.Header()); err != nil {
+ return fmt.Errorf("error writing block %d: %w", blockNum, err)
+ }
+ if _, err := rawdb.WriteRawBodyIfNotExists(tx, blk.Hash(), blockNum, blk.RawBody()); err != nil {
+ return fmt.Errorf("cannot write body: %s", err)
+ }
+
+ parentTd, err := rawdb.ReadTd(tx, blk.Header().ParentHash, blockNum-1)
+ if err != nil || parentTd == nil {
+ return fmt.Errorf("failed to read parent total difficulty for block %d: %w", blockNum, err)
+ }
+ td := new(big.Int).Add(parentTd, blk.Difficulty())
+ if err = rawdb.WriteTd(tx, blk.Hash(), blockNum, td); err != nil {
+ return fmt.Errorf("failed to write total difficulty %d: %w", blockNum, err)
+ }
+
+ if err = rawdb.WriteCanonicalHash(tx, blk.Hash(), blockNum); err != nil {
+ return fmt.Errorf("error writing canonical hash %d: %w", blockNum, err)
+ }
+
+ latest = blk
+ rawdb.WriteHeadBlockHash(tx, latest.Hash())
+ if err := rawdb.WriteHeadHeaderHash(tx, latest.Hash()); err != nil {
+ return err
+ }
+ }
+
+ if latest != nil {
+ if err := rawdbv3.TxNums.Truncate(tx, firstBlockNum); err != nil {
+ return err
+ }
+ if err := rawdb.AppendCanonicalTxNums(tx, firstBlockNum); err != nil {
+ return err
+ }
+
+ syncStages := []stages.SyncStage{
+ stages.Headers, // updated by cfg.bodyDownload.UpdateFromDb(tx);
+ stages.Bodies,
+ stages.BlockHashes,
+ stages.Senders,
+ }
+ for _, stage := range syncStages {
+ if err := stages.SaveStageProgress(tx, stage, blockNum); err != nil {
+ return fmt.Errorf("failed to save stage progress for stage %q at block %d: %w", stage, blockNum, err)
+ }
+ }
+ }
+ return nil
}
-// getBlockByNumber retrieves a block via RPC, decodes it, and (if requested) verifies its hash.
-func getBlockByNumber(client *rpc.Client, blockNumber *big.Int, verify bool) (*types.Block, error) {
+// GetBlockByNumber retrieves a block via RPC, decodes it, and (if requested) verifies its hash.
+func GetBlockByNumber(ctx context.Context, client, receiptClient *rpc.Client, blockNumber *big.Int, verify, isArbitrum bool) (*types.Block, error) {
var block BlockJson
- err := client.CallContext(context.Background(), &block, "eth_getBlockByNumber", fmt.Sprintf("0x%x", blockNumber), true)
+ err := client.CallContext(ctx, &block, "eth_getBlockByNumber", fmt.Sprintf("0x%x", blockNumber), true)
if err != nil {
return nil, err
}
- txs, err := unMarshalTransactions(block.Transactions)
+ txs, err := unMarshalTransactions(ctx, receiptClient, block.Transactions, verify, isArbitrum)
if err != nil {
return nil, err
}
// Derive the TxHash from the decoded transactions.
- block.TxHash = types.DeriveSha(txs)
+ txHash := types.DeriveSha(txs)
+ if verify && txHash != block.TxHash {
+ log.Error("transactionHash mismatch", "expected", block.TxHash, "got", txHash, "num", blockNumber)
+ for i, tx := range txs {
+ log.Error("tx", "index", i, "hash", tx.Hash(), "type", tx.Type())
+ }
+ return nil, fmt.Errorf("tx hash mismatch, expected %s, got %s. num=%d", block.TxHash, txHash, blockNumber)
+ }
blk := types.NewBlockFromNetwork(&types.Header{
ParentHash: block.ParentHash,
UncleHash: block.UncleHash,
@@ -387,73 +932,167 @@ func getBlockByNumber(client *rpc.Client, blockNumber *big.Int, verify bool) (*t
return blk, nil
}
-// genFromRPc connects to the RPC, fetches blocks starting from the given block,
-// and writes them into the local database.
-func genFromRPc(cliCtx *cli.Context) error {
- dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name))
- jsonRpcAddr := cliCtx.String(RpcAddr.Name)
- log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler))
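+// unMarshalTransactions decodes the raw JSON transactions concurrently. When a
+// receipt client is provided, eligible transaction types are enriched with the
+// timeboosted flag (and, for submit-retryables, the effective gas used) taken
+// from their receipts.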
+func unMarshalTransactions(ctx context.Context, client *rpc.Client, rawTxs []map[string]interface{}, verify bool, isArbitrum bool) (types.Transactions, error) {
+ txs := make(types.Transactions, len(rawTxs))
- // Connect to RPC.
- client, err := rpc.Dial(jsonRpcAddr, log.Root())
- if err != nil {
- log.Warn("Error connecting to RPC", "err", err)
- return err
- }
+ receiptsEnabled := client != nil
+ var unmarshalWg errgroup.Group
- verification := cliCtx.Bool(Verify.Name)
- db := mdbx.MustOpen(dirs.Chaindata)
- defer db.Close()
+ for i, rawTx := range rawTxs {
+ idx := i
+ txData := rawTx
+ unmarshalWg.Go(func() error {
+ commonTx, err := parseCommonTx(txData)
+ if err != nil {
+ return fmt.Errorf("failed to parse common fields at index %d: %w", idx, err)
+ }
- // Query latest block number.
- var latestBlockHex string
- if err := client.CallContext(context.Background(), &latestBlockHex, "eth_blockNumber"); err != nil {
- log.Warn("Error fetching latest block number", "err", err)
- return err
- }
- latestBlock := new(big.Int)
- latestBlock.SetString(latestBlockHex[2:], 16)
+ typeTx, ok := txData["type"].(string)
+ if !ok {
+ return fmt.Errorf("missing tx type at index %d", idx)
+ }
+ var tx types.Transaction
+ switch typeTx {
+ case "0x0": // Legacy
+ tx = makeLegacyTx(commonTx, txData)
+ case "0x1": // Access List
+ tx = makeAccessListTx(commonTx, txData)
+ case "0x2": // EIP-1559
+ tx = makeEip1559Tx(commonTx, txData)
+ case "0x3": // EIP-4844
+ tx = makeEip4844Tx(commonTx, txData)
+ case "0x4": // EIP-7702
+ tx = makeEip7702Tx(commonTx, txData)
+ case "0x64": // ArbitrumDepositTxType
+ tx = makeArbitrumDepositTx(commonTx, txData)
+ case "0x65": // ArbitrumUnsignedTxType
+ tx = makeArbitrumUnsignedTx(commonTx, txData)
+ case "0x66": // ArbitrumContractTxType
+ tx = makeArbitrumContractTx(commonTx, txData)
+ case "0x68": // ArbitrumRetryTxType
+ tx = makeArbitrumRetryTx(commonTx, txData)
+ case "0x69": // ArbitrumSubmitRetryableTxType
+ tx = makeRetryableTxFunc(commonTx, txData)
+ case "0x6a": // ArbitrumInternalTxType
+ var chainID *uint256.Int
+ if chainIDOut := getUint256FromField(txData, "chainId"); chainIDOut != nil {
+ chainID = chainIDOut
+ } else {
+ return fmt.Errorf("missing chainId in ArbitrumInternalTxType at index %d", idx)
+ }
+ tx = &types.ArbitrumInternalTx{
+ Data: commonTx.Data,
+ ChainId: chainID,
+ }
+ case "0x78": // ArbitrumLegacyTxType
+ tx = makeArbitrumLegacyTxFunc(commonTx, txData)
+ default:
+ return fmt.Errorf("unknown tx type: %s at index %d", typeTx, idx)
+ }
+
+ if receiptsEnabled && timeboostedTxTypes[typeTx] {
+ if txData["hash"] == "" {
+ return errors.New("missing tx hash for receipt fetch")
+ }
+
+ receiptQueries.Add(1)
+
+ maxRetries := 4
+ backoff := time.Millisecond * 150
+
+ var receipt ReceiptJson
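+			// Retry transient failures (see isRetryableError) with exponential backoff;
+			// a receipt whose hash does not match the transaction is also retried.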
+ for attempt := 0; attempt < maxRetries; attempt++ {
+ err = client.CallContext(ctx, &receipt, "eth_getTransactionReceipt", txData["hash"])
+ if err == nil {
+ if tx.Hash() != receipt.TransactionHash {
+ receipt = ReceiptJson{}
+ continue
+ }
+ break
+ }
+ if !isRetryableError(err) {
+ break
+ }
+
+ receipt = ReceiptJson{}
+
+ if attempt < maxRetries-1 {
+ time.Sleep(backoff)
+ backoff *= 2
+ }
+ }
- var blockNumber big.Int
- start := cliCtx.Uint64(FromBlock.Name)
- // Process blocks from the starting block up to the latest.
- for i := start; i < latestBlock.Uint64(); {
- prev := i
- prevTime := time.Now()
- timer := time.NewTimer(40 * time.Second)
- err := db.Update(context.TODO(), func(tx kv.RwTx) error {
- for blockNum := i; blockNum < latestBlock.Uint64(); blockNum++ {
- blockNumber.SetUint64(blockNum)
- blk, err := getBlockByNumber(client, &blockNumber, verification)
if err != nil {
- return fmt.Errorf("error fetching block %d: %w", blockNum, err)
+ log.Info("receipt queries", "total", receiptQueries.Load())
+ return fmt.Errorf("failed to get receipt for tx %s after %d attempts: %w", txData["hash"], maxRetries, err)
+ }
+ if tx.Hash() != receipt.TransactionHash {
+ log.Error("fetched receipt tx hash mismatch", "expected", txData["hash"],
+ "got", receipt.TransactionHash, "txIndex", idx,
+ "receipt", fmt.Sprintf("%+v", receipt))
+ return fmt.Errorf("receipt tx hash mismatch for tx %s", txData["hash"])
}
- if err := rawdb.WriteBlock(tx, blk); err != nil {
- return fmt.Errorf("error writing block %d: %w", blockNum, err)
+
+ if receipt.Timeboosted != nil {
+ tx.SetTimeboosted(receipt.Timeboosted)
}
- if err := rawdb.WriteCanonicalHash(tx, blk.Hash(), blockNum); err != nil {
- return fmt.Errorf("error writing canonical hash %d: %w", blockNum, err)
+ if tx.Type() == types.ArbitrumSubmitRetryableTxType {
+ if egu := receipt.GasUsed; egu != nil && egu.Uint64() > 0 {
+ if srtx, ok := tx.(*types.ArbitrumSubmitRetryableTx); ok {
+ srtx.EffectiveGasUsed = egu.Uint64()
+ tx = srtx
+ }
+ }
}
+ }
- // Update the progress counter.
- i = blockNum + 1
+ txs[idx] = tx
- select {
- case <-timer.C:
- blkSec := float64(blockNum-prev) / time.Since(prevTime).Seconds()
- log.Info("Block processed", "block", blockNum, "hash", blk.Hash(), "blk/s", fmt.Sprintf("%.2f", blkSec))
- return nil
- default:
- // continue processing without waiting
- }
+ return nil
+ })
+ }
+
+ if err := unmarshalWg.Wait(); err != nil {
+ return nil, err
+ }
+ return txs, nil
+}
+
+// FetchBlocksBatch fetches multiple blocks concurrently and returns them sorted by block number
+func FetchBlocksBatch(client, receiptClient *rpc.Client, startBlock, endBlock, batchSize uint64, verify, isArbitrum bool) ([]*types.Block, error) {
+ if startBlock >= endBlock {
+ return nil, nil
+ }
+
+ actualBatchSize := batchSize
+ if endBlock-startBlock < batchSize {
+ actualBatchSize = endBlock - startBlock
+ }
+
+ blocks := make([]*types.Block, actualBatchSize)
+ var eg errgroup.Group
+
+ for i := uint64(0); i < actualBatchSize; i++ {
+ idx := i
+ blockNum := startBlock + i
+
+ eg.Go(func() error {
+ blockNumber := new(big.Int).SetUint64(blockNum)
+ blk, err := GetBlockByNumber(context.Background(), client, receiptClient, blockNumber, verify, isArbitrum)
+ if err != nil {
+ return fmt.Errorf("error fetching block %d: %w", blockNum, err)
}
+ blocks[idx] = blk
return nil
})
- timer.Stop()
- if err != nil {
- log.Warn("Error updating db", "err", err)
- return err
- }
}
- return nil
+
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+ return blocks, nil
}
diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go
index 6b8dd3f90da..d7c577d5f60 100644
--- a/cmd/state/commands/opcode_tracer.go
+++ b/cmd/state/commands/opcode_tracer.go
@@ -732,7 +732,8 @@ func runBlock(engine rules.Engine, ibs *state.IntraBlockState, txnWriter state.S
chainConfig *chain2.Config, getHeader func(hash common.Hash, number uint64) (*types.Header, error), block *types.Block, vmConfig vm.Config, trace bool, logger log.Logger) (types.Receipts, error) {
header := block.Header()
vmConfig.TraceJumpDest = true
- gp := new(protocol.GasPool).AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(header.Time))
+ arbOsVersion := types.GetArbOSVersion(header, chainConfig)
+ gp := new(protocol.GasPool).AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(header.Time, arbOsVersion))
gasUsed := new(uint64)
usedBlobGas := new(uint64)
var receipts types.Receipts
diff --git a/cmd/state/commands/root.go b/cmd/state/commands/root.go
index d75696c045a..433bc198238 100644
--- a/cmd/state/commands/root.go
+++ b/cmd/state/commands/root.go
@@ -33,6 +33,7 @@ import (
"github.com/erigontech/erigon/node/debug"
"github.com/erigontech/erigon/node/logging"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
)
diff --git a/cmd/utils/app/snapshots_cmd.go b/cmd/utils/app/snapshots_cmd.go
index 4e73a605c07..bf9e3879e95 100644
--- a/cmd/utils/app/snapshots_cmd.go
+++ b/cmd/utils/app/snapshots_cmd.go
@@ -498,6 +498,16 @@ var snapshotCommand = cli.Command{
},
},
},
+ {
+ Name: "compareIdx",
+ Action: doCompareIdx,
+			Description: "compares two accessor (recsplit) files",
+ Flags: joinFlags([]cli.Flag{
+ &cli.PathFlag{Name: "first", Required: true},
+ &cli.PathFlag{Name: "second", Required: true},
+ &cli.BoolFlag{Name: "skip-size-check", Required: false, Value: false},
+ }),
+ },
},
}
@@ -893,6 +903,22 @@ func doRollbackSnapshotsToBlock(ctx context.Context, blockNum uint64, prompt boo
return nil
}
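+// doRmStateSnapshots reads the removal flags from the CLI context and delegates
+// to DeleteStateSnapshots, always prompting the user before deleting anything.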
+func doRmStateSnapshots(cliCtx *cli.Context) error {
+ dirs, l, err := datadir.New(cliCtx.String(utils.DataDirFlag.Name)).MustFlock()
+ if err != nil {
+ return err
+ }
+ defer l.Unlock()
+
+ removeLatest := cliCtx.Bool("latest")
+ stepRange := cliCtx.String("step")
+ domainNames := cliCtx.StringSlice("domain")
+ dryRun := cliCtx.Bool("dry-run")
+ promptUser := true // CLI should always prompt the user
+
+ return DeleteStateSnapshots(dirs, removeLatest, promptUser, dryRun, stepRange, domainNames...)
+}
+
func doBtSearch(cliCtx *cli.Context) error {
_, l, err := datadir.New(cliCtx.String(utils.DataDirFlag.Name)).MustFlock()
if err != nil {
@@ -1148,6 +1174,10 @@ func doIntegrity(cliCtx *cli.Context) error {
if err := integrity.CheckCommitmentHistVal(ctx, db, blockReader, failFast, logger); err != nil {
return err
}
+ case integrity.Publishable:
+ if err := doPublishable(cliCtx); err != nil {
+ return err
+ }
default:
return fmt.Errorf("unknown check: %s", chk)
}
@@ -1419,6 +1449,10 @@ func checkIfStateSnapshotsPublishable(dirs datadir.Dirs) error {
return fmt.Errorf("gap at start: state snaps start at (%d-%d). snaptype: accounts", accFiles[0].From, accFiles[0].To)
}
prevFrom, prevTo := accFiles[0].From, accFiles[0].To
for i := 1; i < len(accFiles); i++ {
res := accFiles[i]
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 3cda2b7c3db..54ed6283822 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -71,6 +71,7 @@ import (
"github.com/erigontech/erigon/txnprovider/shutter/shuttercfg"
"github.com/erigontech/erigon/txnprovider/txpool/txpoolcfg"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
)
@@ -1395,6 +1396,10 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config, nodeName, datadir string, l
setBoolIfSet(&cfg.MetricsEnabled, &MetricsEnabledFlag)
setBoolIfSet(&cfg.EnableWitProtocol, &PolygonPosWitProtocolFlag)
+ if ctx.IsSet(PolygonPosWitProtocolFlag.Name) {
+ cfg.EnableWitProtocol = ctx.Bool(PolygonPosWitProtocolFlag.Name)
+ }
+
logger.Info("Maximum peer count", "total", cfg.MaxPeers)
if netrestrict := ctx.String(NetrestrictFlag.Name); netrestrict != "" {
@@ -2006,6 +2011,8 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C
},
)
if err != nil {
+ log.Error("Failed to create downloader config", "err", err)
+ return
-		panic(err)
}
downloadernat.DoNat(nodeConfig.P2P.NAT, cfg.Downloader.ClientConfig, logger)
diff --git a/common/disk/disk_darwin.go b/common/disk/disk_darwin.go
index 959875e2f7c..2654ff229f7 100644
--- a/common/disk/disk_darwin.go
+++ b/common/disk/disk_darwin.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Erigon Authors
+// Copyright 2025 The Erigon Authors
// This file is part of Erigon.
//
// Erigon is free software: you can redistribute it and/or modify
diff --git a/core/vm/contract.go b/core/vm/contract.go
new file mode 100644
index 00000000000..2c6c5f17c3f
--- /dev/null
+++ b/core/vm/contract.go
@@ -0,0 +1,292 @@
+// Copyright 2015 The go-ethereum Authors
+// (original work)
+// Copyright 2024 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package vm
+
+import (
+ "fmt"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/dbg"
+ "github.com/erigontech/erigon/common/log/v3"
+ "github.com/erigontech/erigon/execution/tracing"
+ "github.com/hashicorp/golang-lru/v2/simplelru"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/arb/multigas"
+)
+
+// ContractRef is a reference to the contract's backing object
+type ContractRef interface {
+ Address() common.Address
+}
+
+// AccountRef implements ContractRef.
+//
+// Account references are used during EVM initialisation and
+// it's primary use is to fetch addresses. Removing this object
+// proves difficult because of the cached jump destinations which
+// are fetched from the parent contract (i.e. the caller), which
+// is a ContractRef.
+type AccountRef common.Address
+
+// Address casts AccountRef to a Address
+func (ar AccountRef) Address() common.Address { return (common.Address)(ar) }
+
+// Contract represents an ethereum contract in the state database. It contains
+// the contract code, calling arguments. Contract implements ContractRef
+type Contract struct {
+ // CallerAddress is the result of the caller which initialised this
+ // contract. However when the "call method" is delegated this value
+ // needs to be initialised to that of the caller's caller.
+ CallerAddress common.Address
+ caller ContractRef
+ self common.Address
+ jumpdests *JumpDestCache // Aggregated result of JUMPDEST analysis.
+ analysis bitvec // Locally cached result of JUMPDEST analysis
+
+ Code []byte
+ CodeHash common.Hash
+ CodeAddr *common.Address
+ Input []byte
+
+ Gas uint64
+ value *uint256.Int
+
+ // Arbitrum
+ delegateOrCallcode bool
+ // is the execution frame represented by this object a contract deployment
+ IsDeployment bool
+ IsSystemCall bool
+
+ // Arbitrum: total used multi-dimensional gas
+ UsedMultiGas multigas.MultiGas
+ RetainedMultiGas multigas.MultiGas
+}
+
+// arbitrum
+func (c *Contract) IsDelegateOrCallcode() bool {
+ return c.delegateOrCallcode
+}
+
+type JumpDestCache struct {
+ *simplelru.LRU[common.Hash, bitvec]
+ hit, total int
+ trace bool
+}
+
+var (
+ JumpDestCacheLimit = dbg.EnvInt("JD_LRU", 128)
+ jumpDestCacheTrace = dbg.EnvBool("JD_LRU_TRACE", false)
+)
+
+func NewJumpDestCache(limit int) *JumpDestCache {
+ c, err := simplelru.NewLRU[common.Hash, bitvec](limit, nil)
+ if err != nil {
+ panic(err)
+ }
+ return &JumpDestCache{LRU: c, trace: jumpDestCacheTrace}
+}
+
+func (c *JumpDestCache) LogStats() {
+ if c == nil || !c.trace {
+ return
+ }
+ log.Warn("[dbg] JumpDestCache", "hit", c.hit, "total", c.total, "limit", JumpDestCacheLimit, "ratio", fmt.Sprintf("%.2f", float64(c.hit)/float64(c.total)))
+}
+
+// NewContract returns a new contract environment for the execution of EVM.
+func NewContract(caller ContractRef, addr common.Address, value *uint256.Int, gas uint64, jumpDest *JumpDestCache) *Contract {
+ return &Contract{
+ CallerAddress: caller.Address(), caller: caller, self: addr,
+ value: value,
+ // Gas should be a pointer so it can safely be reduced through the run
+ // This pointer will be off the state transition
+ Gas: gas,
+ jumpdests: jumpDest,
+
+ UsedMultiGas: multigas.ZeroGas(),
+ RetainedMultiGas: multigas.ZeroGas(),
+ }
+}
+
+// First result tells us if the destination is valid
+// Second result tells us if the code bitmap was used
+func (c *Contract) validJumpdest(dest *uint256.Int) (bool, bool) {
+ udest, overflow := dest.Uint64WithOverflow()
+ // PC cannot go beyond len(code) and certainly can't be bigger than 64bits.
+ // Don't bother checking for JUMPDEST in that case.
+ if overflow || udest >= uint64(len(c.Code)) {
+ fmt.Printf("invalid jump dest: %s (code size: %d)\n", dest.Hex(), len(c.Code))
+ return false, false
+ }
+ // Only JUMPDESTs allowed for destinations
+ if OpCode(c.Code[udest]) != JUMPDEST {
+ fmt.Printf("invalid jump dest opcode: %s at %d\n", OpCode(c.Code[udest]).String(), udest)
+ return false, false
+ }
+ return c.isCode(udest), true
+}
+
+// isCode returns true if the provided PC location is an actual opcode, as
+// opposed to a data-segment following a PUSHN operation.
+func (c *Contract) isCode(udest uint64) bool {
+ // Do we already have an analysis laying around?
+ //if c.analysis != nil {
+ // return c.analysis.codeSegment(udest)
+ //}
+ // Do we have a contract hash already?
+ // If we do have a hash, that means it's a 'regular' contract. For regular
+ // contracts ( not temporary initcode), we store the analysis in a map
+ if c.CodeHash != (common.Hash{}) {
+ // Does parent context have the analysis?
+ c.jumpdests.total++
+ analysis, exist := c.jumpdests.Get(c.CodeHash)
+ if !exist {
+ // Do the analysis and save in parent context
+ // We do not need to store it in c.analysis
+ analysis = codeBitmap(c.Code)
+ c.jumpdests.Add(c.CodeHash, analysis)
+ } else {
+ c.jumpdests.hit++
+ }
+ // Also stash it in current contract for faster access
+ c.analysis = analysis
+ return c.analysis.codeSegment(udest)
+ }
+
+ // We don't have the code hash, most likely a piece of initcode not already
+ // in state trie. In that case, we do an analysis, and save it locally, so
+ // we don't have to recalculate it for every JUMP instruction in the execution
+ // However, we don't save it within the parent context
+ if c.analysis == nil {
+ c.analysis = codeBitmap(c.Code)
+ }
+
+ return c.analysis.codeSegment(udest)
+}
+
+// AsDelegate sets the contract to be a delegate call and returns the current
+// contract (for chaining calls)
+func (c *Contract) AsDelegate() *Contract {
+ // NOTE: caller must, at all times, be a contract. It should never happen
+ // that caller is something other than a Contract.
+ parent := c.caller.(*Contract)
+ c.CallerAddress = parent.CallerAddress
+ c.value = parent.value
+
+ return c
+}
+
+// GetOp returns the n'th element in the contract's byte array
+func (c *Contract) GetOp(n uint64) OpCode {
+ if n < uint64(len(c.Code)) {
+ return OpCode(c.Code[n])
+ }
+
+ return STOP
+}
+
+// Caller returns the caller of the contract.
+//
+// Caller will recursively call caller when the contract is a delegate
+// call, including that of caller's caller.
+func (c *Contract) Caller() common.Address {
+ return c.CallerAddress
+}
+
+// UseGas attempts to use the given amount of gas, subtracts it, and returns true on success
+func (c *Contract) UseGas(gas uint64, tracer *tracing.Hooks, reason tracing.GasChangeReason) (ok bool) {
+ // We collect the gas change reason today; future changes will add gas change(s) tracking with reason
+ _ = reason
+
+ if c.Gas < gas {
+ return false
+ }
+
+ if tracer != nil && tracer.OnGasChange != nil && reason != tracing.GasChangeIgnored {
+ tracer.OnGasChange(c.Gas, c.Gas-gas, reason)
+ }
+ c.Gas -= gas
+ return true
+}
+
+// RefundGas refunds gas to the contract
+func (c *Contract) RefundGas(gas uint64, tracer *tracing.Hooks, reason tracing.GasChangeReason) {
+ // We collect the gas change reason today; future changes will add gas change(s) tracking with reason
+ _ = reason
+
+ if gas == 0 {
+ return
+ }
+ if tracer != nil && tracer.OnGasChange != nil && reason != tracing.GasChangeIgnored {
+ tracer.OnGasChange(c.Gas, c.Gas+gas, reason)
+ }
+ c.Gas += gas
+}
+
+// Address returns the contracts address
+func (c *Contract) Address() common.Address {
+ return c.self
+}
+
+// Value returns the contract's value (sent to it from its caller)
+func (c *Contract) Value() *uint256.Int {
+ return c.value
+}
+
+// SetCallCode sets the code of the contract and address of the backing data object
+func (c *Contract) SetCallCode(addr *common.Address, hash common.Hash, code []byte) {
+ c.Code = code
+ c.CodeHash = hash
+ c.CodeAddr = addr
+}
+
+// SetCodeOptionalHash can be used to provide code, but it's optional to provide hash.
+// In case hash is not provided, the jumpdest analysis will not be saved to the parent context
+func (c *Contract) SetCodeOptionalHash(addr *common.Address, codeAndHash *codeAndHash) {
+ c.Code = codeAndHash.code
+ c.CodeHash = codeAndHash.hash
+ c.CodeAddr = addr
+}
+
+func (c *Contract) JumpDest() *JumpDestCache {
+ return c.jumpdests
+}
+
+// UseMultiGas attempts to use the given gas, subtracts it, adds it to UsedMultiGas, and returns true on success
+func (c *Contract) UseMultiGas(multiGas multigas.MultiGas, logger *tracing.Hooks, reason tracing.GasChangeReason) (ok bool) {
+ if !c.UseGas(multiGas.SingleGas(), logger, reason) {
+ return false
+ }
+ c.UsedMultiGas.SaturatingAddInto(multiGas)
+ return true
+}
+
+func (c *Contract) GetTotalUsedMultiGas() multigas.MultiGas {
+ var total multigas.MultiGas
+ var underflow bool
+ if total, underflow = c.UsedMultiGas.SafeSub(c.RetainedMultiGas); underflow {
+ // NOTE: This should never happen, but if it does, log it and continue
+ log.Trace("used contract gas underflow", "used", c.UsedMultiGas, "retained", c.RetainedMultiGas)
+ // But since not all places are instrumented yet, clamp to zero for safety
+ return c.UsedMultiGas.SaturatingSub(c.RetainedMultiGas)
+ }
+ return total
+}
diff --git a/db/datadir/dirs.go b/db/datadir/dirs.go
index 1d86857e1db..8086804185f 100644
--- a/db/datadir/dirs.go
+++ b/db/datadir/dirs.go
@@ -42,6 +42,7 @@ type Dirs struct {
DataDir string
RelativeDataDir string // like dataDir, but without filepath.Abs() resolution
Chaindata string
+ ArbitrumWasm string
Tmp string
Snap string
SnapIdx string
@@ -83,6 +84,7 @@ func New(datadir string) Dirs {
dirs.CaplinColumnData,
dirs.CaplinHistory,
filepath.Join(datadir, "logs"),
+ dirs.ArbitrumWasm,
)
return dirs
@@ -126,6 +128,7 @@ func Open(datadir string) Dirs {
CaplinLatest: filepath.Join(datadir, "caplin", "latest"),
CaplinGenesis: filepath.Join(datadir, "caplin", "genesis-state"),
CaplinHistory: filepath.Join(datadir, "caplin", "history"),
+ ArbitrumWasm: filepath.Join(datadir, "arbitrumwasm"),
}
return dirs
}
diff --git a/db/integrity/integrity_action_type.go b/db/integrity/integrity_action_type.go
index 64863425f36..27023ce7d33 100644
--- a/db/integrity/integrity_action_type.go
+++ b/db/integrity/integrity_action_type.go
@@ -42,6 +42,7 @@ var AllChecks = []Check{
BorSpans, BorCheckpoints, RCacheNoDups, CommitmentRoot,
CommitmentKvi, CommitmentKvDeref, StateProgress,
Publishable,
+ BorSpans, BorCheckpoints, RCacheNoDups, Publishable,
}
var NonDefaultChecks = []Check{CommitmentHistVal}
diff --git a/db/kv/dbcfg/db_constants.go b/db/kv/dbcfg/db_constants.go
index 9be5700b9ed..89696273256 100644
--- a/db/kv/dbcfg/db_constants.go
+++ b/db/kv/dbcfg/db_constants.go
@@ -11,4 +11,8 @@ const (
PolygonBridgeDB = "polygon-bridge"
CaplinDB = "caplin"
TemporaryDB = "temporary"
+ ArbitrumDB = "arbitrum"
+ ArbWasmDB = "arb-wasm" // ArbWasmDB is a separate DB for Arbitrum Wasm code
+ ArbClassicDB = "arb-classic"
+ ArbStreamerDB = "arb_streamer"
)
diff --git a/db/kv/tables.go b/db/kv/tables.go
index 4c338a611d9..c9ae14f8593 100644
--- a/db/kv/tables.go
+++ b/db/kv/tables.go
@@ -264,6 +264,13 @@ const (
//Diagnostics tables
DiagSystemInfo = "DiagSystemInfo"
DiagSyncStages = "DiagSyncStages"
+
+ ArbOSUtilsBucket = "arbOSUtils" // arbitrum os utils bucket
+ ArbNodeBucket = "arbNode" // arbitrum node bucket
+ ArbNodeTxStreamBucket = "arbNodeTXstream"
+ ArbWasmPrefixBucket = "arbWasm" // arbitrum wasm store prefix; wasm version
+ ArbWasmActivationBucket = "wasmActivation" // arbitrum bucket for wasm activations
+ ArbInboxTrackerBucket = "arbInboxTracker" // arbitrum bucket to keep inbox messages and meta
)
// Keys
@@ -376,6 +383,12 @@ var ChaindataTables = []string{
TblTracesToKeys,
TblTracesToIdx,
+ ArbOSUtilsBucket,
+ ArbNodeBucket,
+ ArbNodeTxStreamBucket,
+ ArbWasmPrefixBucket,
+ ArbWasmActivationBucket,
+
TblPruningProgress,
MaxTxNum,
@@ -550,6 +563,22 @@ var ChaindataTablesCfg = TableCfg{
TblTracesFromIdx: {Flags: DupSort},
TblTracesToKeys: {Flags: DupSort},
TblTracesToIdx: {Flags: DupSort},
+
+ ArbWasmPrefixBucket: {},
+ ArbOSUtilsBucket: {},
+ ArbWasmActivationBucket: {},
+ ArbNodeBucket: {},
+ ArbInboxTrackerBucket: {},
+ ArbNodeTxStreamBucket: {},
+}
+
+var ArbitrumTablesCfg = TableCfg{
+ ArbWasmPrefixBucket: {},
+ ArbOSUtilsBucket: {},
+ ArbWasmActivationBucket: {},
+ ArbNodeBucket: {},
+ ArbInboxTrackerBucket: {},
+ ArbNodeTxStreamBucket: {},
}
var AuRaTablesCfg = TableCfg{
@@ -585,7 +614,7 @@ var PolygonBridgeTablesCfg = TableCfg{}
func TablesCfgByLabel(label Label) TableCfg {
switch label {
- case dbcfg.ChainDB, dbcfg.TemporaryDB, dbcfg.CaplinDB: //TODO: move caplindb tables to own table config
+ case dbcfg.ChainDB, dbcfg.TemporaryDB, dbcfg.CaplinDB, dbcfg.ArbitrumDB, dbcfg.ArbClassicDB, dbcfg.ArbWasmDB, dbcfg.ArbStreamerDB: //TODO: move caplindb tables to own table config
return ChaindataTablesCfg
case dbcfg.TxPoolDB:
return TxpoolTablesCfg
diff --git a/db/seg/decompress.go b/db/seg/decompress.go
index 6db61e3f49b..ecbbe910fed 100644
--- a/db/seg/decompress.go
+++ b/db/seg/decompress.go
@@ -679,6 +679,10 @@ func (g *Getter) Next(buf []byte) ([]byte, uint64) {
savePos := g.dataP
wordLen := g.nextPos(true)
wordLen-- // because when create huffman tree we do ++ , because 0 is terminator
+ if wordLen < 0 {
+ log.Error("invalid wordLen", "filename", g.fName, "pos", savePos, "buf len", len(buf))
+ return nil, 0
+ }
if wordLen == 0 {
if g.dataBit > 0 {
g.dataP++
diff --git a/db/seg/decompress_test.go b/db/seg/decompress_test.go
index c08e62e5b75..2736daf3fc0 100644
--- a/db/seg/decompress_test.go
+++ b/db/seg/decompress_test.go
@@ -20,6 +20,7 @@ import (
"context"
"encoding/binary"
"fmt"
+ "github.com/erigontech/erigon/common/dir"
"math/rand"
"os"
"path/filepath"
diff --git a/db/seg/sais/utils.c b/db/seg/sais/utils.c
new file mode 100644
index 00000000000..112f43b5261
--- /dev/null
+++ b/db/seg/sais/utils.c
@@ -0,0 +1,32 @@
+#include "utils.h"
+
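+// lcp_kasai computes the LCP (longest common prefix) array for a suffix array
+// using Kasai's algorithm. The input T is interleaved as (flag, byte) pairs:
+// T[2*p] is a non-zero "valid" flag and T[2*p+1] is the byte being compared.
+// SA entries with the low bit set are skipped; the kept entries are shifted
+// right by one bit into FTR, and INV is filled as the inverse permutation of FTR.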
+int lcp_kasai(const unsigned char *T, int *SA, int *LCP, int *FTR, int *INV, int sa_size, int n)
+{
+ for (int i = 0, j = 0; i < sa_size; i++)
+ {
+ if ((SA[i] & 1) == 0)
+ FTR[j++] = SA[i] >> 1;
+ }
+
+ for (int i = 0; i < n; i++)
+ INV[FTR[i]] = i;
+
+ for (int i = 0, k = 0; i < n; i++, k ? k-- : 0)
+ {
+ if (INV[i] == n - 1)
+ {
+ k = 0;
+ continue;
+ }
+
+ int j = FTR[INV[i] + 1];
+
+ while (i + k < n && j + k < n && (int)T[(i + k) * 2] != 0 &&
+ (int)T[(j + k) * 2] != 0 && T[(i + k) * 2 + 1] == T[(j + k) * 2 + 1])
+ k++;
+
+ LCP[INV[i]] = k;
+ }
+
+ return 0;
+}
diff --git a/db/seg/sais/utils.h b/db/seg/sais/utils.h
new file mode 100644
index 00000000000..18dbca719c9
--- /dev/null
+++ b/db/seg/sais/utils.h
@@ -0,0 +1,5 @@
+#ifndef _UTILS_H
+#define _UTILS_H 1
+
+extern int lcp_kasai(const unsigned char *T, int *SA, int *LCP, int *FTR, int *INV, int sa_size, int n);
+#endif /* _UTILS_H */
diff --git a/db/snapcfg/util.go b/db/snapcfg/util.go
index 5c27930feae..c5e530cb493 100644
--- a/db/snapcfg/util.go
+++ b/db/snapcfg/util.go
@@ -58,13 +58,14 @@ type preverifiedRegistry struct {
var registry = &preverifiedRegistry{
data: map[string]Preverified{
- networkname.Mainnet: fromEmbeddedToml(snapshothashes.Mainnet),
- networkname.Sepolia: fromEmbeddedToml(snapshothashes.Sepolia),
- networkname.Amoy: fromEmbeddedToml(snapshothashes.Amoy),
- networkname.BorMainnet: fromEmbeddedToml(snapshothashes.BorMainnet),
- networkname.Gnosis: fromEmbeddedToml(snapshothashes.Gnosis),
- networkname.Chiado: fromEmbeddedToml(snapshothashes.Chiado),
- networkname.Hoodi: fromEmbeddedToml(snapshothashes.Hoodi),
+ networkname.Mainnet: fromEmbeddedToml(snapshothashes.Mainnet),
+ networkname.Sepolia: fromEmbeddedToml(snapshothashes.Sepolia),
+ networkname.Amoy: fromEmbeddedToml(snapshothashes.Amoy),
+ networkname.BorMainnet: fromEmbeddedToml(snapshothashes.BorMainnet),
+ networkname.Gnosis: fromEmbeddedToml(snapshothashes.Gnosis),
+ networkname.Chiado: fromEmbeddedToml(snapshothashes.Chiado),
+ networkname.Hoodi: fromEmbeddedToml(snapshothashes.Hoodi),
+ networkname.ArbiturmSepolia: fromEmbeddedToml(snapshothashes.ArbSepolia),
},
cached: make(map[string]*Cfg),
}
@@ -120,6 +121,7 @@ var (
&snapshothashes.Gnosis,
&snapshothashes.Chiado,
&snapshothashes.Hoodi,
+ &snapshothashes.ArbSepolia,
}
)
@@ -472,13 +474,14 @@ func KnownCfg(networkName string) (*Cfg, bool) {
}
var KnownWebseeds = map[string][]string{
- networkname.Mainnet: webseedsParse(webseed.Mainnet),
- networkname.Sepolia: webseedsParse(webseed.Sepolia),
- networkname.Amoy: webseedsParse(webseed.Amoy),
- networkname.BorMainnet: webseedsParse(webseed.BorMainnet),
- networkname.Gnosis: webseedsParse(webseed.Gnosis),
- networkname.Chiado: webseedsParse(webseed.Chiado),
- networkname.Hoodi: webseedsParse(webseed.Hoodi),
+ networkname.Mainnet: webseedsParse(webseed.Mainnet),
+ networkname.Sepolia: webseedsParse(webseed.Sepolia),
+ networkname.Amoy: webseedsParse(webseed.Amoy),
+ networkname.BorMainnet: webseedsParse(webseed.BorMainnet),
+ networkname.Gnosis: webseedsParse(webseed.Gnosis),
+ networkname.Chiado: webseedsParse(webseed.Chiado),
+ networkname.Hoodi: webseedsParse(webseed.Hoodi),
+ networkname.ArbiturmSepolia: webseedsParse(webseed.ArbSepolia),
}
func webseedsParse(in []byte) (res []string) {
@@ -522,24 +525,26 @@ func LoadRemotePreverified(ctx context.Context) (err error) {
}
KnownWebseeds = map[string][]string{
- networkname.Mainnet: webseedsParse(webseed.Mainnet),
- networkname.Sepolia: webseedsParse(webseed.Sepolia),
- networkname.Amoy: webseedsParse(webseed.Amoy),
- networkname.BorMainnet: webseedsParse(webseed.BorMainnet),
- networkname.Gnosis: webseedsParse(webseed.Gnosis),
- networkname.Chiado: webseedsParse(webseed.Chiado),
- networkname.Hoodi: webseedsParse(webseed.Hoodi),
+ networkname.Mainnet: webseedsParse(webseed.Mainnet),
+ networkname.Sepolia: webseedsParse(webseed.Sepolia),
+ networkname.Amoy: webseedsParse(webseed.Amoy),
+ networkname.BorMainnet: webseedsParse(webseed.BorMainnet),
+ networkname.Gnosis: webseedsParse(webseed.Gnosis),
+ networkname.Chiado: webseedsParse(webseed.Chiado),
+ networkname.Hoodi: webseedsParse(webseed.Hoodi),
+ networkname.ArbiturmSepolia: webseedsParse(webseed.ArbSepolia),
}
// Re-load the preverified hashes
registry.Reset(map[string]Preverified{
- networkname.Mainnet: fromEmbeddedToml(snapshothashes.Mainnet),
- networkname.Sepolia: fromEmbeddedToml(snapshothashes.Sepolia),
- networkname.Amoy: fromEmbeddedToml(snapshothashes.Amoy),
- networkname.BorMainnet: fromEmbeddedToml(snapshothashes.BorMainnet),
- networkname.Gnosis: fromEmbeddedToml(snapshothashes.Gnosis),
- networkname.Chiado: fromEmbeddedToml(snapshothashes.Chiado),
- networkname.Hoodi: fromEmbeddedToml(snapshothashes.Hoodi),
+ networkname.Mainnet: fromEmbeddedToml(snapshothashes.Mainnet),
+ networkname.Sepolia: fromEmbeddedToml(snapshothashes.Sepolia),
+ networkname.Amoy: fromEmbeddedToml(snapshothashes.Amoy),
+ networkname.BorMainnet: fromEmbeddedToml(snapshothashes.BorMainnet),
+ networkname.Gnosis: fromEmbeddedToml(snapshothashes.Gnosis),
+ networkname.Chiado: fromEmbeddedToml(snapshothashes.Chiado),
+ networkname.Hoodi: fromEmbeddedToml(snapshothashes.Hoodi),
+ networkname.ArbiturmSepolia: fromEmbeddedToml(snapshothashes.ArbSepolia),
})
return
}
@@ -556,6 +561,8 @@ func GetToml(networkName string) []byte {
return snapshothashes.Mainnet
case networkname.Sepolia:
return snapshothashes.Sepolia
+ //case networkname.Mumbai:
+ // return snapshothashes.Mumbai
case networkname.Amoy:
return snapshothashes.Amoy
case networkname.BorMainnet:
diff --git a/db/snapshotsync/freezeblocks/block_reader.go b/db/snapshotsync/freezeblocks/block_reader.go
index 348b5b5100d..4ca1b97e51d 100644
--- a/db/snapshotsync/freezeblocks/block_reader.go
+++ b/db/snapshotsync/freezeblocks/block_reader.go
@@ -1148,7 +1148,7 @@ func (r *BlockReader) txsFromSnapshot(baseTxnID uint64, txCount uint32, txsSeg *
if err != nil {
return nil, nil, err
}
- txs[i].SetSender(accounts.InternAddress(senders[i]))
+ // txs[i].SetSender(accounts.InternAddress(senders[i])) // TODO arbitrum
}
return txs, senders, nil
@@ -1388,6 +1388,9 @@ func (r *BlockReader) CurrentBlock(db kv.Tx) (*types.Block, error) {
if err != nil {
return nil, fmt.Errorf("failed HeaderNumber: %w", err)
}
+ if headNumber == nil {
+ return nil, fmt.Errorf("head block number not found for head hash %x", headHash)
+ }
block, _, err := r.blockWithSenders(context.Background(), db, headHash, *headNumber, true)
return block, err
}
@@ -1578,6 +1581,15 @@ func (t *txBlockIndexWithBlockReader) BlockNumber(tx kv.Tx, txNum uint64) (block
}
if blockIndex == len(bodies) {
+ // total := uint64(0)
+ // for i := len(bodies) - 1; i >= 0; i-- {
+ // total++
+ // if total == 100 {
+ // break
+ // }
+ // mtx, _ := cache.GetLastMaxTxNum(bodies[i].Range, getMaxTxNum(bodies[i]))
+ // fmt.Printf("maxTxNum %d %s\n", mtx, bodies[i].Src().FileName())
+ // }
// not in snapshots
blockNum, ok, err = rawdbv3.DefaultTxBlockIndexInstance.BlockNumber(tx, txNum)
if err != nil {
diff --git a/db/snapshotsync/freezeblocks/block_snapshots.go b/db/snapshotsync/freezeblocks/block_snapshots.go
index e8295f1854f..af8519bda69 100644
--- a/db/snapshotsync/freezeblocks/block_snapshots.go
+++ b/db/snapshotsync/freezeblocks/block_snapshots.go
@@ -688,14 +688,17 @@ func DumpTxs(ctx context.Context, db kv.RoDB, chainConfig *chain.Config, blockFr
sender = senders[j]
} else {
signer := types.LatestSignerForChainID(chainConfig.ChainID)
- s, err := txn2.Sender(*signer)
+ signerEth, err := txn2.Sender(*signer)
+ if err != nil {
+ return nil, err
+ }
+ signerArb := types.NewArbitrumSigner(*signerEth)
+
+ sender, err = signerArb.Sender(txn2)
if err != nil {
return nil, err
}
- sender = s.Value()
}
valueBuf = valueBuf[:0]
+ // TODO ARB: it seems the first byte of the txn hash and its sender are not used anywhere
valueBuf = append(valueBuf, hashFirstByte...)
valueBuf = append(valueBuf, sender[:]...)
valueBuf = append(valueBuf, v...)
diff --git a/db/snapshotsync/freezeblocks/transaction_snapshot_test.go b/db/snapshotsync/freezeblocks/transaction_snapshot_test.go
new file mode 100644
index 00000000000..ad6e20c2f31
--- /dev/null
+++ b/db/snapshotsync/freezeblocks/transaction_snapshot_test.go
@@ -0,0 +1,258 @@
+package freezeblocks_test
+
+import (
+ "bytes"
+ "context"
+ "crypto/ecdsa"
+ "math/big"
+ "path/filepath"
+ "testing"
+
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/crypto"
+ "github.com/erigontech/erigon/db/recsplit"
+ "github.com/erigontech/erigon/db/seg"
+ "github.com/erigontech/erigon/db/snaptype"
+ "github.com/erigontech/erigon/db/snaptype2"
+ "github.com/erigontech/erigon/db/version"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/erigontech/erigon/log/v3"
+)
+
+func TestTransactionSnapshotEncodeDecode(t *testing.T) {
+ logger := log.New()
+ dir := t.TempDir()
+
+ key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ chainConfig := chain.TestChainConfig
+ signer := types.LatestSigner(chainConfig)
+
+ txs := createTestTransactions(t, key, addr, *signer)
+
+ segmentFile := filepath.Join(dir, snaptype.SegmentFileName(version.V1_0, 0, 1000, snaptype2.Transactions.Enum()))
+ idxFile := filepath.Join(dir, snaptype.IdxFileName(version.V1_0, 0, 1000, snaptype2.Transactions.Name()))
+
+ encodeTransactions(t, txs, segmentFile, idxFile, dir, logger)
+
+ decodedTxs := decodeTransactions(t, segmentFile, len(txs))
+
+ verifyTransactions(t, txs, decodedTxs)
+}
+
+func createTestTransactions(t *testing.T, key *ecdsa.PrivateKey, addr common.Address, signer types.Signer) []types.Transaction {
+ txs := make([]types.Transaction, 0)
+
+ legacyTo := common.HexToAddress("0xdeadbeef")
+ timeboostedTrue := true
+ timeboostedFalse := false
+
+ legacyTx := &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ Nonce: 0,
+ GasLimit: 21000,
+ To: &legacyTo,
+ Value: uint256.NewInt(100),
+ Data: nil,
+ },
+ GasPrice: uint256.NewInt(1000000000),
+ Timeboosted: &timeboostedTrue,
+ }
+ signedLegacy, err := types.SignTx(legacyTx, signer, key)
+ require.NoError(t, err)
+ txs = append(txs, signedLegacy)
+
+ dynamicTo := common.HexToAddress("0xcafebabe")
+ chainID := signer.ChainID()
+ dynamicTx := &types.DynamicFeeTransaction{
+ CommonTx: types.CommonTx{
+ Nonce: 1,
+ GasLimit: 21000,
+ To: &dynamicTo,
+ Value: uint256.NewInt(200),
+ Data: nil,
+ },
+ ChainID: chainID,
+ TipCap: uint256.NewInt(1000000000),
+ FeeCap: uint256.NewInt(2000000000),
+ Timeboosted: &timeboostedFalse,
+ }
+ signedDynamic, err := types.SignTx(dynamicTx, signer, key)
+ require.NoError(t, err)
+ txs = append(txs, signedDynamic)
+
+ arbRetryTo := common.HexToAddress("0xbeefcafe")
+ arbRetryTx := &types.ArbitrumRetryTx{
+ ChainId: signer.ChainID().ToBig(),
+ Nonce: 2,
+ GasFeeCap: big.NewInt(3000000000),
+ Gas: 50000,
+ To: &arbRetryTo,
+ Value: big.NewInt(300),
+ Data: []byte{0x01, 0x02, 0x03},
+ TicketId: common.HexToHash("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef"),
+ RefundTo: addr,
+ MaxRefund: big.NewInt(1000),
+ SubmissionFeeRefund: big.NewInt(500),
+ From: addr,
+ Timeboosted: &timeboostedTrue,
+ }
+ txs = append(txs, arbRetryTx)
+
+ retryTo := common.HexToAddress("0xf00dbabe")
+ arbSubmitRetryable := &types.ArbitrumSubmitRetryableTx{
+ ChainId: signer.ChainID().ToBig(),
+ RequestId: common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"),
+ From: addr,
+ L1BaseFee: big.NewInt(1000000000),
+ DepositValue: big.NewInt(5000),
+ GasFeeCap: big.NewInt(4000000000),
+ Gas: 100000,
+ RetryTo: &retryTo,
+ RetryValue: big.NewInt(400),
+ Beneficiary: common.HexToAddress("0xdeadcafe"),
+ MaxSubmissionFee: big.NewInt(2000),
+ FeeRefundAddr: addr,
+ RetryData: []byte("retry-data"),
+ EffectiveGasUsed: 75000,
+ }
+ txs = append(txs, arbSubmitRetryable)
+
+ return txs
+}
+
+func encodeTransactions(t *testing.T, txs []types.Transaction, segmentFile, idxFile, tmpDir string, logger log.Logger) {
+ compressCfg := seg.DefaultCfg
+ compressCfg.MinPatternScore = 100
+
+ c, err := seg.NewCompressor(context.Background(), "test-txs", segmentFile, tmpDir, compressCfg, log.LvlDebug, logger)
+ require.NoError(t, err)
+ defer c.Close()
+ c.DisableFsync()
+
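+ // Each word follows the layout used for transaction segments:
+ // [1-byte txn hash prefix][20-byte sender address][RLP-encoded transaction].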
+ for i, tx := range txs {
+ var txBuf bytes.Buffer
+ err := tx.EncodeRLP(&txBuf)
+ require.NoError(t, err, "failed to encode tx %d", i)
+
+ sender, hasSender := tx.GetSender()
+ var senderBytes [20]byte
+ if hasSender {
+ senderBytes = sender
+ }
+
+ hash := tx.Hash()
+ hashByte := hash[:1]
+
+ txBytes := txBuf.Bytes()
+ buf := make([]byte, 0, len(hashByte)+len(senderBytes)+len(txBytes))
+ buf = append(buf, hashByte...)
+ buf = append(buf, senderBytes[:]...)
+ buf = append(buf, txBytes...)
+
+ err = c.AddWord(buf)
+ require.NoError(t, err, "failed to add tx %d", i)
+ }
+
+ err = c.Compress()
+ require.NoError(t, err)
+
+ idx, err := recsplit.NewRecSplit(recsplit.RecSplitArgs{
+ KeyCount: len(txs),
+ BucketSize: 10,
+ TmpDir: tmpDir,
+ IndexFile: idxFile,
+ LeafSize: 8,
+ }, logger)
+ require.NoError(t, err)
+ defer idx.Close()
+ idx.DisableFsync()
+
+ for i := uint64(0); i < uint64(len(txs)); i++ {
+ err = idx.AddKey([]byte{byte(i)}, i)
+ require.NoError(t, err)
+ }
+
+ err = idx.Build(context.Background())
+ require.NoError(t, err)
+}
+
+func decodeTransactions(t *testing.T, segmentFile string, expectedCount int) []types.Transaction {
+ d, err := seg.NewDecompressor(segmentFile)
+ require.NoError(t, err)
+ defer d.Close()
+
+ require.Equal(t, expectedCount, d.Count(), "decompressor count mismatch")
+
+ txs := make([]types.Transaction, 0, expectedCount)
+ g := d.MakeGetter()
+
+ for g.HasNext() {
+ buf, _ := g.Next(nil)
+ require.True(t, len(buf) > 1+20, "buffer too short")
+
+ tx, err := types.DecodeTransaction(buf[1+20:])
+ require.NoError(t, err)
+
+ txs = append(txs, tx)
+ }
+
+ require.Equal(t, expectedCount, len(txs), "decoded transaction count mismatch")
+ return txs
+}
+
+func verifyTransactions(t *testing.T, original, decoded []types.Transaction) {
+ require.Equal(t, len(original), len(decoded), "transaction count mismatch")
+
+ for i := range original {
+ origTx := original[i]
+ decodedTx := decoded[i]
+
+ require.Equal(t, origTx.Type(), decodedTx.Type(), "tx %d: type mismatch", i)
+ require.Equal(t, origTx.Hash(), decodedTx.Hash(), "tx %d: hash mismatch", i)
+ require.Equal(t, origTx.GetNonce(), decodedTx.GetNonce(), "tx %d: nonce mismatch", i)
+ require.Equal(t, origTx.GetGasLimit(), decodedTx.GetGasLimit(), "tx %d: gas limit mismatch", i)
+
+ origTimeboosted := origTx.IsTimeBoosted()
+ decodedTimeboosted := decodedTx.IsTimeBoosted()
+ if origTimeboosted != nil {
+ require.NotNil(t, decodedTimeboosted, "tx %d: timeboosted should not be nil", i)
+ require.Equal(t, *origTimeboosted, *decodedTimeboosted, "tx %d: timeboosted value mismatch", i)
+ }
+
+ if origTx.Type() == types.ArbitrumSubmitRetryableTxType {
+ origRetryable, ok1 := origTx.Unwrap().(*types.ArbitrumSubmitRetryableTx)
+ decodedRetryable, ok2 := decodedTx.Unwrap().(*types.ArbitrumSubmitRetryableTx)
+
+ require.True(t, ok1 && ok2, "tx %d: failed to unwrap ArbitrumSubmitRetryableTx", i)
+ require.Equal(t, origRetryable.EffectiveGasUsed, decodedRetryable.EffectiveGasUsed,
+ "tx %d: EffectiveGasUsed mismatch", i)
+
+ t.Logf("Tx %d (ArbitrumSubmitRetryableTx): EffectiveGasUsed preserved correctly: %d",
+ i, decodedRetryable.EffectiveGasUsed)
+ }
+
+ if origTx.Type() == types.ArbitrumRetryTxType {
+ origRetry, ok1 := origTx.Unwrap().(*types.ArbitrumRetryTx)
+ decodedRetry, ok2 := decodedTx.Unwrap().(*types.ArbitrumRetryTx)
+
+ require.True(t, ok1 && ok2, "tx %d: failed to unwrap ArbitrumRetryTx", i)
+
+ if origRetry.Timeboosted != nil {
+ require.NotNil(t, decodedRetry.Timeboosted, "tx %d: ArbitrumRetryTx Timeboosted should not be nil", i)
+ require.Equal(t, *origRetry.Timeboosted, *decodedRetry.Timeboosted,
+ "tx %d: ArbitrumRetryTx Timeboosted mismatch", i)
+
+ t.Logf("Tx %d (ArbitrumRetryTx): Timeboosted preserved correctly: %v",
+ i, *decodedRetry.Timeboosted)
+ }
+ }
+
+ t.Logf("Tx %d verified: type=0x%x, hash=%s, timeboosted=%v",
+ i, decodedTx.Type(), decodedTx.Hash().Hex(), decodedTimeboosted)
+ }
+}
diff --git a/db/snapshotsync/snapshots_test.go b/db/snapshotsync/snapshots_test.go
index a3e820fef4a..132099d1e1a 100644
--- a/db/snapshotsync/snapshots_test.go
+++ b/db/snapshotsync/snapshots_test.go
@@ -702,6 +702,15 @@ func TestParseCompressedFileName(t *testing.T) {
require.Equal("bodies", f.TypeString)
require.Equal(".torrent4014494284", f.Ext)
+ f, e3, ok = snaptype.ParseFileName("", stat("v1.0-070200-070300-bodies.seg.torrent4014494284"))
+ require.True(ok)
+ require.False(e3)
+ require.Equal(f.Type.Enum(), snaptype2.Bodies.Enum())
+ require.Equal(70200_000, int(f.From))
+ require.Equal(70300_000, int(f.To))
+ require.Equal("bodies", f.TypeString)
+ require.Equal(".torrent4014494284", f.Ext)
+
f, e3, ok = snaptype.ParseFileName("", stat("v1.0-accounts.24-28.ef"))
require.True(ok)
require.True(e3)
diff --git a/db/snaptype2/block_types.go b/db/snaptype2/block_types.go
index a56d87688b9..0a1ac6d9ae5 100644
--- a/db/snaptype2/block_types.go
+++ b/db/snaptype2/block_types.go
@@ -18,6 +18,7 @@ package snaptype2
import (
"context"
+ "crypto/rand"
"encoding/binary"
"errors"
"fmt"
@@ -48,6 +49,7 @@ func init() {
snapcfg.RegisterKnownTypes(networkname.Gnosis, ethereumTypes)
snapcfg.RegisterKnownTypes(networkname.Chiado, ethereumTypes)
snapcfg.RegisterKnownTypes(networkname.Hoodi, ethereumTypes)
+ snapcfg.RegisterKnownTypes(networkname.ArbiturmSepolia, ethereumTypes)
}
var Enums = struct {
@@ -289,6 +291,7 @@ var (
defer d.MadvSequential().DisableReadAhead()
defer bodiesSegment.MadvSequential().DisableReadAhead()
+ uniq := make(map[common.Hash]uint64, 100_000)
for {
g, bodyGetter := d.MakeGetter(), bodiesSegment.MakeGetter()
var ti, offset, nextPos uint64
@@ -338,6 +341,16 @@ var (
}
txnHash = txn.Hash()
}
+ // if chainConfig.IsArbitrum() {
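+ // The recsplit index below is keyed by txn hash; if a hash repeats, it is
+ // replaced with random bytes so that the index keys stay unique.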
+ _, ok := uniq[txnHash]
+ uniq[txnHash]++
+ if ok {
+ _, err = rand.Read(txnHash[:])
+ if err != nil {
+ return fmt.Errorf("failed to generate new txnHash: %w", err)
+ }
+ }
+ // }
if err := txnHashIdx.AddKey(txnHash[:], offset); err != nil {
return err
@@ -359,6 +372,8 @@ var (
logger.Warn("Building recsplit. Collision happened. It's ok. Restarting with another salt...", "err", err)
txnHashIdx.ResetNextSalt()
txnHash2BlockNumIdx.ResetNextSalt()
+
+ uniq = make(map[common.Hash]uint64, 100_000)
continue
}
return fmt.Errorf("txnHashIdx: %w", err)
@@ -368,6 +383,7 @@ var (
logger.Warn("Building recsplit. Collision happened. It's ok. Restarting with another salt...", "err", err)
txnHashIdx.ResetNextSalt()
txnHash2BlockNumIdx.ResetNextSalt()
+ uniq = make(map[common.Hash]uint64, 100_000)
continue
}
return fmt.Errorf("txnHash2BlockNumIdx: %w", err)
diff --git a/db/state/aggregator.go b/db/state/aggregator.go
index 1cde0eb39c3..46b34275c25 100644
--- a/db/state/aggregator.go
+++ b/db/state/aggregator.go
@@ -327,6 +327,9 @@ func (a *Aggregator) OpenFolder() error {
if err := a.openFolder(); err != nil {
return fmt.Errorf("OpenFolder: %w", err)
}
+ if err := a.openFolder(); err != nil {
+ return fmt.Errorf("OpenFolder: %w", err)
+ }
return nil
}
diff --git a/db/state/aggregator_ext_test.go b/db/state/aggregator_ext_test.go
new file mode 100644
index 00000000000..e9e34ef5eb7
--- /dev/null
+++ b/db/state/aggregator_ext_test.go
@@ -0,0 +1,872 @@
+// Copyright 2024 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package state_test
+
+import (
+ "context"
+ "encoding/binary"
+ "encoding/hex"
+ "math"
+ "math/rand"
+ "path/filepath"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/dir"
+ "github.com/erigontech/erigon/common/length"
+ "github.com/erigontech/erigon/db/config3"
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/db/kv/dbcfg"
+ "github.com/erigontech/erigon/db/kv/mdbx"
+ "github.com/erigontech/erigon/db/kv/order"
+ "github.com/erigontech/erigon/db/kv/rawdbv3"
+ "github.com/erigontech/erigon/db/kv/stream"
+ "github.com/erigontech/erigon/db/kv/temporal"
+ "github.com/erigontech/erigon/db/state"
+ "github.com/erigontech/erigon/execution/types/accounts"
+ "github.com/erigontech/erigon/log/v3"
+)
+
+func TestAggregatorV3_RestartOnFiles(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+
+ logger := log.New()
+ stepSize := uint64(100)
+ ctx := context.Background()
+ db, agg := testDbAndAggregatorv3(t, stepSize)
+ dirs := agg.Dirs()
+
+ tx, err := db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ domains, err := state.NewSharedDomains(tx, log.New())
+ require.NoError(t, err)
+ defer domains.Close()
+
+ txs := stepSize * 5
+ t.Logf("step=%d tx_count=%d\n", stepSize, txs)
+
+ rnd := newRnd(0)
+ keys := make([][]byte, txs)
+
+ for txNum := uint64(1); txNum <= txs; txNum++ {
+ addr, loc := make([]byte, length.Addr), make([]byte, length.Hash)
+ n, err := rnd.Read(addr)
+ require.NoError(t, err)
+ require.Equal(t, length.Addr, n)
+
+ n, err = rnd.Read(loc)
+ require.NoError(t, err)
+ require.Equal(t, length.Hash, n)
+
+ acc := accounts.Account{
+ Nonce: txNum,
+ Balance: *uint256.NewInt(1000000000000),
+ CodeHash: common.Hash{},
+ Incarnation: 0,
+ }
+ buf := accounts.SerialiseV3(&acc)
+ err = domains.DomainPut(kv.AccountsDomain, tx, addr, buf[:], txNum, nil, 0)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.StorageDomain, tx, composite(addr, loc), []byte{addr[0], loc[0]}, txNum, nil, 0)
+ require.NoError(t, err)
+
+ keys[txNum-1] = append(addr, loc...)
+ }
+
+ // flush and build files
+ err = domains.Flush(context.Background(), tx)
+ require.NoError(t, err)
+
+ progress := tx.Debug().DomainProgress(kv.AccountsDomain)
+ require.Equal(t, 5, int(progress/stepSize))
+
+ err = tx.Commit()
+ require.NoError(t, err)
+
+ err = agg.BuildFiles(txs)
+ require.NoError(t, err)
+
+ agg.Close()
+ db.Close()
+
+ // remove database files
+ require.NoError(t, dir.RemoveAll(dirs.Chaindata))
+
+ // open new db and aggregator instances
+ newDb := mdbx.New(dbcfg.ChainDB, logger).InMem(t, dirs.Chaindata).MustOpen()
+ t.Cleanup(newDb.Close)
+
+ newAgg := state.New(agg.Dirs()).StepSize(stepSize).MustOpen(ctx, newDb)
+ require.NoError(t, newAgg.OpenFolder())
+
+ db, _ = temporal.New(newDb, newAgg)
+
+ tx, err = db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ newDoms, err := state.NewSharedDomains(tx, log.New())
+ require.NoError(t, err)
+ defer newDoms.Close()
+
+ err = newDoms.SeekCommitment(ctx, tx)
+ require.NoError(t, err)
+ latestTx := newDoms.TxNum()
+ t.Logf("seek to latest_tx=%d", latestTx)
+
+ miss := uint64(0)
+ for i, key := range keys {
+ if uint64(i+1) >= txs-stepSize {
+ continue // finishtx always stores the last agg step in the db, which we deleted, so values that were not aggregated are expected to be missing
+ }
+ stored, _, err := tx.GetLatest(kv.AccountsDomain, key[:length.Addr])
+ require.NoError(t, err)
+ if len(stored) == 0 {
+ miss++
+ //fmt.Printf("%x [%d/%d]", key, miss, i+1) // txnum starts from 1
+ continue
+ }
+ acc := accounts.Account{}
+ err = accounts.DeserialiseV3(&acc, stored)
+ require.NoError(t, err)
+
+ require.Equal(t, i+1, int(acc.Nonce))
+
+ storedV, _, err := tx.GetLatest(kv.StorageDomain, key)
+ require.NoError(t, err)
+ require.NotEmpty(t, storedV)
+ _ = key[0]
+ _ = storedV[0]
+ require.Equal(t, key[0], storedV[0])
+ require.Equal(t, key[length.Addr], storedV[1])
+ }
+ newAgg.Close()
+
+ require.NoError(t, err)
+}
+
+func TestAggregatorV3_ReplaceCommittedKeys(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+ ctx := context.Background()
+ aggStep := uint64(20)
+
+ db, _ := testDbAndAggregatorv3(t, aggStep)
+
+ tx, err := db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ domains, err := state.NewSharedDomains(tx, log.New())
+ require.NoError(t, err)
+ defer domains.Close()
+
+ var latestCommitTxNum uint64
+ commit := func(txn uint64) error {
+ err = domains.Flush(ctx, tx)
+ require.NoError(t, err)
+
+ err = tx.Commit()
+ require.NoError(t, err)
+
+ tx, err = db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+
+ domains, err = state.NewSharedDomains(tx, log.New())
+ require.NoError(t, err)
+ atomic.StoreUint64(&latestCommitTxNum, txn)
+ return nil
+ }
+
+ txs := (aggStep) * config3.StepsInFrozenFile
+ t.Logf("step=%d tx_count=%d", aggStep, txs)
+
+ rnd := newRnd(0)
+ keys := make([][]byte, txs/2)
+
+ var prev1, prev2 []byte
+ var txNum uint64
+ for txNum = uint64(1); txNum <= txs/2; txNum++ {
+ addr, loc := make([]byte, length.Addr), make([]byte, length.Hash)
+ n, err := rnd.Read(addr)
+ require.NoError(t, err)
+ require.Equal(t, length.Addr, n)
+
+ n, err = rnd.Read(loc)
+ require.NoError(t, err)
+ require.Equal(t, length.Hash, n)
+ keys[txNum-1] = append(addr, loc...)
+
+ acc := accounts.Account{
+ Nonce: 1,
+ Balance: *uint256.NewInt(0),
+ CodeHash: common.Hash{},
+ Incarnation: 0,
+ }
+ buf := accounts.SerialiseV3(&acc)
+
+ err = domains.DomainPut(kv.AccountsDomain, tx, addr, buf, txNum, prev1, 0)
+ require.NoError(t, err)
+ prev1 = buf
+
+ err = domains.DomainPut(kv.StorageDomain, tx, composite(addr, loc), []byte{addr[0], loc[0]}, txNum, prev2, 0)
+ require.NoError(t, err)
+ prev2 = []byte{addr[0], loc[0]}
+
+ }
+ require.NoError(t, commit(txNum))
+
+ half := txs / 2
+ for txNum = txNum + 1; txNum <= txs; txNum++ {
+ addr, loc := keys[txNum-1-half][:length.Addr], keys[txNum-1-half][length.Addr:]
+
+ prev, step, err := tx.GetLatest(kv.AccountsDomain, keys[txNum-1-half])
+ require.NoError(t, err)
+ err = domains.DomainPut(kv.StorageDomain, tx, composite(addr, loc), []byte{addr[0], loc[0]}, txNum, prev, step)
+ require.NoError(t, err)
+ }
+
+ err = tx.Commit()
+ require.NoError(t, err)
+
+ tx, err = db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ for i, key := range keys {
+
+ storedV, _, err := tx.GetLatest(kv.StorageDomain, key)
+ require.NotNil(t, storedV, "key %x not found %d", key, i)
+ require.NoError(t, err)
+ require.Equal(t, key[0], storedV[0])
+ require.Equal(t, key[length.Addr], storedV[1])
+ }
+ require.NoError(t, err)
+}
+
+func TestAggregatorV3_Merge(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+ db, agg := testDbAndAggregatorv3(t, 10)
+
+ rwTx, err := db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer rwTx.Rollback()
+
+ domains, err := state.NewSharedDomains(rwTx, log.New())
+ require.NoError(t, err)
+ defer domains.Close()
+
+ txs := uint64(1000)
+ rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ var (
+ commKey1 = []byte("someCommKey")
+ commKey2 = []byte("otherCommKey")
+ )
+
+ // keys are encodings of numbers 1..31
+ // each key changes value on every txNum which is multiple of the key
+ var maxWrite, otherMaxWrite uint64
+ for txNum := uint64(1); txNum <= txs; txNum++ {
+
+ addr, loc := make([]byte, length.Addr), make([]byte, length.Hash)
+
+ n, err := rnd.Read(addr)
+ require.NoError(t, err)
+ require.Equal(t, length.Addr, n)
+
+ n, err = rnd.Read(loc)
+ require.NoError(t, err)
+ require.Equal(t, length.Hash, n)
+ acc := accounts.Account{
+ Nonce: 1,
+ Balance: *uint256.NewInt(0),
+ CodeHash: common.Hash{},
+ Incarnation: 0,
+ }
+ buf := accounts.SerialiseV3(&acc)
+ err = domains.DomainPut(kv.AccountsDomain, rwTx, addr, buf, txNum, nil, 0)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.StorageDomain, rwTx, composite(addr, loc), []byte{addr[0], loc[0]}, txNum, nil, 0)
+ require.NoError(t, err)
+
+ var v [8]byte
+ binary.BigEndian.PutUint64(v[:], txNum)
+ if txNum%135 == 0 {
+ pv, step, err := domains.GetLatest(kv.CommitmentDomain, rwTx, commKey2)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.CommitmentDomain, rwTx, commKey2, v[:], txNum, pv, step)
+ require.NoError(t, err)
+ otherMaxWrite = txNum
+ } else {
+ pv, step, err := domains.GetLatest(kv.CommitmentDomain, rwTx, commKey1)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.CommitmentDomain, rwTx, commKey1, v[:], txNum, pv, step)
+ require.NoError(t, err)
+ maxWrite = txNum
+ }
+ require.NoError(t, err)
+
+ }
+
+ err = domains.Flush(context.Background(), rwTx)
+ require.NoError(t, err)
+
+ require.NoError(t, err)
+ err = rwTx.Commit()
+ require.NoError(t, err)
+
+ mustSeeFile := func(files []string, folderName, fileNameWithoutVersion string) bool { //file-version agnostic
+ for _, f := range files {
+ if strings.HasPrefix(f, folderName) && strings.HasSuffix(f, fileNameWithoutVersion) {
+ return true
+ }
+ }
+ return false
+ }
+
+ onChangeCalls, onDelCalls := 0, 0
+ agg.OnFilesChange(func(newFiles []string) {
+ if len(newFiles) == 0 {
+ return
+ }
+
+ onChangeCalls++
+ if onChangeCalls == 1 {
+ mustSeeFile(newFiles, "domain", "accounts.0-2.kv") //TODO: when we build `accounts.0-1.kv` - we send an empty notification
+ require.False(t, filepath.IsAbs(newFiles[0])) // expecting non-absolute paths (relative as of snapshots dir)
+ }
+ }, func(deletedFiles []string) {
+ if len(deletedFiles) == 0 {
+ return
+ }
+
+ onDelCalls++
+ if onDelCalls == 1 {
+ mustSeeFile(deletedFiles, "domain", "accounts.0-1.kv")
+ mustSeeFile(deletedFiles, "domain", "commitment.0-1.kv")
+ mustSeeFile(deletedFiles, "history", "accounts.0-1.v")
+ mustSeeFile(deletedFiles, "accessor", "accounts.0-1.vi")
+
+ mustSeeFile(deletedFiles, "domain", "accounts.1-2.kv")
+ require.False(t, filepath.IsAbs(deletedFiles[0])) // expecting non-absolute paths (relative as of snapshots dir)
+ }
+ })
+
+ err = agg.BuildFiles(txs)
+ require.NoError(t, err)
+ require.Equal(t, 13, onChangeCalls)
+ require.Equal(t, 14, onDelCalls)
+
+ { //prune
+ rwTx, err = db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer rwTx.Rollback()
+
+ _, err := state.AggTx(rwTx).PruneSmallBatches(context.Background(), time.Hour, rwTx)
+ require.NoError(t, err)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+ }
+
+ onChangeCalls, onDelCalls = 0, 0
+ err = agg.MergeLoop(context.Background())
+ require.NoError(t, err)
+ require.Equal(t, 0, onChangeCalls)
+ require.Equal(t, 0, onDelCalls)
+
+ // Check the history
+ roTx, err := db.BeginTemporalRo(context.Background())
+ require.NoError(t, err)
+ defer roTx.Rollback()
+
+ v, _, err := roTx.GetLatest(kv.CommitmentDomain, commKey1)
+ require.NoError(t, err)
+ require.Equal(t, maxWrite, binary.BigEndian.Uint64(v[:]))
+
+ v, _, err = roTx.GetLatest(kv.CommitmentDomain, commKey2)
+ require.NoError(t, err)
+ require.Equal(t, otherMaxWrite, binary.BigEndian.Uint64(v[:]))
+}
+
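+// TestAggregatorV3_PruneSmallBatches snapshots the latest-state and history ranges,
+// prunes repeatedly in small time-limited batches, and verifies that the visible
+// data is unchanged afterwards.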
+func TestAggregatorV3_PruneSmallBatches(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+ aggStep := uint64(2)
+ db, agg := testDbAndAggregatorv3(t, aggStep)
+
+ tx, err := db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ domains, err := state.NewSharedDomains(tx, log.New())
+ require.NoError(t, err)
+ defer domains.Close()
+
+ maxTx := aggStep * 3
+ t.Logf("step=%d tx_count=%d\n", aggStep, maxTx)
+
+ rnd := newRnd(0)
+
+ generateSharedDomainsUpdates(t, domains, tx, maxTx, rnd, length.Addr, 10, aggStep/2)
+
+ // flush and build files
+ err = domains.Flush(context.Background(), tx)
+ require.NoError(t, err)
+
+ var (
+ // until pruning
+ accountsRange map[string][]byte
+ storageRange map[string][]byte
+ codeRange map[string][]byte
+ accountHistRange map[string][]byte
+ storageHistRange map[string][]byte
+ codeHistRange map[string][]byte
+ )
+ maxInt := math.MaxInt
+ {
+ it, err := tx.Debug().RangeLatest(kv.AccountsDomain, nil, nil, maxInt)
+ require.NoError(t, err)
+ accountsRange = extractKVErrIterator(t, it)
+
+ it, err = tx.Debug().RangeLatest(kv.StorageDomain, nil, nil, maxInt)
+ require.NoError(t, err)
+ storageRange = extractKVErrIterator(t, it)
+
+ it, err = tx.Debug().RangeLatest(kv.CodeDomain, nil, nil, maxInt)
+ require.NoError(t, err)
+ codeRange = extractKVErrIterator(t, it)
+
+ its, err := tx.HistoryRange(kv.AccountsDomain, 0, int(maxTx), order.Asc, maxInt)
+ require.NoError(t, err)
+ accountHistRange = extractKVErrIterator(t, its)
+ its, err = tx.HistoryRange(kv.CodeDomain, 0, int(maxTx), order.Asc, maxInt)
+ require.NoError(t, err)
+ codeHistRange = extractKVErrIterator(t, its)
+ its, err = tx.HistoryRange(kv.StorageDomain, 0, int(maxTx), order.Asc, maxInt)
+ require.NoError(t, err)
+ storageHistRange = extractKVErrIterator(t, its)
+ }
+
+ err = tx.Commit()
+ require.NoError(t, err)
+
+ err = agg.BuildFiles(maxTx)
+ require.NoError(t, err)
+
+ buildTx, err := db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer buildTx.Rollback()
+
+ for i := 0; i < 10; i++ {
+ _, err = buildTx.PruneSmallBatches(context.Background(), time.Second*3)
+ require.NoError(t, err)
+ }
+ err = buildTx.Commit()
+ require.NoError(t, err)
+
+ afterTx, err := db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer afterTx.Rollback()
+
+ var (
+ // after pruning
+ accountsRangeAfter map[string][]byte
+ storageRangeAfter map[string][]byte
+ codeRangeAfter map[string][]byte
+ accountHistRangeAfter map[string][]byte
+ storageHistRangeAfter map[string][]byte
+ codeHistRangeAfter map[string][]byte
+ )
+
+ {
+ it, err := afterTx.Debug().RangeLatest(kv.AccountsDomain, nil, nil, maxInt)
+ require.NoError(t, err)
+ accountsRangeAfter = extractKVErrIterator(t, it)
+
+ it, err = afterTx.Debug().RangeLatest(kv.StorageDomain, nil, nil, maxInt)
+ require.NoError(t, err)
+ storageRangeAfter = extractKVErrIterator(t, it)
+
+ it, err = afterTx.Debug().RangeLatest(kv.CodeDomain, nil, nil, maxInt)
+ require.NoError(t, err)
+ codeRangeAfter = extractKVErrIterator(t, it)
+
+ its, err := afterTx.HistoryRange(kv.AccountsDomain, 0, int(maxTx), order.Asc, maxInt)
+ require.NoError(t, err)
+ accountHistRangeAfter = extractKVErrIterator(t, its)
+ its, err = afterTx.HistoryRange(kv.CodeDomain, 0, int(maxTx), order.Asc, maxInt)
+ require.NoError(t, err)
+ codeHistRangeAfter = extractKVErrIterator(t, its)
+ its, err = afterTx.HistoryRange(kv.StorageDomain, 0, int(maxTx), order.Asc, maxInt)
+ require.NoError(t, err)
+ storageHistRangeAfter = extractKVErrIterator(t, its)
+ }
+
+ {
+ // compare
+ compareMapsBytes(t, accountsRange, accountsRangeAfter)
+ compareMapsBytes(t, storageRange, storageRangeAfter)
+ compareMapsBytes(t, codeRange, codeRangeAfter)
+ compareMapsBytes(t, accountHistRange, accountHistRangeAfter)
+ compareMapsBytes(t, storageHistRange, storageHistRangeAfter)
+ compareMapsBytes(t, codeHistRange, codeHistRangeAfter)
+ }
+
+}
+
+func TestSharedDomain_CommitmentKeyReplacement(t *testing.T) {
+ t.Parallel()
+
+ stepSize := uint64(5)
+ db, agg := testDbAndAggregatorv3(t, stepSize)
+
+ ctx := context.Background()
+ rwTx, err := db.BeginTemporalRw(ctx)
+ require.NoError(t, err)
+ defer rwTx.Rollback()
+
+ domains, err := state.NewSharedDomains(rwTx, log.New())
+ require.NoError(t, err)
+ defer domains.Close()
+
+ rnd := newRnd(2342)
+ maxTx := stepSize * 8
+
+ // 1. generate data
+ data := generateSharedDomainsUpdates(t, domains, rwTx, maxTx, rnd, length.Addr, 10, stepSize)
+ fillRawdbTxNumsIndexForSharedDomains(t, rwTx, maxTx, stepSize)
+
+ err = domains.Flush(ctx, rwTx)
+ require.NoError(t, err)
+
+ // 2. remove just one key and compute commitment
+ var txNum uint64
+ removedKey := []byte{}
+ for key := range data {
+ removedKey = []byte(key)[:length.Addr]
+ txNum = maxTx + 1
+ err = domains.DomainDel(kv.AccountsDomain, rwTx, removedKey, txNum, nil, 0)
+ require.NoError(t, err)
+ break
+ }
+
+ // 3. calculate commitment with all data +removed key
+ expectedHash, err := domains.ComputeCommitment(context.Background(), false, txNum/stepSize, txNum, "")
+ require.NoError(t, err)
+ domains.Close()
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+
+ t.Logf("expected hash: %x", expectedHash)
+ err = agg.BuildFiles(stepSize * 16)
+ require.NoError(t, err)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+
+ rwTx, err = db.BeginTemporalRw(ctx)
+ require.NoError(t, err)
+ defer rwTx.Rollback()
+
+ // 4. restart on same (replaced keys) files
+ domains, err = state.NewSharedDomains(rwTx, log.New())
+ require.NoError(t, err)
+ defer domains.Close()
+
+ // 5. delete same key. commitment should be the same
+ txNum = maxTx + 1
+ err = domains.DomainDel(kv.AccountsDomain, rwTx, removedKey, txNum, nil, 0)
+ require.NoError(t, err)
+
+ resultHash, err := domains.ComputeCommitment(context.Background(), false, txNum/stepSize, txNum, "")
+ require.NoError(t, err)
+
+ t.Logf("result hash: %x", resultHash)
+ require.Equal(t, expectedHash, resultHash)
+}
+
+func TestAggregatorV3_MergeValTransform(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+
+ t.Parallel()
+ db, agg := testDbAndAggregatorv3(t, 5)
+ rwTx, err := db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer rwTx.Rollback()
+
+ agg.ForTestReplaceKeysInValues(kv.CommitmentDomain, true)
+
+ domains, err := state.NewSharedDomains(rwTx, log.New())
+ require.NoError(t, err)
+ defer domains.Close()
+
+ txs := uint64(100)
+ rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
+
+ state := make(map[string][]byte)
+
+ // keys are encodings of numbers 1..31
+ // each key changes value on every txNum which is multiple of the key
+ //var maxWrite, otherMaxWrite uint64
+ for txNum := uint64(1); txNum <= txs; txNum++ {
+
+ addr, loc := make([]byte, length.Addr), make([]byte, length.Hash)
+
+ n, err := rnd.Read(addr)
+ require.NoError(t, err)
+ require.Equal(t, length.Addr, n)
+
+ n, err = rnd.Read(loc)
+ require.NoError(t, err)
+ require.Equal(t, length.Hash, n)
+ acc := accounts.Account{
+ Nonce: 1,
+ Balance: *uint256.NewInt(txNum * 1e6),
+ CodeHash: common.Hash{},
+ Incarnation: 0,
+ }
+ buf := accounts.SerialiseV3(&acc)
+ err = domains.DomainPut(kv.AccountsDomain, rwTx, addr, buf, txNum, nil, 0)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.StorageDomain, rwTx, composite(addr, loc), []byte{addr[0], loc[0]}, txNum, nil, 0)
+ require.NoError(t, err)
+
+ if (txNum+1)%agg.StepSize() == 0 {
+ _, err := domains.ComputeCommitment(context.Background(), true, txNum/10, txNum, "")
+ require.NoError(t, err)
+ }
+
+ state[string(addr)] = buf
+ state[string(addr)+string(loc)] = []byte{addr[0], loc[0]}
+ }
+
+ err = domains.Flush(context.Background(), rwTx)
+ require.NoError(t, err)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+
+ err = agg.BuildFiles(txs)
+ require.NoError(t, err)
+
+ rwTx, err = db.BeginTemporalRw(context.Background())
+ require.NoError(t, err)
+ defer rwTx.Rollback()
+
+ _, err = rwTx.PruneSmallBatches(context.Background(), time.Hour)
+ require.NoError(t, err)
+
+ err = rwTx.Commit()
+ require.NoError(t, err)
+
+ err = agg.MergeLoop(context.Background())
+ require.NoError(t, err)
+}
+
+func compareMapsBytes(t *testing.T, m1, m2 map[string][]byte) {
+ t.Helper()
+ for k, v := range m1 {
+ if len(v) == 0 {
+ require.Equal(t, []byte{}, v)
+ } else {
+ require.Equal(t, m2[k], v)
+ }
+ delete(m2, k)
+ }
+ require.Emptyf(t, m2, "m2 should be empty got %d: %v", len(m2), m2)
+}
+
+func fillRawdbTxNumsIndexForSharedDomains(t *testing.T, rwTx kv.RwTx, maxTx, commitEvery uint64) {
+ t.Helper()
+
+ for txn := uint64(1); txn <= maxTx; txn++ {
+ err := rawdbv3.TxNums.Append(rwTx, txn, txn/commitEvery)
+ require.NoError(t, err)
+ }
+}
+
+func extractKVErrIterator(t *testing.T, it stream.KV) map[string][]byte {
+ t.Helper()
+
+ accounts := make(map[string][]byte)
+ for it.HasNext() {
+ k, v, err := it.Next()
+ require.NoError(t, err)
+ accounts[hex.EncodeToString(k)] = common.Copy(v)
+ }
+
+ return accounts
+}
+
+func generateSharedDomainsUpdates(t *testing.T, domains *state.SharedDomains, tx kv.TemporalTx, maxTxNum uint64, rnd *rndGen, keyMaxLen, keysCount, commitEvery uint64) map[string]struct{} {
+ t.Helper()
+ usedKeys := make(map[string]struct{}, keysCount*maxTxNum)
+ for txNum := uint64(1); txNum <= maxTxNum; txNum++ {
+ used := generateSharedDomainsUpdatesForTx(t, domains, tx, txNum, rnd, usedKeys, keyMaxLen, keysCount)
+ for k := range used {
+ usedKeys[k] = struct{}{}
+ }
+ if txNum%commitEvery == 0 {
+ // domains.SetTrace(true)
+ rh, err := domains.ComputeCommitment(context.Background(), true, txNum/commitEvery, txNum, "")
+ require.NoErrorf(t, err, "txNum=%d", txNum)
+ t.Logf("commitment %x txn=%d", rh, txNum)
+ }
+ }
+ return usedKeys
+}
+
+func generateSharedDomainsUpdatesForTx(t *testing.T, domains *state.SharedDomains, tx kv.TemporalTx, txNum uint64, rnd *rndGen, prevKeys map[string]struct{}, keyMaxLen, keysCount uint64) map[string]struct{} {
+ t.Helper()
+
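+ // getKey returns either a previously used key (roughly half the time, when previous
+ // keys exist; second result true) or a freshly generated random key (second result false).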
+ getKey := func() ([]byte, bool) {
+ r := rnd.IntN(100)
+ if r < 50 && len(prevKeys) > 0 {
+ ri := rnd.IntN(len(prevKeys))
+ for k := range prevKeys {
+ if ri == 0 {
+ return []byte(k), true
+ }
+ ri--
+ }
+ } else {
+ return []byte(generateRandomKey(rnd, keyMaxLen)), false
+ }
+ panic("unreachable")
+ }
+
+ const maxStorageKeys = 10
+ usedKeys := make(map[string]struct{}, keysCount)
+
+ for j := uint64(0); j < keysCount; j++ {
+ key, existed := getKey()
+
+ r := rnd.IntN(101)
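+ // roughly: 1/3 account updates, 1/3 code updates, ~14% storage updates
+ // (creating the owning account first if needed), ~20% account deletions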
+ switch {
+ case r <= 33:
+ acc := accounts.Account{
+ Nonce: txNum,
+ Balance: *uint256.NewInt(txNum * 100_000),
+ CodeHash: common.Hash{},
+ Incarnation: 0,
+ }
+ buf := accounts.SerialiseV3(&acc)
+ prev, step, err := domains.GetLatest(kv.AccountsDomain, tx, key)
+ require.NoError(t, err)
+
+ usedKeys[string(key)] = struct{}{}
+
+ err = domains.DomainPut(kv.AccountsDomain, tx, key, buf, txNum, prev, step)
+ require.NoError(t, err)
+
+ case r > 33 && r <= 66:
+ codeUpd := make([]byte, rnd.IntN(24576))
+ _, err := rnd.Read(codeUpd)
+ require.NoError(t, err)
+ for limit := 1000; len(key) > length.Addr && limit > 0; limit-- {
+ key, existed = getKey() //nolint
+ if !existed {
+ continue
+ }
+ }
+ usedKeys[string(key)] = struct{}{}
+
+ prev, step, err := domains.GetLatest(kv.CodeDomain, tx, key)
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.CodeDomain, tx, key, codeUpd, txNum, prev, step)
+ require.NoError(t, err)
+ case r > 80:
+ if !existed {
+ continue
+ }
+ usedKeys[string(key)] = struct{}{}
+
+ err := domains.DomainDel(kv.AccountsDomain, tx, key, txNum, nil, 0)
+ require.NoError(t, err)
+
+ case r > 66 && r <= 80:
+ // need to create account because commitment trie requires it (accounts are upper part of trie)
+ if len(key) > length.Addr {
+ key = key[:length.Addr]
+ }
+
+ prev, step, err := domains.GetLatest(kv.AccountsDomain, tx, key)
+ require.NoError(t, err)
+ if prev == nil {
+ usedKeys[string(key)] = struct{}{}
+ acc := accounts.Account{
+ Nonce: txNum,
+ Balance: *uint256.NewInt(txNum * 100_000),
+ CodeHash: common.Hash{},
+ Incarnation: 0,
+ }
+ buf := accounts.SerialiseV3(&acc)
+ err = domains.DomainPut(kv.AccountsDomain, tx, key, buf, txNum, prev, step)
+ require.NoError(t, err)
+ }
+
+ sk := make([]byte, length.Hash+length.Addr)
+ copy(sk, key)
+
+ for i := 0; i < maxStorageKeys; i++ {
+ loc := generateRandomKeyBytes(rnd, 32)
+ copy(sk[length.Addr:], loc)
+ usedKeys[string(sk)] = struct{}{}
+
+ prev, step, err := domains.GetLatest(kv.StorageDomain, tx, sk[:length.Addr])
+ require.NoError(t, err)
+
+ err = domains.DomainPut(kv.StorageDomain, tx, sk, uint256.NewInt(txNum).Bytes(), txNum, prev, step)
+ require.NoError(t, err)
+ }
+
+ }
+ }
+ return usedKeys
+}
diff --git a/db/state/history.go b/db/state/history.go
index 28168fa3eb9..fb4cb98b111 100644
--- a/db/state/history.go
+++ b/db/state/history.go
@@ -217,8 +217,7 @@ func (h *History) buildVi(ctx context.Context, item *FilesItem, ps *background.P
}
if iiItem.decompressor == nil {
- fromStep, toStep := item.StepRange(h.stepSize)
- return fmt.Errorf("buildVI: got iiItem with nil decompressor %s %d-%d", h.FilenameBase, fromStep, toStep)
+ return fmt.Errorf("buildVI: got iiItem with nil decompressor %s %d-%d", h.filenameBase, item.startTxNum/h.aggregationStep, item.endTxNum/h.aggregationStep)
}
idxPath := h.vAccessorNewFilePath(item.StepRange(h.stepSize))
diff --git a/db/state/metrics.go b/db/state/metrics.go
index e9e07061a56..56169c6943b 100644
--- a/db/state/metrics.go
+++ b/db/state/metrics.go
@@ -55,6 +55,7 @@ var (
mxPruneSizeIndex = metrics.GetOrCreateCounter(`domain_prune_size{type="index"}`)
mxBuildTook = metrics.GetOrCreateSummary("domain_build_files_took")
mxStepTook = metrics.GetOrCreateSummary("domain_step_took")
+ mxFlushTook = metrics.GetOrCreateSummary("domain_flush_took")
)
var (
diff --git a/diagnostics/metrics/parsing.go b/diagnostics/metrics/parsing.go
index a838697da34..fcffa78a9ca 100644
--- a/diagnostics/metrics/parsing.go
+++ b/diagnostics/metrics/parsing.go
@@ -124,4 +124,4 @@ func validateIdent(s string) error {
return nil
}
-var identRegexp = regexp.MustCompile("^[a-zA-Z_:.][a-zA-Z0-9_:.]*$")
+var identRegexp = regexp.MustCompile("^[a-zA-Z_:./][a-zA-Z0-9_:./]*$")
diff --git a/docs/programmers_guide/db_faq.md b/docs/programmers_guide/db_faq.md
index a1384e7d608..700aeecadf1 100644
--- a/docs/programmers_guide/db_faq.md
+++ b/docs/programmers_guide/db_faq.md
@@ -25,7 +25,7 @@ Erigon uses MDBX storage engine. But most information on the Internet about LMDB
We have Go, Rust and C++ implementations of `RoKV` interface. See [interfaces repository](https://github.com/erigontech/interfaces) for details.
-Rationale and Architecture of DB interface: [./../../ethdb/Readme.md](../../ethdb/Readme.md)
+Rationale and Architecture of DB interface: [./../../turbo/Readme.md](../../turbo/Readme.md)
MDBX: [docs](https://libmdbx.dqdkfa.ru/)
and [mdbx.h](https://github.com/erigontech/libmdbx/blob/master/mdbx.h)
diff --git a/erigon-lib/types/signerapi/types.go b/erigon-lib/types/signerapi/types.go
new file mode 100644
index 00000000000..35a7e9074a9
--- /dev/null
+++ b/erigon-lib/types/signerapi/types.go
@@ -0,0 +1,961 @@
+// Copyright 2018 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package apitypes
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+//import (
+// "bytes"
+// "crypto/sha256"
+// "encoding/json"
+// "errors"
+// "fmt"
+// "math/big"
+// "reflect"
+// "regexp"
+// "sort"
+// "strconv"
+// "strings"
+//
+// kzg4844 "github.com/crate-crypto/go-kzg-4844"
+// "github.com/erigontech/erigon/common"
+// "github.com/erigontech/erigon/common/hexutil"
+// "github.com/erigontech/erigon/common/math"
+// "github.com/erigontech/erigon/crypto"
+// "github.com/erigontech/erigon/types/accounts"
+// "github.com/erigontech/erigon/core/types"
+// "github.com/holiman/uint256"
+//)
+
+var typedDataReferenceTypeRegexp = regexp.MustCompile(`^[A-Za-z](\w*)(\[\])?$`)
+
+type ValidationInfo struct {
+ Typ string `json:"type"`
+ Message string `json:"message"`
+}
+type ValidationMessages struct {
+ Messages []ValidationInfo
+}
+
+const (
+ WARN = "WARNING"
+ CRIT = "CRITICAL"
+ INFO = "Info"
+)
+
+func (vs *ValidationMessages) Crit(msg string) {
+ vs.Messages = append(vs.Messages, ValidationInfo{CRIT, msg})
+}
+func (vs *ValidationMessages) Warn(msg string) {
+ vs.Messages = append(vs.Messages, ValidationInfo{WARN, msg})
+}
+func (vs *ValidationMessages) Info(msg string) {
+ vs.Messages = append(vs.Messages, ValidationInfo{INFO, msg})
+}
+
+// GetWarnings returns an error aggregating all messages of type WARN or above, or nil if none were present
+func (v *ValidationMessages) GetWarnings() error {
+ var messages []string
+ for _, msg := range v.Messages {
+ if msg.Typ == WARN || msg.Typ == CRIT {
+ messages = append(messages, msg.Message)
+ }
+ }
+ if len(messages) > 0 {
+ return fmt.Errorf("validation failed: %s", strings.Join(messages, ","))
+ }
+ return nil
+}
+
+//// SendTxArgs represents the arguments to submit a transaction
+//// This struct is identical to ethapi.TransactionArgs, except for the usage of
+//// common.MixedcaseAddress in From and To
+//type SendTxArgs struct {
+// From common.MixedcaseAddress `json:"from"`
+// To *common.MixedcaseAddress `json:"to"`
+// Gas hexutil.Uint64 `json:"gas"`
+// GasPrice *hexutil.Big `json:"gasPrice"`
+// MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"`
+// MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"`
+// Value hexutil.Big `json:"value"`
+// Nonce hexutil.Uint64 `json:"nonce"`
+//
+// // We accept "data" and "input" for backwards-compatibility reasons.
+// // "input" is the newer name and should be preferred by clients.
+// // Issue detail: https://github.com/ethereum/go-ethereum/issues/15628
+// Data *hexutil.Bytes `json:"data,omitempty"`
+// Input *hexutil.Bytes `json:"input,omitempty"`
+//
+// // For non-legacy transactions
+// AccessList *types.AccessList `json:"accessList,omitempty"`
+// ChainID *hexutil.Big `json:"chainId,omitempty"`
+//
+// // For BlobTxType
+// BlobFeeCap *hexutil.Big `json:"maxFeePerBlobGas,omitempty"`
+// BlobHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
+//
+// // For BlobTxType transactions with blob sidecar
+// Blobs []kzg4844.Blob `json:"blobs,omitempty"`
+// Commitments []kzg4844.Commitment `json:"commitments,omitempty"`
+// Proofs []kzg4844.Proof `json:"proofs,omitempty"`
+//}
+//
+//func (args SendTxArgs) String() string {
+// s, err := json.Marshal(args)
+// if err == nil {
+// return string(s)
+// }
+// return err.Error()
+//}
+//
+//// data retrieves the transaction calldata. Input field is preferred.
+//func (args *SendTxArgs) data() []byte {
+// if args.Input != nil {
+// return *args.Input
+// }
+// if args.Data != nil {
+// return *args.Data
+// }
+// return nil
+//}
+//
+//// ToTransaction converts the arguments to a transaction.
+//func (args *SendTxArgs) ToTransaction() (*types.Transaction, error) {
+// // Add the To-field, if specified
+// var to *common.Address
+// if args.To != nil {
+// dstAddr := args.To.Address()
+// to = &dstAddr
+// }
+// if err := args.validateTxSidecar(); err != nil {
+// return nil, err
+// }
+// var data types.Transaction
+// switch {
+// case args.BlobHashes != nil:
+// al := types.AccessList{}
+// if args.AccessList != nil {
+// al = *args.AccessList
+// }
+// data = &types.BlobTx{
+// DynamicFeeTransaction: types.DynamicFeeTransaction{
+// ChainID: uint256.MustFromBig((*big.Int)(args.ChainID)),
+// CommonTx: types.CommonTx{
+// To: to,
+// Nonce: uint64(args.Nonce),
+// Gas: uint64(args.Gas),
+// GasFeeCap: uint256.MustFromBig((*big.Int)(args.MaxFeePerGas)),
+// GasTipCap: uint256.MustFromBig((*big.Int)(args.MaxPriorityFeePerGas)),
+// Value: uint256.MustFromBig((*big.Int)(&args.Value)),
+// Data: args.data(),
+// AccessList: al,
+// BlobHashes: args.BlobHashes,
+// BlobFeeCap: uint256.MustFromBig((*big.Int)(args.BlobFeeCap)),
+// },
+// },
+// }
+// if args.Blobs != nil {
+// data.(*types.BlobTx).Sidecar = &types.BlobTxSidecar{
+// Blobs: args.Blobs,
+// Commitments: args.Commitments,
+// Proofs: args.Proofs,
+// }
+// }
+//
+// case args.MaxFeePerGas != nil:
+// al := types.AccessList{}
+// if args.AccessList != nil {
+// al = *args.AccessList
+// }
+// data = &types.DynamicFeeTx{
+// To: to,
+// ChainID: (*big.Int)(args.ChainID),
+// Nonce: uint64(args.Nonce),
+// Gas: uint64(args.Gas),
+// GasFeeCap: (*big.Int)(args.MaxFeePerGas),
+// GasTipCap: (*big.Int)(args.MaxPriorityFeePerGas),
+// Value: (*big.Int)(&args.Value),
+// Data: args.data(),
+// AccessList: al,
+// }
+// case args.AccessList != nil:
+// data = &types.AccessListTx{
+// To: to,
+// ChainID: (*big.Int)(args.ChainID),
+// Nonce: uint64(args.Nonce),
+// Gas: uint64(args.Gas),
+// GasPrice: (*big.Int)(args.GasPrice),
+// Value: (*big.Int)(&args.Value),
+// Data: args.data(),
+// AccessList: *args.AccessList,
+// }
+// default:
+// data = &types.LegacyTx{
+// To: to,
+// Nonce: uint64(args.Nonce),
+// Gas: uint64(args.Gas),
+// GasPrice: (*big.Int)(args.GasPrice),
+// Value: (*big.Int)(&args.Value),
+// Data: args.data(),
+// }
+// }
+//
+// return types.NewArbTx(data), nil
+//}
+//
+//// validateTxSidecar validates blob data, if present
+//func (args *SendTxArgs) validateTxSidecar() error {
+// // No blobs, we're done.
+// if args.Blobs == nil {
+// return nil
+// }
+//
+// n := len(args.Blobs)
+// // Assume user provides either only blobs (w/o hashes), or
+// // blobs together with commitments and proofs.
+// if args.Commitments == nil && args.Proofs != nil {
+// return errors.New(`blob proofs provided while commitments were not`)
+// } else if args.Commitments != nil && args.Proofs == nil {
+// return errors.New(`blob commitments provided while proofs were not`)
+// }
+//
+// // len(blobs) == len(commitments) == len(proofs) == len(hashes)
+// if args.Commitments != nil && len(args.Commitments) != n {
+// return fmt.Errorf("number of blobs and commitments mismatch (have=%d, want=%d)", len(args.Commitments), n)
+// }
+// if args.Proofs != nil && len(args.Proofs) != n {
+// return fmt.Errorf("number of blobs and proofs mismatch (have=%d, want=%d)", len(args.Proofs), n)
+// }
+// if args.BlobHashes != nil && len(args.BlobHashes) != n {
+// return fmt.Errorf("number of blobs and hashes mismatch (have=%d, want=%d)", len(args.BlobHashes), n)
+// }
+//
+// if args.Commitments == nil {
+// // Generate commitment and proof.
+// commitments := make([]kzg4844.KZGCommitment, n)
+// proofs := make([]kzg4844.KZGProof, n)
+// for i, b := range args.Blobs {
+// c, err := kzg4844.BlobToCommitment(b)
+// if err != nil {
+// return fmt.Errorf("blobs[%d]: error computing commitment: %v", i, err)
+// }
+// commitments[i] = c
+// p, err := kzg4844.ComputeBlobProof(b, c)
+// if err != nil {
+// return fmt.Errorf("blobs[%d]: error computing proof: %v", i, err)
+// }
+// proofs[i] = p
+// }
+// args.Commitments = commitments
+// args.Proofs = proofs
+// } else {
+// for i, b := range args.Blobs {
+// if err := kzg4844.VerifyBlobProof(b, args.Commitments[i], args.Proofs[i]); err != nil {
+// return fmt.Errorf("failed to verify blob proof: %v", err)
+// }
+// }
+// }
+//
+// hashes := make([]common.Hash, n)
+// hasher := sha256.New()
+// for i, c := range args.Commitments {
+// hashes[i] = kzg4844.CalcBlobHashV1(hasher, &c)
+// }
+// if args.BlobHashes != nil {
+// for i, h := range hashes {
+// if h != args.BlobHashes[i] {
+// return fmt.Errorf("blob hash verification failed (have=%s, want=%s)", args.BlobHashes[i], h)
+// }
+// }
+// } else {
+// args.BlobHashes = hashes
+// }
+// return nil
+//}
+//
+//type SigFormat struct {
+// Mime string
+// ByteVersion byte
+//}
+//
+//var (
+// IntendedValidator = SigFormat{
+// accounts.MimetypeDataWithValidator,
+// 0x00,
+// }
+// DataTyped = SigFormat{
+// accounts.MimetypeTypedData,
+// 0x01,
+// }
+// ApplicationClique = SigFormat{
+// accounts.MimetypeClique,
+// 0x02,
+// }
+// TextPlain = SigFormat{
+// accounts.MimetypeTextPlain,
+// 0x45,
+// }
+//)
+//
+//type ValidatorData struct {
+// Address common.Address
+// Message hexutil.Bytes
+//}
+//
+//// TypedData is a type to encapsulate EIP-712 typed messages
+//type TypedData struct {
+// Types Types `json:"types"`
+// PrimaryType string `json:"primaryType"`
+// Domain TypedDataDomain `json:"domain"`
+// Message TypedDataMessage `json:"message"`
+//}
+//
+//// Type is the inner type of an EIP-712 message
+//type Type struct {
+// Name string `json:"name"`
+// Type string `json:"type"`
+//}
+//
+//func (t *Type) isArray() bool {
+// return strings.HasSuffix(t.Type, "[]")
+//}
+//
+//// typeName returns the canonical name of the type. If the type is 'Person[]', then
+//// this method returns 'Person'
+//func (t *Type) typeName() string {
+// if strings.HasSuffix(t.Type, "[]") {
+// return strings.TrimSuffix(t.Type, "[]")
+// }
+// return t.Type
+//}
+//
+//type Types map[string][]Type
+//
+//type TypePriority struct {
+// Type string
+// Value uint
+//}
+//
+//type TypedDataMessage = map[string]interface{}
+//
+//// TypedDataDomain represents the domain part of an EIP-712 message.
+//type TypedDataDomain struct {
+// Name string `json:"name"`
+// Version string `json:"version"`
+// ChainId *math.HexOrDecimal256 `json:"chainId"`
+// VerifyingContract string `json:"verifyingContract"`
+// Salt string `json:"salt"`
+//}
+//
+//// TypedDataAndHash is a helper function that calculates a hash for typed data conforming to EIP-712.
+//// This hash can then be safely used to calculate a signature.
+////
+//// See https://eips.ethereum.org/EIPS/eip-712 for the full specification.
+////
+//// This gives context to the signed typed data and prevents signing of transactions.
+//func TypedDataAndHash(typedData TypedData) ([]byte, string, error) {
+// domainSeparator, err := typedData.HashStruct("EIP712Domain", typedData.Domain.Map())
+// if err != nil {
+// return nil, "", err
+// }
+// typedDataHash, err := typedData.HashStruct(typedData.PrimaryType, typedData.Message)
+// if err != nil {
+// return nil, "", err
+// }
+// rawData := fmt.Sprintf("\x19\x01%s%s", string(domainSeparator), string(typedDataHash))
+// return crypto.Keccak256([]byte(rawData)), rawData, nil
+//}
+//
+//// HashStruct generates a keccak256 hash of the encoding of the provided data
+//func (typedData *TypedData) HashStruct(primaryType string, data TypedDataMessage) (hexutil.Bytes, error) {
+// encodedData, err := typedData.EncodeData(primaryType, data, 1)
+// if err != nil {
+// return nil, err
+// }
+// return crypto.Keccak256(encodedData), nil
+//}
+//
+//// Dependencies returns an array of custom types ordered by their hierarchical reference tree
+//func (typedData *TypedData) Dependencies(primaryType string, found []string) []string {
+// primaryType = strings.TrimSuffix(primaryType, "[]")
+// includes := func(arr []string, str string) bool {
+// for _, obj := range arr {
+// if obj == str {
+// return true
+// }
+// }
+// return false
+// }
+//
+// if includes(found, primaryType) {
+// return found
+// }
+// if typedData.Types[primaryType] == nil {
+// return found
+// }
+// found = append(found, primaryType)
+// for _, field := range typedData.Types[primaryType] {
+// for _, dep := range typedData.Dependencies(field.Type, found) {
+// if !includes(found, dep) {
+// found = append(found, dep)
+// }
+// }
+// }
+// return found
+//}
+//
+//// EncodeType generates the following encoding:
+//// `name ‖ "(" ‖ member₁ ‖ "," ‖ member₂ ‖ "," ‖ … ‖ memberₙ ")"`
+////
+//// each member is written as `type ‖ " " ‖ name` encodings cascade down and are sorted by name
+//func (typedData *TypedData) EncodeType(primaryType string) hexutil.Bytes {
+// // Get dependencies primary first, then alphabetical
+// deps := typedData.Dependencies(primaryType, []string{})
+// if len(deps) > 0 {
+// slicedDeps := deps[1:]
+// sort.Strings(slicedDeps)
+// deps = append([]string{primaryType}, slicedDeps...)
+// }
+//
+// // Format as a string with fields
+// var buffer bytes.Buffer
+// for _, dep := range deps {
+// buffer.WriteString(dep)
+// buffer.WriteString("(")
+// for _, obj := range typedData.Types[dep] {
+// buffer.WriteString(obj.Type)
+// buffer.WriteString(" ")
+// buffer.WriteString(obj.Name)
+// buffer.WriteString(",")
+// }
+// buffer.Truncate(buffer.Len() - 1)
+// buffer.WriteString(")")
+// }
+// return buffer.Bytes()
+//}
+//
+//// TypeHash creates the keccak256 hash of the data
+//func (typedData *TypedData) TypeHash(primaryType string) hexutil.Bytes {
+// return crypto.Keccak256(typedData.EncodeType(primaryType))
+//}
+//
+//// EncodeData generates the following encoding:
+//// `enc(value₁) ‖ enc(value₂) ‖ … ‖ enc(valueₙ)`
+////
+//// each encoded member is 32-byte long
+//func (typedData *TypedData) EncodeData(primaryType string, data map[string]interface{}, depth int) (hexutil.Bytes, error) {
+// if err := typedData.validate(); err != nil {
+// return nil, err
+// }
+//
+// buffer := bytes.Buffer{}
+//
+// // Verify extra data
+// if exp, got := len(typedData.Types[primaryType]), len(data); exp < got {
+// return nil, fmt.Errorf("there is extra data provided in the message (%d < %d)", exp, got)
+// }
+//
+// // Add typehash
+// buffer.Write(typedData.TypeHash(primaryType))
+//
+// // Add field contents. Structs and arrays have special handlers.
+// for _, field := range typedData.Types[primaryType] {
+// encType := field.Type
+// encValue := data[field.Name]
+// if encType[len(encType)-1:] == "]" {
+// arrayValue, err := convertDataToSlice(encValue)
+// if err != nil {
+// return nil, dataMismatchError(encType, encValue)
+// }
+//
+// arrayBuffer := bytes.Buffer{}
+// parsedType := strings.Split(encType, "[")[0]
+// for _, item := range arrayValue {
+// if typedData.Types[parsedType] != nil {
+// mapValue, ok := item.(map[string]interface{})
+// if !ok {
+// return nil, dataMismatchError(parsedType, item)
+// }
+// encodedData, err := typedData.EncodeData(parsedType, mapValue, depth+1)
+// if err != nil {
+// return nil, err
+// }
+// arrayBuffer.Write(crypto.Keccak256(encodedData))
+// } else {
+// bytesValue, err := typedData.EncodePrimitiveValue(parsedType, item, depth)
+// if err != nil {
+// return nil, err
+// }
+// arrayBuffer.Write(bytesValue)
+// }
+// }
+//
+// buffer.Write(crypto.Keccak256(arrayBuffer.Bytes()))
+// } else if typedData.Types[field.Type] != nil {
+// mapValue, ok := encValue.(map[string]interface{})
+// if !ok {
+// return nil, dataMismatchError(encType, encValue)
+// }
+// encodedData, err := typedData.EncodeData(field.Type, mapValue, depth+1)
+// if err != nil {
+// return nil, err
+// }
+// buffer.Write(crypto.Keccak256(encodedData))
+// } else {
+// byteValue, err := typedData.EncodePrimitiveValue(encType, encValue, depth)
+// if err != nil {
+// return nil, err
+// }
+// buffer.Write(byteValue)
+// }
+// }
+// return buffer.Bytes(), nil
+//}
+//
+//// Attempt to parse bytes in different formats: byte array, hex string, hexutil.Bytes.
+//func parseBytes(encType interface{}) ([]byte, bool) {
+// // Handle array types.
+// val := reflect.ValueOf(encType)
+// if val.Kind() == reflect.Array && val.Type().Elem().Kind() == reflect.Uint8 {
+// v := reflect.MakeSlice(reflect.TypeOf([]byte{}), val.Len(), val.Len())
+// reflect.Copy(v, val)
+// return v.Bytes(), true
+// }
+//
+// switch v := encType.(type) {
+// case []byte:
+// return v, true
+// case hexutil.Bytes:
+// return v, true
+// case string:
+// bytes, err := hexutil.Decode(v)
+// if err != nil {
+// return nil, false
+// }
+// return bytes, true
+// default:
+// return nil, false
+// }
+//}
+//
+//func parseInteger(encType string, encValue interface{}) (*big.Int, error) {
+// var (
+// length int
+// signed = strings.HasPrefix(encType, "int")
+// b *big.Int
+// )
+// if encType == "int" || encType == "uint" {
+// length = 256
+// } else {
+// lengthStr := ""
+// if strings.HasPrefix(encType, "uint") {
+// lengthStr = strings.TrimPrefix(encType, "uint")
+// } else {
+// lengthStr = strings.TrimPrefix(encType, "int")
+// }
+// atoiSize, err := strconv.Atoi(lengthStr)
+// if err != nil {
+// return nil, fmt.Errorf("invalid size on integer: %v", lengthStr)
+// }
+// length = atoiSize
+// }
+// switch v := encValue.(type) {
+// case *math.HexOrDecimal256:
+// b = (*big.Int)(v)
+// case *big.Int:
+// b = v
+// case string:
+// var hexIntValue math.HexOrDecimal256
+// if err := hexIntValue.UnmarshalText([]byte(v)); err != nil {
+// return nil, err
+// }
+// b = (*big.Int)(&hexIntValue)
+// case float64:
+// // JSON parses non-strings as float64. Fail if we cannot
+// // convert it losslessly
+// if float64(int64(v)) == v {
+// b = big.NewInt(int64(v))
+// } else {
+// return nil, fmt.Errorf("invalid float value %v for type %v", v, encType)
+// }
+// }
+// if b == nil {
+// return nil, fmt.Errorf("invalid integer value %v/%v for type %v", encValue, reflect.TypeOf(encValue), encType)
+// }
+// if b.BitLen() > length {
+// return nil, fmt.Errorf("integer larger than '%v'", encType)
+// }
+// if !signed && b.Sign() == -1 {
+// return nil, fmt.Errorf("invalid negative value for unsigned type %v", encType)
+// }
+// return b, nil
+//}
+//
+//// EncodePrimitiveValue deals with the primitive values found
+//// while searching through the typed data
+//func (typedData *TypedData) EncodePrimitiveValue(encType string, encValue interface{}, depth int) ([]byte, error) {
+// switch encType {
+// case "address":
+// retval := make([]byte, 32)
+// switch val := encValue.(type) {
+// case string:
+// if common.IsHexAddress(val) {
+// copy(retval[12:], common.HexToAddress(val).Bytes())
+// return retval, nil
+// }
+// case []byte:
+// if len(val) == 20 {
+// copy(retval[12:], val)
+// return retval, nil
+// }
+// case [20]byte:
+// copy(retval[12:], val[:])
+// return retval, nil
+// }
+// return nil, dataMismatchError(encType, encValue)
+// case "bool":
+// boolValue, ok := encValue.(bool)
+// if !ok {
+// return nil, dataMismatchError(encType, encValue)
+// }
+// if boolValue {
+// return math.PaddedBigBytes(common.Big1, 32), nil
+// }
+// return math.PaddedBigBytes(common.Big0, 32), nil
+// case "string":
+// strVal, ok := encValue.(string)
+// if !ok {
+// return nil, dataMismatchError(encType, encValue)
+// }
+// return crypto.Keccak256([]byte(strVal)), nil
+// case "bytes":
+// bytesValue, ok := parseBytes(encValue)
+// if !ok {
+// return nil, dataMismatchError(encType, encValue)
+// }
+// return crypto.Keccak256(bytesValue), nil
+// }
+// if strings.HasPrefix(encType, "bytes") {
+// lengthStr := strings.TrimPrefix(encType, "bytes")
+// length, err := strconv.Atoi(lengthStr)
+// if err != nil {
+// return nil, fmt.Errorf("invalid size on bytes: %v", lengthStr)
+// }
+// if length < 0 || length > 32 {
+// return nil, fmt.Errorf("invalid size on bytes: %d", length)
+// }
+// if byteValue, ok := parseBytes(encValue); !ok || len(byteValue) != length {
+// return nil, dataMismatchError(encType, encValue)
+// } else {
+// // Right-pad the bits
+// dst := make([]byte, 32)
+// copy(dst, byteValue)
+// return dst, nil
+// }
+// }
+// if strings.HasPrefix(encType, "int") || strings.HasPrefix(encType, "uint") {
+// b, err := parseInteger(encType, encValue)
+// if err != nil {
+// return nil, err
+// }
+// return math.U256Bytes(b), nil
+// }
+// return nil, fmt.Errorf("unrecognized type '%s'", encType)
+//}
+//
+//// dataMismatchError generates an error for a mismatch between
+//// the provided type and data
+//func dataMismatchError(encType string, encValue interface{}) error {
+// return fmt.Errorf("provided data '%v' doesn't match type '%s'", encValue, encType)
+//}
+//
+//func convertDataToSlice(encValue interface{}) ([]interface{}, error) {
+// var outEncValue []interface{}
+// rv := reflect.ValueOf(encValue)
+// if rv.Kind() == reflect.Slice {
+// for i := 0; i < rv.Len(); i++ {
+// outEncValue = append(outEncValue, rv.Index(i).Interface())
+// }
+// } else {
+// return outEncValue, fmt.Errorf("provided data '%v' is not slice", encValue)
+// }
+// return outEncValue, nil
+//}
+//
+//// validate makes sure the types are sound
+//func (typedData *TypedData) validate() error {
+// if err := typedData.Types.validate(); err != nil {
+// return err
+// }
+// if err := typedData.Domain.validate(); err != nil {
+// return err
+// }
+// return nil
+//}
+//
+//// Map generates a map version of the typed data
+//func (typedData *TypedData) Map() map[string]interface{} {
+// dataMap := map[string]interface{}{
+// "types": typedData.Types,
+// "domain": typedData.Domain.Map(),
+// "primaryType": typedData.PrimaryType,
+// "message": typedData.Message,
+// }
+// return dataMap
+//}
+//
+//// Format returns a representation of typedData, which can be easily displayed by a user-interface
+//// without in-depth knowledge about 712 rules
+//func (typedData *TypedData) Format() ([]*NameValueType, error) {
+// domain, err := typedData.formatData("EIP712Domain", typedData.Domain.Map())
+// if err != nil {
+// return nil, err
+// }
+// ptype, err := typedData.formatData(typedData.PrimaryType, typedData.Message)
+// if err != nil {
+// return nil, err
+// }
+// var nvts []*NameValueType
+// nvts = append(nvts, &NameValueType{
+// Name: "EIP712Domain",
+// Value: domain,
+// Typ: "domain",
+// })
+// nvts = append(nvts, &NameValueType{
+// Name: typedData.PrimaryType,
+// Value: ptype,
+// Typ: "primary type",
+// })
+// return nvts, nil
+//}
+//
+//func (typedData *TypedData) formatData(primaryType string, data map[string]interface{}) ([]*NameValueType, error) {
+// var output []*NameValueType
+//
+// // Add field contents. Structs and arrays have special handlers.
+// for _, field := range typedData.Types[primaryType] {
+// encName := field.Name
+// encValue := data[encName]
+// item := &NameValueType{
+// Name: encName,
+// Typ: field.Type,
+// }
+// if field.isArray() {
+// arrayValue, _ := convertDataToSlice(encValue)
+// parsedType := field.typeName()
+// for _, v := range arrayValue {
+// if typedData.Types[parsedType] != nil {
+// mapValue, _ := v.(map[string]interface{})
+// mapOutput, err := typedData.formatData(parsedType, mapValue)
+// if err != nil {
+// return nil, err
+// }
+// item.Value = mapOutput
+// } else {
+// primitiveOutput, err := formatPrimitiveValue(field.Type, encValue)
+// if err != nil {
+// return nil, err
+// }
+// item.Value = primitiveOutput
+// }
+// }
+// } else if typedData.Types[field.Type] != nil {
+// if mapValue, ok := encValue.(map[string]interface{}); ok {
+// mapOutput, err := typedData.formatData(field.Type, mapValue)
+// if err != nil {
+// return nil, err
+// }
+// item.Value = mapOutput
+// } else {
+// item.Value = ""
+// }
+// } else {
+// primitiveOutput, err := formatPrimitiveValue(field.Type, encValue)
+// if err != nil {
+// return nil, err
+// }
+// item.Value = primitiveOutput
+// }
+// output = append(output, item)
+// }
+// return output, nil
+//}
+//
+//func formatPrimitiveValue(encType string, encValue interface{}) (string, error) {
+// switch encType {
+// case "address":
+// if stringValue, ok := encValue.(string); !ok {
+// return "", fmt.Errorf("could not format value %v as address", encValue)
+// } else {
+// return common.HexToAddress(stringValue).String(), nil
+// }
+// case "bool":
+// if boolValue, ok := encValue.(bool); !ok {
+// return "", fmt.Errorf("could not format value %v as bool", encValue)
+// } else {
+// return fmt.Sprintf("%t", boolValue), nil
+// }
+// case "bytes", "string":
+// return fmt.Sprintf("%s", encValue), nil
+// }
+// if strings.HasPrefix(encType, "bytes") {
+// return fmt.Sprintf("%s", encValue), nil
+// }
+// if strings.HasPrefix(encType, "uint") || strings.HasPrefix(encType, "int") {
+// if b, err := parseInteger(encType, encValue); err != nil {
+// return "", err
+// } else {
+// return fmt.Sprintf("%d (%#x)", b, b), nil
+// }
+// }
+// return "", fmt.Errorf("unhandled type %v", encType)
+//}
+//
+//// Validate checks if the types object is conformant to the specs
+//func (t Types) validate() error {
+// for typeKey, typeArr := range t {
+// if len(typeKey) == 0 {
+// return fmt.Errorf("empty type key")
+// }
+// for i, typeObj := range typeArr {
+// if len(typeObj.Type) == 0 {
+// return fmt.Errorf("type %q:%d: empty Type", typeKey, i)
+// }
+// if len(typeObj.Name) == 0 {
+// return fmt.Errorf("type %q:%d: empty Name", typeKey, i)
+// }
+// if typeKey == typeObj.Type {
+// return fmt.Errorf("type %q cannot reference itself", typeObj.Type)
+// }
+// if isPrimitiveTypeValid(typeObj.Type) {
+// continue
+// }
+// // Must be reference type
+// if _, exist := t[typeObj.typeName()]; !exist {
+// return fmt.Errorf("reference type %q is undefined", typeObj.Type)
+// }
+// if !typedDataReferenceTypeRegexp.MatchString(typeObj.Type) {
+// return fmt.Errorf("unknown reference type %q", typeObj.Type)
+// }
+// }
+// }
+// return nil
+//}
+//
+//// Checks if the primitive value is valid
+//func isPrimitiveTypeValid(primitiveType string) bool {
+// if primitiveType == "address" ||
+// primitiveType == "address[]" ||
+// primitiveType == "bool" ||
+// primitiveType == "bool[]" ||
+// primitiveType == "string" ||
+// primitiveType == "string[]" ||
+// primitiveType == "bytes" ||
+// primitiveType == "bytes[]" ||
+// primitiveType == "int" ||
+// primitiveType == "int[]" ||
+// primitiveType == "uint" ||
+// primitiveType == "uint[]" {
+// return true
+// }
+// // For 'bytesN', 'bytesN[]', we allow N from 1 to 32
+// for n := 1; n <= 32; n++ {
+// // e.g. 'bytes28' or 'bytes28[]'
+// if primitiveType == fmt.Sprintf("bytes%d", n) || primitiveType == fmt.Sprintf("bytes%d[]", n) {
+// return true
+// }
+// }
+// // For 'intN','intN[]' and 'uintN','uintN[]' we allow N in increments of 8, from 8 up to 256
+// for n := 8; n <= 256; n += 8 {
+// if primitiveType == fmt.Sprintf("int%d", n) || primitiveType == fmt.Sprintf("int%d[]", n) {
+// return true
+// }
+// if primitiveType == fmt.Sprintf("uint%d", n) || primitiveType == fmt.Sprintf("uint%d[]", n) {
+// return true
+// }
+// }
+// return false
+//}
+//
+//// validate checks if the given domain is valid, i.e. contains at least
+//// the minimum viable keys and values
+//func (domain *TypedDataDomain) validate() error {
+// if domain.ChainId == nil && len(domain.Name) == 0 && len(domain.Version) == 0 && len(domain.VerifyingContract) == 0 && len(domain.Salt) == 0 {
+// return errors.New("domain is undefined")
+// }
+//
+// return nil
+//}
+//
+//// Map is a helper function to generate a map version of the domain
+//func (domain *TypedDataDomain) Map() map[string]interface{} {
+// dataMap := map[string]interface{}{}
+//
+// if domain.ChainId != nil {
+// dataMap["chainId"] = domain.ChainId
+// }
+//
+// if len(domain.Name) > 0 {
+// dataMap["name"] = domain.Name
+// }
+//
+// if len(domain.Version) > 0 {
+// dataMap["version"] = domain.Version
+// }
+//
+// if len(domain.VerifyingContract) > 0 {
+// dataMap["verifyingContract"] = domain.VerifyingContract
+// }
+//
+// if len(domain.Salt) > 0 {
+// dataMap["salt"] = domain.Salt
+// }
+// return dataMap
+//}
+//
+//// NameValueType is a very simple struct with Name, Value and Type. It's meant for simple
+//// json structures used to communicate signing-info about typed data with the UI
+//type NameValueType struct {
+// Name string `json:"name"`
+// Value interface{} `json:"value"`
+// Typ string `json:"type"`
+//}
+//
+//// Pprint returns a pretty-printed version of nvt
+//func (nvt *NameValueType) Pprint(depth int) string {
+// output := bytes.Buffer{}
+// output.WriteString(strings.Repeat("\u00a0", depth*2))
+// output.WriteString(fmt.Sprintf("%s [%s]: ", nvt.Name, nvt.Typ))
+// if nvts, ok := nvt.Value.([]*NameValueType); ok {
+// output.WriteString("\n")
+// for _, next := range nvts {
+// sublevel := next.Pprint(depth + 1)
+// output.WriteString(sublevel)
+// }
+// } else {
+// if nvt.Value != nil {
+// output.WriteString(fmt.Sprintf("%q\n", nvt.Value))
+// } else {
+// output.WriteString("\n")
+// }
+// }
+// return output.String()
+//}
diff --git a/erigon-lib/types/signerapi/types_test.go b/erigon-lib/types/signerapi/types_test.go
new file mode 100644
index 00000000000..d8abf0bfc49
--- /dev/null
+++ b/erigon-lib/types/signerapi/types_test.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package apitypes
+
+//import (
+// "crypto/sha256"
+// "encoding/json"
+// "testing"
+//
+// "github.com/erigontech/erigon/common"
+// "github.com/erigontech/erigon/core/types"
+// //"github.com/erigontech/erigon/crypto/kzg"
+// "github.com/holiman/uint256"
+//)
+//
+//func TestIsPrimitive(t *testing.T) {
+// t.Parallel()
+// // Expected positives
+// for i, tc := range []string{
+// "int24", "int24[]", "uint88", "uint88[]", "uint", "uint[]", "int256", "int256[]",
+// "uint96", "uint96[]", "int96", "int96[]", "bytes17[]", "bytes17",
+// } {
+// if !isPrimitiveTypeValid(tc) {
+// t.Errorf("test %d: expected '%v' to be a valid primitive", i, tc)
+// }
+// }
+// // Expected negatives
+// for i, tc := range []string{
+// "int257", "int257[]", "uint88 ", "uint88 []", "uint257", "uint-1[]",
+// "uint0", "uint0[]", "int95", "int95[]", "uint1", "uint1[]", "bytes33[]", "bytess",
+// } {
+// if isPrimitiveTypeValid(tc) {
+// t.Errorf("test %d: expected '%v' to not be a valid primitive", i, tc)
+// }
+// }
+//}
+//
+//func TestTxArgs(t *testing.T) {
+// for i, tc := range []struct {
+// data []byte
+// want common.Hash
+// wantType uint8
+// }{
+// {
+// data: []byte(`{"from":"0x1b442286e32ddcaa6e2570ce9ed85f4b4fc87425","accessList":[],"blobVersionedHashes":["0x010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014"],"chainId":"0x7","gas":"0x124f8","gasPrice":"0x693d4ca8","input":"0x","maxFeePerBlobGas":"0x3b9aca00","maxFeePerGas":"0x6fc23ac00","maxPriorityFeePerGas":"0x3b9aca00","nonce":"0x0","r":"0x2a922afc784d07e98012da29f2f37cae1f73eda78aa8805d3df6ee5dbb41ec1","s":"0x4f1f75ae6bcdf4970b4f305da1a15d8c5ddb21f555444beab77c9af2baab14","to":"0x1b442286e32ddcaa6e2570ce9ed85f4b4fc87425","type":"0x1","v":"0x0","value":"0x0","yParity":"0x0"}`),
+// want: common.HexToHash("0x7d53234acc11ac5b5948632c901a944694e228795782f511887d36fd76ff15c4"),
+// wantType: types.BlobTxType,
+// },
+// {
+// // on input, we don't read the type, but infer the type from the arguments present
+// data: []byte(`{"from":"0x1b442286e32ddcaa6e2570ce9ed85f4b4fc87425","accessList":[],"chainId":"0x7","gas":"0x124f8","gasPrice":"0x693d4ca8","input":"0x","maxFeePerBlobGas":"0x3b9aca00","maxFeePerGas":"0x6fc23ac00","maxPriorityFeePerGas":"0x3b9aca00","nonce":"0x0","r":"0x2a922afc784d07e98012da29f2f37cae1f73eda78aa8805d3df6ee5dbb41ec1","s":"0x4f1f75ae6bcdf4970b4f305da1a15d8c5ddb21f555444beab77c9af2baab14","to":"0x1b442286e32ddcaa6e2570ce9ed85f4b4fc87425","type":"0x12","v":"0x0","value":"0x0","yParity":"0x0"}`),
+// want: common.HexToHash("0x7919e2b0b9b543cb87a137b6ff66491ec7ae937cb88d3c29db4d9b28073dce53"),
+// wantType: types.DynamicFeeTxType,
+// },
+// } {
+// var txArgs SendTxArgs
+// if err := json.Unmarshal(tc.data, &txArgs); err != nil {
+// t.Fatal(err)
+// }
+// tx, err := txArgs.ToTransaction()
+// if err != nil {
+// t.Fatal(err)
+// }
+// if have := tx.Type(); have != tc.wantType {
+// t.Errorf("test %d, have type %d, want type %d", i, have, tc.wantType)
+// }
+// if have := tx.Hash(); have != tc.want {
+// t.Errorf("test %d: have %v, want %v", i, have, tc.want)
+// }
+// d2, err := json.Marshal(txArgs)
+// if err != nil {
+// t.Fatal(err)
+// }
+// var txArgs2 SendTxArgs
+// if err := json.Unmarshal(d2, &txArgs2); err != nil {
+// t.Fatal(err)
+// }
+// tx1, _ := txArgs.ToTransaction()
+// tx2, _ := txArgs2.ToTransaction()
+// if have, want := tx1.Hash(), tx2.Hash(); have != want {
+// t.Errorf("test %d: have %v, want %v", i, have, want)
+// }
+// }
+// /*
+// End to end testing:
+//
+// $ go run ./cmd/clef --advanced --suppress-bootwarn
+//
+// $ go run ./cmd/geth --nodiscover --maxpeers 0 --signer /home/user/.clef/clef.ipc console
+//
+// > tx={"from":"0x1b442286e32ddcaa6e2570ce9ed85f4b4fc87425","to":"0x1b442286e32ddcaa6e2570ce9ed85f4b4fc87425","gas":"0x124f8","maxFeePerGas":"0x6fc23ac00","maxPriorityFeePerGas":"0x3b9aca00","value":"0x0","nonce":"0x0","input":"0x","accessList":[],"maxFeePerBlobGas":"0x3b9aca00","blobVersionedHashes":["0x010657f37554c781402a22917dee2f75def7ab966d7b770905398eba3c444014"]}
+// > eth.signTransaction(tx)
+// */
+//}
+//
+////func TestBlobTxs(t *testing.T) {
+//// blob := kzg4844.Blob{0x1}
+//// commitment, err := kzg4844.BlobToCommitment(blob)
+//// if err != nil {
+//// t.Fatal(err)
+//// }
+//// proof, err := kzg4844.ComputeBlobProof(blob, commitment)
+//// if err != nil {
+//// t.Fatal(err)
+//// }
+////
+//// hash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment)
+//// b := &types.BlobTx{
+//// ChainID: uint256.NewInt(6),
+//// Nonce: 8,
+//// GasTipCap: uint256.NewInt(500),
+//// GasFeeCap: uint256.NewInt(600),
+//// Gas: 21000,
+//// BlobFeeCap: uint256.NewInt(700),
+//// BlobHashes: []common.Hash{hash},
+//// Value: uint256.NewInt(100),
+//// //Sidecar: &types.BlobTxSidecar{
+//// // Blobs: []kzg4844.Blob{blob},
+//// // Commitments: []kzg4844.Commitment{commitment},
+//// // Proofs: []kzg4844.Proof{proof},
+//// //},
+//// }
+//// tx := types.NewArbTx(b)
+//// data, err := json.Marshal(tx)
+//// if err != nil {
+//// t.Fatal(err)
+//// }
+//// t.Logf("tx %v", string(data))
+////}
diff --git a/eth/ethutils/receipt.go b/eth/ethutils/receipt.go
new file mode 100644
index 00000000000..03e2376cb0a
--- /dev/null
+++ b/eth/ethutils/receipt.go
@@ -0,0 +1,151 @@
+// Copyright 2024 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package ethutils
+
+import (
+ "math/big"
+
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/consensus/misc"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/erigontech/erigon/log/v3"
+)
+
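+// MarshalReceipt assembles the JSON-RPC receipt fields for the given transaction and receipt,
+// including Arbitrum-specific fields when the chain config is an Arbitrum chain.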
+func MarshalReceipt(
+ receipt *types.Receipt,
+ txn types.Transaction,
+ chainConfig *chain.Config,
+ header *types.Header,
+ txnHash common.Hash,
+ signed bool,
+ withBlockTimestamp bool,
+) map[string]interface{} {
+ var chainId *big.Int
+ switch t := txn.(type) {
+ case *types.LegacyTx:
+ if t.Protected() {
+ chainId = types.DeriveChainId(&t.V).ToBig()
+ }
+ default:
+ chainId = txn.GetChainID().ToBig()
+ }
+
+ var from common.Address
+ if signed {
+ signer := types.NewArbitrumSigner(*types.LatestSignerForChainID(chainId))
+ from, _ = signer.Sender(txn)
+ }
+
+ var logsToMarshal interface{}
+
+ if withBlockTimestamp {
+ if receipt.Logs != nil {
+ rpcLogs := []*types.RPCLog{}
+ for _, l := range receipt.Logs {
+ rpcLogs = append(rpcLogs, types.ToRPCTransactionLog(l, header, txnHash, uint64(receipt.TransactionIndex)))
+ }
+ logsToMarshal = rpcLogs
+ } else {
+ logsToMarshal = make([]*types.RPCLog, 0)
+ }
+ } else {
+ if receipt.Logs == nil {
+ logsToMarshal = make([]*types.Log, 0)
+ } else {
+ logsToMarshal = receipt.Logs
+ }
+ }
+
+ fields := map[string]interface{}{
+ "blockHash": receipt.BlockHash,
+ "blockNumber": hexutil.Uint64(receipt.BlockNumber.Uint64()),
+ "transactionHash": txnHash,
+ "transactionIndex": hexutil.Uint64(receipt.TransactionIndex),
+ "from": from,
+ "to": txn.GetTo(),
+ "type": hexutil.Uint(txn.Type()),
+ "gasUsed": hexutil.Uint64(receipt.GasUsed),
+ "cumulativeGasUsed": hexutil.Uint64(receipt.CumulativeGasUsed),
+ "contractAddress": nil,
+ "logs": logsToMarshal,
+ "logsBloom": types.CreateBloom(types.Receipts{receipt}),
+ }
+
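+ // effectiveGasPrice: before London there is no base fee, so fall back to the txn tip cap; after London it is baseFee + effective tip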
+ if !chainConfig.IsLondon(header.Number.Uint64()) {
+ fields["effectiveGasPrice"] = (*hexutil.Big)(txn.GetTipCap().ToBig())
+ } else {
+ baseFee, _ := uint256.FromBig(header.BaseFee)
+ gasPrice := new(big.Int).Add(header.BaseFee, txn.GetEffectiveGasTip(baseFee).ToBig())
+ fields["effectiveGasPrice"] = (*hexutil.Big)(gasPrice)
+ }
+
+ // Assign receipt status.
+ fields["status"] = hexutil.Uint64(receipt.Status)
+
+ // If the ContractAddress is 20 zero bytes, assume the transaction was not a contract creation
+ if receipt.ContractAddress != (common.Address{}) {
+ fields["contractAddress"] = receipt.ContractAddress
+ }
+
+ // Set derived blob related fields
+ numBlobs := len(txn.GetBlobHashes())
+ if numBlobs > 0 {
+ if header.ExcessBlobGas == nil {
+ log.Warn("excess blob gas not set when trying to marshal blob tx")
+ } else {
+ blobGasPrice, err := misc.GetBlobGasPrice(chainConfig, *header.ExcessBlobGas, header.Time)
+ if err != nil {
+ log.Error(err.Error())
+ }
+ fields["blobGasPrice"] = (*hexutil.Big)(blobGasPrice.ToBig())
+ fields["blobGasUsed"] = hexutil.Uint64(misc.GetBlobGasUsed(numBlobs))
+ }
+ }
+
+ // Set arbitrum related fields
+ if chainConfig.IsArbitrum() {
+ fields["gasUsedForL1"] = hexutil.Uint64(receipt.GasUsedForL1)
+
+ if chainConfig.IsArbitrumNitro(header.Number) {
+ fields["effectiveGasPrice"] = hexutil.Uint64(header.BaseFee.Uint64())
+ fields["l1BlockNumber"] = hexutil.Uint64(types.DeserializeHeaderExtraInformation(header).L1BlockNumber)
+ fields["timeboosted"] = txn.IsTimeBoosted()
+ } else {
+ arbTx, ok := txn.(*types.ArbitrumLegacyTxData)
+ if !ok {
+ log.Error("Expected transaction to contain arbitrum data", "txHash", txn.Hash())
+ } else {
+ fields["effectiveGasPrice"] = hexutil.Uint64(arbTx.EffectiveGasPrice)
+ fields["l1BlockNumber"] = hexutil.Uint64(arbTx.L1BlockNumber)
+ }
+ }
+
+ // For ArbitrumSubmitRetryableTx we have to take the effective gas used from the txn itself and correct cumulativeGasUsed
+ if arbitrumTx, ok := txn.(*types.ArbitrumSubmitRetryableTx); ok {
+ // Recompute cumulativeGasUsed: drop the receipt's gas used and add the txn's effective gas used
+ cumulativeGasUsed := receipt.CumulativeGasUsed - receipt.GasUsed
+ fields["cumulativeGasUsed"] = hexutil.Uint64(cumulativeGasUsed + arbitrumTx.EffectiveGasUsed)
+ // gasUsed is the effective gas used carried by the transaction itself
+ fields["gasUsed"] = hexutil.Uint64(arbitrumTx.EffectiveGasUsed)
+ }
+ }
+ return fields
+}
diff --git a/eth/integrity/receipts_no_duplicates.go b/eth/integrity/receipts_no_duplicates.go
new file mode 100644
index 00000000000..8054dadbe59
--- /dev/null
+++ b/eth/integrity/receipts_no_duplicates.go
@@ -0,0 +1,151 @@
+package integrity
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/db/kv/rawdbv3"
+ "github.com/erigontech/erigon/db/rawdb/rawtemporaldb"
+ "github.com/erigontech/erigon/log/v3"
+ "github.com/erigontech/erigon/turbo/services"
+)
+
+// CheckReceiptsNoDups performs integrity checks on receipts to ensure no duplicates exist.
+// This function uses parallel processing for improved performance.
+func CheckReceiptsNoDups(ctx context.Context, db kv.TemporalRoDB, blockReader services.FullBlockReader, failFast bool) (err error) {
+ defer func() {
+ log.Info("[integrity] ReceiptsNoDups: done", "err", err)
+ }()
+
+ logEvery := time.NewTicker(10 * time.Second)
+ defer logEvery.Stop()
+
+ txNumsReader := blockReader.TxnumReader(ctx)
+
+ if err := ValidateDomainProgress(db, kv.ReceiptDomain, txNumsReader); err != nil {
+ return err
+ }
+
+ tx, err := db.BeginTemporalRo(ctx)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ receiptProgress := tx.Debug().DomainProgress(kv.ReceiptDomain)
+ fromBlock := uint64(1)
+ toBlock, _, _ := txNumsReader.FindBlockNum(tx, receiptProgress)
+
+ log.Info("[integrity] ReceiptsNoDups starting", "fromBlock", fromBlock, "toBlock", toBlock)
+
+ return parallelChunkCheck(ctx, fromBlock, toBlock, db, blockReader, failFast, ReceiptsNoDupsRange)
+}
+
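+// ReceiptsNoDupsRange checks that, within each block of [fromBlock, toBlock), the cumulative gas used
+// strictly increases and the log index does not decrease, skipping the first and last (system) txns of a block.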
+func ReceiptsNoDupsRange(ctx context.Context, fromBlock, toBlock uint64, tx kv.TemporalTx, blockReader services.FullBlockReader, failFast bool) (err error) {
+ txNumsReader := blockReader.TxnumReader(ctx)
+ fromTxNum, err := txNumsReader.Min(tx, fromBlock)
+ if err != nil {
+ return err
+ }
+
+ if toBlock > 0 {
+ toBlock-- // [fromBlock,toBlock)
+ }
+
+ toTxNum, err := txNumsReader.Max(tx, toBlock)
+ if err != nil {
+ return err
+ }
+
+ prevCumUsedGas := -1
+ prevLogIdxAfterTx := uint32(0)
+ blockNum := fromBlock
+ var _min, _max uint64
+ _min, _ = txNumsReader.Min(tx, fromBlock)
+ _max, _ = txNumsReader.Max(tx, fromBlock)
+ for txNum := fromTxNum; txNum <= toTxNum; txNum++ {
+ cumUsedGas, _, logIdxAfterTx, err := rawtemporaldb.ReceiptAsOf(tx, txNum+1)
+ if err != nil {
+ return err
+ }
+
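+ // reset the per-block counters when we reach the first txn of a block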
+ blockChanged := txNum == _min
+ if blockChanged {
+ prevCumUsedGas = 0
+ prevLogIdxAfterTx = 0
+ }
+
+ strongMonotonicCumGasUsed := int(cumUsedGas) > prevCumUsedGas
+ if !strongMonotonicCumGasUsed && txNum != _min && txNum != _max { // system tx can be skipped
+ err := fmt.Errorf("CheckReceiptsNoDups: non-monotonic cumGasUsed at txnum: %d, block: %d(%d-%d), cumGasUsed=%d, prevCumGasUsed=%d", txNum, blockNum, _min, _max, cumUsedGas, prevCumUsedGas)
+ if failFast {
+ return err
+ }
+ log.Error(err.Error())
+ }
+
+ monotonicLogIdx := logIdxAfterTx >= prevLogIdxAfterTx
+ if !monotonicLogIdx && txNum != _min && txNum != _max {
+ err := fmt.Errorf("CheckReceiptsNoDups: non-monotonic logIndex at txnum: %d, block: %d(%d-%d), logIdxAfterTx=%d, prevLogIdxAfterTx=%d", txNum, blockNum, _min, _max, logIdxAfterTx, prevLogIdxAfterTx)
+ if failFast {
+ return err
+ }
+ log.Error(err.Error())
+ }
+
+ prevCumUsedGas = int(cumUsedGas)
+ prevLogIdxAfterTx = logIdxAfterTx
+
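+ // after the last txn of the current block, advance to the next block and refresh its txn bounds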
+ if txNum == _max {
+ blockNum++
+ _min = _max + 1
+ _max, _ = txNumsReader.Max(tx, blockNum)
+ }
+
+ if txNum%1000 == 0 {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+ }
+ }
+ return nil
+}
+
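+// ValidateDomainProgress compares the given domain's progress with AccountsDomain: a lagging domain
+// only triggers a warning, while a domain ahead of AccountsDomain is reported as an error.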
+func ValidateDomainProgress(db kv.TemporalRoDB, domain kv.Domain, txNumsReader rawdbv3.TxNumsReader) (err error) {
+ tx, err := db.BeginTemporalRo(context.Background())
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ receiptProgress := tx.Debug().DomainProgress(domain)
+ accProgress := tx.Debug().DomainProgress(kv.AccountsDomain)
+ if accProgress > receiptProgress {
+ e1, _, _ := txNumsReader.FindBlockNum(tx, receiptProgress)
+ e2, _, _ := txNumsReader.FindBlockNum(tx, accProgress)
+
+ // accProgress can legitimately be greater than the domain progress in some scenarios:
+ // e.g. system txns update accounts but produce no receipts,
+ // and a series of empty blocks towards the end can cause big gaps.
+ // The warning is kept because the gap can also point to problematic cases,
+ // such as StageCustomTrace not having run to completion, leaving missing data in receipt/rcache.
+ msg := fmt.Sprintf("[integrity] %s=%d (%d) is behind AccountDomain=%d(%d); this might be okay, please check", domain.String(), receiptProgress, e1, accProgress, e2)
+ log.Warn(msg)
+ return nil
+ } else if accProgress < receiptProgress {
+ // something very wrong
+ e1, _, _ := txNumsReader.FindBlockNum(tx, receiptProgress)
+ e2, _, _ := txNumsReader.FindBlockNum(tx, accProgress)
+
+ err := fmt.Errorf("[integrity] %s=%d (%d) is ahead of AccountDomain=%d(%d)", domain.String(), receiptProgress, e1, accProgress, e2)
+ log.Error(err.Error())
+ return err
+
+ }
+ return nil
+}
diff --git a/execution/abi/bind/backends/simulated.go b/execution/abi/bind/backends/simulated.go
index ed8096d63b5..8e9435451a5 100644
--- a/execution/abi/bind/backends/simulated.go
+++ b/execution/abi/bind/backends/simulated.go
@@ -169,7 +169,8 @@ func (b *SimulatedBackend) emptyPendingBlock() {
b.pendingBlock = blockChain.Blocks[0]
b.pendingReceipts = blockChain.Receipts[0]
b.pendingHeader = blockChain.Headers[0]
- b.gasPool = new(protocol.GasPool).AddGas(b.pendingHeader.GasLimit).AddBlobGas(b.m.ChainConfig.GetMaxBlobGasPerBlock(b.pendingHeader.Time))
+ arbOsVersion := types.GetArbOSVersion(b.pendingHeader, b.m.ChainConfig)
+ b.gasPool = new(protocol.GasPool).AddGas(b.pendingHeader.GasLimit).AddBlobGas(b.m.ChainConfig.GetMaxBlobGasPerBlock(b.pendingHeader.Time, arbOsVersion))
if b.pendingReaderTx != nil {
b.pendingReaderTx.Rollback()
}
@@ -624,7 +625,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs
} else {
hi = b.pendingBlock.GasLimit()
}
- if hi > params.MaxTxnGasLimit && b.m.ChainConfig.IsOsaka(b.pendingBlock.Time()) {
+ if hi > params.MaxTxnGasLimit && /*!b.m.ChainConfig.IsArbitrum() &&*/ b.m.ChainConfig.IsOsaka(0, b.pendingBlock.Time(), 0) {
// Cap the maximum gas allowance according to EIP-7825 if Osaka
hi = params.MaxTxnGasLimit
}
diff --git a/execution/abi/bind/base.go b/execution/abi/bind/base.go
index 89f8bdfc528..8152b1c5987 100644
--- a/execution/abi/bind/base.go
+++ b/execution/abi/bind/base.go
@@ -24,6 +24,8 @@ import (
"errors"
"fmt"
"math/big"
+ "strings"
+ "sync"
"github.com/holiman/uint256"
@@ -44,6 +46,8 @@ type CallOpts struct {
From common.Address // Optional the sender address, otherwise the first account is used
BlockNumber *big.Int // Optional the block number on which the call should be performed
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
+
+ BlockHash common.Hash // Arbitrum; Optional the block hash on which the call should be performed
}
// TransactOpts is the collection of authorization data required to create a
@@ -58,6 +62,13 @@ type TransactOpts struct {
GasLimit uint64 // Gas limit to set for the transaction execution (0 = estimate)
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
+
+ // Arbitrum specific fields
+ GasTipCap *big.Int // Gas priority fee cap to use for the 1559 transaction execution (nil = gas price oracle)
+ GasMargin uint64 // Arbitrum: adjusts gas estimate by this many basis points (0 = no adjustment)
+ GasFeeCap *big.Int // Gas fee cap to use for the 1559 transaction execution (nil = gas price oracle)
+ NoSend bool // Do all transact steps but do not send the transaction
+
}
// FilterOpts is the collection of options to fine tune filtering for events
@@ -76,6 +87,32 @@ type WatchOpts struct {
Context context.Context // Network context to support cancellation and timeouts (nil = no timeout)
}
+// Arbitrum
+// MetaData collects all metadata for a bound contract.
+type MetaData struct {
+ mu sync.Mutex
+ Sigs map[string]string
+ Bin string
+ ABI string
+ ab *abi.ABI
+}
+
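+// GetAbi parses the contract ABI on first use and caches it for subsequent calls.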
+func (m *MetaData) GetAbi() (*abi.ABI, error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if m.ab != nil {
+ return m.ab, nil
+ }
+ if parsed, err := abi.JSON(strings.NewReader(m.ABI)); err != nil {
+ return nil, err
+ } else {
+ m.ab = &parsed
+ }
+ return m.ab, nil
+}
+
+// End of Arbitrum
+
// BoundContract is the base wrapper object that reflects a contract on the
// Ethereum network. It contains a collection of methods that are used by the
// higher level contract bindings to operate.
@@ -240,6 +277,27 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
if overflow {
return nil, errors.New("gasPriceBig higher than 2^256-1")
}
+
+ // // Estimate TipCap Arbitrum
+ // gasTipCap := opts.GasTipCap
+ // if gasTipCap == nil {
+ // tip, err := c.transactor.SuggestGasTipCap(ensureContext(opts.Context))
+ // if err != nil {
+ // return nil, err
+ // }
+ // gasTipCap = tip
+ // }
+ // // Estimate FeeCap
+ // gasFeeCap := opts.GasFeeCap
+ // if gasFeeCap == nil {
+ // gasFeeCap = new(big.Int).Add(
+ // gasTipCap,
+ // new(big.Int).Mul(head.BaseFee, big.NewInt(basefeeWiggleMultiplier)),
+ // )
+ // }
+ // if gasFeeCap.Cmp(gasTipCap) < 0 {
+ // return nil, fmt.Errorf("maxFeePerGas (%v) < maxPriorityFeePerGas (%v)", gasFeeCap, gasTipCap)
+ // }
gasLimit := opts.GasLimit
if gasLimit == 0 {
// Gas estimation cannot succeed without code for method invocations
@@ -256,6 +314,12 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i
if err != nil {
return nil, fmt.Errorf("failed to estimate gas needed: %w", err)
}
+
+ // Arbitrum: adjust the estimate
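+ // e.g. GasMargin=1000 (basis points) bumps the estimated gas limit by 10%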
+ adjustedLimit := gasLimit * (10000 + opts.GasMargin) / 10000
+ if adjustedLimit > gasLimit {
+ gasLimit = adjustedLimit
+ }
}
// Create the transaction, sign it and schedule it for execution
var rawTx types.Transaction
diff --git a/execution/abi/bind/bind_test.go b/execution/abi/bind/bind_test.go
index d933974f6da..1f2c09f9f70 100644
--- a/execution/abi/bind/bind_test.go
+++ b/execution/abi/bind/bind_test.go
@@ -1157,7 +1157,7 @@ var bindTests = []struct {
}
//quick check to see if contents were copied
- // (See accounts/abi/unpack_test.go for more extensive testing)
+ // (See execution/abi/unpack_test.go for more extensive testing)
if retrievedArr[4][3][2] != testArr[4][3][2] {
t.Fatalf("Retrieved value does not match expected value! got: %d, expected: %d. %v", retrievedArr[4][3][2], testArr[4][3][2], err)
}
@@ -1809,8 +1809,8 @@ var bindTests = []struct {
`
"math/big"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/execution/abi/bind"
+ "github.com/ethereum/go-ethereum/execution/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node/ethconfig"
@@ -1879,8 +1879,8 @@ var bindTests = []struct {
`
"math/big"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/execution/abi/bind"
+ "github.com/ethereum/go-ethereum/execution/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node/ethconfig"
@@ -1931,8 +1931,8 @@ var bindTests = []struct {
imports: `
"math/big"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/execution/abi/bind"
+ "github.com/ethereum/go-ethereum/execution/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node/ethconfig"
@@ -1979,8 +1979,8 @@ var bindTests = []struct {
imports: `
"math/big"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/execution/abi/bind"
+ "github.com/ethereum/go-ethereum/execution/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node/ethconfig"
@@ -2019,8 +2019,8 @@ var bindTests = []struct {
imports: `
"math/big"
- "github.com/ethereum/go-ethereum/accounts/abi/bind"
- "github.com/ethereum/go-ethereum/accounts/abi/bind/backends"
+ "github.com/ethereum/go-ethereum/execution/abi/bind"
+ "github.com/ethereum/go-ethereum/execution/abi/bind/backends"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/node/ethconfig"
diff --git a/execution/abi/bind/source.go.tpl b/execution/abi/bind/source.go.tpl
new file mode 100644
index 00000000000..efce987576c
--- /dev/null
+++ b/execution/abi/bind/source.go.tpl
@@ -0,0 +1,487 @@
+// Code generated - DO NOT EDIT.
+// This file is a generated binding and any manual changes will be lost.
+
+package {{.Package}}
+
+import (
+ "math/big"
+ "strings"
+ "errors"
+
+ ethereum "github.com/erigontech/erigon"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/abi"
+ "github.com/erigontech/erigon/execution/abi/bind"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/erigontech/erigon/p2p/event"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var (
+ _ = errors.New
+ _ = big.NewInt
+ _ = strings.NewReader
+ _ = ethereum.NotFound
+ _ = bind.Bind
+ _ = common.Big1
+ _ = types.BloomLookup
+ _ = event.NewSubscription
+ _ = abi.ConvertType
+)
+
+{{$structs := .Structs}}
+{{range $structs}}
+ // {{.Name}} is an auto generated low-level Go binding around an user-defined struct.
+ type {{.Name}} struct {
+ {{range $field := .Fields}}
+ {{$field.Name}} {{$field.Type}}{{end}}
+ }
+{{end}}
+
+{{range $contract := .Contracts}}
+ // {{.Type}}MetaData contains all meta data concerning the {{.Type}} contract.
+ var {{.Type}}MetaData = &bind.MetaData{
+ ABI: "{{.InputABI}}",
+ {{if $contract.FuncSigs -}}
+ Sigs: map[string]string{
+ {{range $strsig, $binsig := .FuncSigs}}"{{$binsig}}": "{{$strsig}}",
+ {{end}}
+ },
+ {{end -}}
+ {{if .InputBin -}}
+ Bin: "0x{{.InputBin}}",
+ {{end}}
+ }
+ // {{.Type}}ABI is the input ABI used to generate the binding from.
+ // Deprecated: Use {{.Type}}MetaData.ABI instead.
+ var {{.Type}}ABI = {{.Type}}MetaData.ABI
+
+ {{if $contract.FuncSigs}}
+ // Deprecated: Use {{.Type}}MetaData.Sigs instead.
+ // {{.Type}}FuncSigs maps the 4-byte function signature to its string representation.
+ var {{.Type}}FuncSigs = {{.Type}}MetaData.Sigs
+ {{end}}
+
+ {{if .InputBin}}
+ // {{.Type}}Bin is the compiled bytecode used for deploying new contracts.
+ // Deprecated: Use {{.Type}}MetaData.Bin instead.
+ var {{.Type}}Bin = {{.Type}}MetaData.Bin
+
+ // Deploy{{.Type}} deploys a new Ethereum contract, binding an instance of {{.Type}} to it.
+ func Deploy{{.Type}}(auth *bind.TransactOpts, backend bind.ContractBackend {{range .Constructor.Inputs}}, {{.Name}} {{bindtype .Type $structs}}{{end}}) (common.Address, types.Transaction, *{{.Type}}, error) {
+ parsed, err := {{.Type}}MetaData.GetAbi()
+ if err != nil {
+ return common.Address{}, nil, nil, err
+ }
+ if parsed == nil {
+ return common.Address{}, nil, nil, errors.New("GetABI returned nil")
+ }
+ {{range $pattern, $name := .Libraries}}
+ {{decapitalise $name}}Addr, _, _, _ := Deploy{{capitalise $name}}(auth, backend)
+ {{$contract.Type}}Bin = strings.ReplaceAll({{$contract.Type}}Bin, "__${{$pattern}}$__", {{decapitalise $name}}Addr.String()[2:])
+ {{end}}
+ address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex({{.Type}}Bin), backend {{range .Constructor.Inputs}}, {{.Name}}{{end}})
+ if err != nil {
+ return common.Address{}, nil, nil, err
+ }
+ return address, tx, &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
+ }
+ {{end}}
+
+ // {{.Type}} is an auto generated Go binding around an Ethereum contract.
+ type {{.Type}} struct {
+ {{.Type}}Caller // Read-only binding to the contract
+ {{.Type}}Transactor // Write-only binding to the contract
+ {{.Type}}Filterer // Log filterer for contract events
+ }
+
+ // {{.Type}}Caller is an auto generated read-only Go binding around an Ethereum contract.
+ type {{.Type}}Caller struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+ }
+
+ // {{.Type}}Transactor is an auto generated write-only Go binding around an Ethereum contract.
+ type {{.Type}}Transactor struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+ }
+
+	// {{.Type}}Filterer is an auto generated log filtering Go binding around Ethereum contract events.
+ type {{.Type}}Filterer struct {
+ contract *bind.BoundContract // Generic contract wrapper for the low level calls
+ }
+
+ // {{.Type}}Session is an auto generated Go binding around an Ethereum contract,
+ // with pre-set call and transact options.
+ type {{.Type}}Session struct {
+ Contract *{{.Type}} // Generic contract binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+ }
+
+ // {{.Type}}CallerSession is an auto generated read-only Go binding around an Ethereum contract,
+ // with pre-set call options.
+ type {{.Type}}CallerSession struct {
+ Contract *{{.Type}}Caller // Generic contract caller binding to set the session for
+ CallOpts bind.CallOpts // Call options to use throughout this session
+ }
+
+ // {{.Type}}TransactorSession is an auto generated write-only Go binding around an Ethereum contract,
+ // with pre-set transact options.
+ type {{.Type}}TransactorSession struct {
+ Contract *{{.Type}}Transactor // Generic contract transactor binding to set the session for
+ TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session
+ }
+
+ // {{.Type}}Raw is an auto generated low-level Go binding around an Ethereum contract.
+ type {{.Type}}Raw struct {
+ Contract *{{.Type}} // Generic contract binding to access the raw methods on
+ }
+
+ // {{.Type}}CallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract.
+ type {{.Type}}CallerRaw struct {
+ Contract *{{.Type}}Caller // Generic read-only contract binding to access the raw methods on
+ }
+
+ // {{.Type}}TransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract.
+ type {{.Type}}TransactorRaw struct {
+ Contract *{{.Type}}Transactor // Generic write-only contract binding to access the raw methods on
+ }
+
+ // New{{.Type}} creates a new instance of {{.Type}}, bound to a specific deployed contract.
+ func New{{.Type}}(address common.Address, backend bind.ContractBackend) (*{{.Type}}, error) {
+ contract, err := bind{{.Type}}(address, backend, backend, backend)
+ if err != nil {
+ return nil, err
+ }
+ return &{{.Type}}{ {{.Type}}Caller: {{.Type}}Caller{contract: contract}, {{.Type}}Transactor: {{.Type}}Transactor{contract: contract}, {{.Type}}Filterer: {{.Type}}Filterer{contract: contract} }, nil
+ }
+
+ // New{{.Type}}Caller creates a new read-only instance of {{.Type}}, bound to a specific deployed contract.
+ func New{{.Type}}Caller(address common.Address, caller bind.ContractCaller) (*{{.Type}}Caller, error) {
+ contract, err := bind{{.Type}}(address, caller, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &{{.Type}}Caller{contract: contract}, nil
+ }
+
+ // New{{.Type}}Transactor creates a new write-only instance of {{.Type}}, bound to a specific deployed contract.
+ func New{{.Type}}Transactor(address common.Address, transactor bind.ContractTransactor) (*{{.Type}}Transactor, error) {
+ contract, err := bind{{.Type}}(address, nil, transactor, nil)
+ if err != nil {
+ return nil, err
+ }
+ return &{{.Type}}Transactor{contract: contract}, nil
+ }
+
+ // New{{.Type}}Filterer creates a new log filterer instance of {{.Type}}, bound to a specific deployed contract.
+ func New{{.Type}}Filterer(address common.Address, filterer bind.ContractFilterer) (*{{.Type}}Filterer, error) {
+ contract, err := bind{{.Type}}(address, nil, nil, filterer)
+ if err != nil {
+ return nil, err
+ }
+ return &{{.Type}}Filterer{contract: contract}, nil
+ }
+
+ // bind{{.Type}} binds a generic wrapper to an already deployed contract.
+ func bind{{.Type}}(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) {
+ parsed, err := {{.Type}}MetaData.GetAbi()
+ if err != nil {
+ return nil, err
+ }
+ return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil
+ }
+
+ // Call invokes the (constant) contract method with params as input values and
+ // sets the output to result. The result type might be a single field for simple
+ // returns, a slice of interfaces for anonymous returns and a struct for named
+ // returns.
+ func (_{{$contract.Type}} *{{$contract.Type}}Raw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _{{$contract.Type}}.Contract.{{$contract.Type}}Caller.contract.Call(opts, result, method, params...)
+ }
+
+ // Transfer initiates a plain transaction to move funds to the contract, calling
+ // its default method if one is available.
+ func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transfer(opts)
+ }
+
+ // Transact invokes the (paid) contract method with params as input values.
+ func (_{{$contract.Type}} *{{$contract.Type}}Raw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.{{$contract.Type}}Transactor.contract.Transact(opts, method, params...)
+ }
+
+ // Call invokes the (constant) contract method with params as input values and
+ // sets the output to result. The result type might be a single field for simple
+ // returns, a slice of interfaces for anonymous returns and a struct for named
+ // returns.
+ func (_{{$contract.Type}} *{{$contract.Type}}CallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error {
+ return _{{$contract.Type}}.Contract.contract.Call(opts, result, method, params...)
+ }
+
+ // Transfer initiates a plain transaction to move funds to the contract, calling
+ // its default method if one is available.
+ func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.contract.Transfer(opts)
+ }
+
+ // Transact invokes the (paid) contract method with params as input values.
+ func (_{{$contract.Type}} *{{$contract.Type}}TransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.contract.Transact(opts, method, params...)
+ }
+
+ {{range .Calls}}
+ // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Caller) {{.Normalized.Name}}(opts *bind.CallOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} },{{else}}{{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}}{{end}} error) {
+ var out []interface{}
+ err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
+ {{if .Structured}}
+ outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} })
+ if err != nil {
+ return *outstruct, err
+ }
+ {{range $i, $t := .Normalized.Outputs}}
+ outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
+
+ return *outstruct, err
+ {{else}}
+ if err != nil {
+ return {{range $i, $_ := .Normalized.Outputs}}*new({{bindtype .Type $structs}}), {{end}} err
+ }
+ {{range $i, $t := .Normalized.Outputs}}
+ out{{$i}} := *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}}
+
+ return {{range $i, $t := .Normalized.Outputs}}out{{$i}}, {{end}} err
+ {{end}}
+ }
+
+ // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
+ return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
+ }
+
+ // {{.Normalized.Name}} is a free data retrieval call binding the contract method 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}CallerSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) ({{if .Structured}}struct{ {{range .Normalized.Outputs}}{{.Name}} {{bindtype .Type $structs}};{{end}} }, {{else}} {{range .Normalized.Outputs}}{{bindtype .Type $structs}},{{end}} {{end}} error) {
+ return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.CallOpts {{range .Normalized.Inputs}}, {{.Name}}{{end}})
+ }
+ {{end}}
+
+ {{range .Transacts}}
+ // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Transactor) {{.Normalized.Name}}(opts *bind.TransactOpts {{range .Normalized.Inputs}}, {{.Name}} {{bindtype .Type $structs}} {{end}}) (types.Transaction, error) {
+ return _{{$contract.Type}}.contract.Transact(opts, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}})
+ }
+
+ // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Session) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
+ }
+
+ // {{.Normalized.Name}} is a paid mutator transaction binding the contract method 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) {{.Normalized.Name}}({{range $i, $_ := .Normalized.Inputs}}{{if ne $i 0}},{{end}} {{.Name}} {{bindtype .Type $structs}} {{end}}) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.{{.Normalized.Name}}(&_{{$contract.Type}}.TransactOpts {{range $i, $_ := .Normalized.Inputs}}, {{.Name}}{{end}})
+ }
+ {{end}}
+
+ {{if .Fallback}}
+ // Fallback is a paid mutator transaction binding the contract fallback function.
+ //
+ // Solidity: {{.Fallback.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Fallback(opts *bind.TransactOpts, calldata []byte) (types.Transaction, error) {
+ return _{{$contract.Type}}.contract.RawTransact(opts, calldata)
+ }
+
+ // Fallback is a paid mutator transaction binding the contract fallback function.
+ //
+ // Solidity: {{.Fallback.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Session) Fallback(calldata []byte) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
+ }
+
+ // Fallback is a paid mutator transaction binding the contract fallback function.
+ //
+ // Solidity: {{.Fallback.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Fallback(calldata []byte) (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Fallback(&_{{$contract.Type}}.TransactOpts, calldata)
+ }
+ {{end}}
+
+ {{if .Receive}}
+ // Receive is a paid mutator transaction binding the contract receive function.
+ //
+ // Solidity: {{.Receive.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Transactor) Receive(opts *bind.TransactOpts) (types.Transaction, error) {
+ return _{{$contract.Type}}.contract.RawTransact(opts, nil) // calldata is disallowed for receive function
+ }
+
+ // Receive is a paid mutator transaction binding the contract receive function.
+ //
+ // Solidity: {{.Receive.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Session) Receive() (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
+ }
+
+ // Receive is a paid mutator transaction binding the contract receive function.
+ //
+ // Solidity: {{.Receive.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}TransactorSession) Receive() (types.Transaction, error) {
+ return _{{$contract.Type}}.Contract.Receive(&_{{$contract.Type}}.TransactOpts)
+ }
+ {{end}}
+
+ {{range .Events}}
+ // {{$contract.Type}}{{.Normalized.Name}}Iterator is returned from Filter{{.Normalized.Name}} and is used to iterate over the raw logs and unpacked data for {{.Normalized.Name}} events raised by the {{$contract.Type}} contract.
+ type {{$contract.Type}}{{.Normalized.Name}}Iterator struct {
+ Event *{{$contract.Type}}{{.Normalized.Name}} // Event containing the contract specifics and raw log
+
+ contract *bind.BoundContract // Generic contract to use for unpacking event data
+ event string // Event name to use for unpacking event data
+
+ logs chan types.Log // Log channel receiving the found contract events
+ sub ethereum.Subscription // Subscription for errors, completion and termination
+ done bool // Whether the subscription completed delivering logs
+ fail error // Occurred error to stop iteration
+ }
+ // Next advances the iterator to the subsequent event, returning whether there
+ // are any more events found. In case of a retrieval or parsing error, false is
+ // returned and Error() can be queried for the exact failure.
+ func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Next() bool {
+ // If the iterator failed, stop iterating
+ if (it.fail != nil) {
+ return false
+ }
+ // If the iterator completed, deliver directly whatever's available
+ if (it.done) {
+ select {
+ case log := <-it.logs:
+ it.Event = new({{$contract.Type}}{{.Normalized.Name}})
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ default:
+ return false
+ }
+ }
+ // Iterator still in progress, wait for either a data or an error event
+ select {
+ case log := <-it.logs:
+ it.Event = new({{$contract.Type}}{{.Normalized.Name}})
+ if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil {
+ it.fail = err
+ return false
+ }
+ it.Event.Raw = log
+ return true
+
+ case err := <-it.sub.Err():
+ it.done = true
+ it.fail = err
+ return it.Next()
+ }
+ }
+	// Error returns any retrieval or parsing error that occurred during filtering.
+ func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Error() error {
+ return it.fail
+ }
+ // Close terminates the iteration process, releasing any pending underlying
+ // resources.
+ func (it *{{$contract.Type}}{{.Normalized.Name}}Iterator) Close() error {
+ it.sub.Unsubscribe()
+ return nil
+ }
+
+ // {{$contract.Type}}{{.Normalized.Name}} represents a {{.Normalized.Name}} event raised by the {{$contract.Type}} contract.
+ type {{$contract.Type}}{{.Normalized.Name}} struct { {{range .Normalized.Inputs}}
+ {{capitalise .Name}} {{if .Indexed}}{{bindtopictype .Type $structs}}{{else}}{{bindtype .Type $structs}}{{end}}; {{end}}
+ Raw types.Log // Blockchain specific contextual infos
+ }
+
+ // Filter{{.Normalized.Name}} is a free log retrieval operation binding the contract event 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Filter{{.Normalized.Name}}(opts *bind.FilterOpts{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (*{{$contract.Type}}{{.Normalized.Name}}Iterator, error) {
+ {{range .Normalized.Inputs}}
+ {{if .Indexed}}var {{.Name}}Rule []interface{}
+ for _, {{.Name}}Item := range {{.Name}} {
+ {{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
+ }{{end}}{{end}}
+
+ logs, sub, err := _{{$contract.Type}}.contract.FilterLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
+ if err != nil {
+ return nil, err
+ }
+ return &{{$contract.Type}}{{.Normalized.Name}}Iterator{contract: _{{$contract.Type}}.contract, event: "{{.Original.Name}}", logs: logs, sub: sub}, nil
+ }
+
+ // Watch{{.Normalized.Name}} is a free log subscription operation binding the contract event 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Watch{{.Normalized.Name}}(opts *bind.WatchOpts, sink chan<- *{{$contract.Type}}{{.Normalized.Name}}{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}} []{{bindtype .Type $structs}}{{end}}{{end}}) (event.Subscription, error) {
+ {{range .Normalized.Inputs}}
+ {{if .Indexed}}var {{.Name}}Rule []interface{}
+ for _, {{.Name}}Item := range {{.Name}} {
+ {{.Name}}Rule = append({{.Name}}Rule, {{.Name}}Item)
+ }{{end}}{{end}}
+
+ logs, sub, err := _{{$contract.Type}}.contract.WatchLogs(opts, "{{.Original.Name}}"{{range .Normalized.Inputs}}{{if .Indexed}}, {{.Name}}Rule{{end}}{{end}})
+ if err != nil {
+ return nil, err
+ }
+ return event.NewSubscription(func(quit <-chan struct{}) error {
+ defer sub.Unsubscribe()
+ for {
+ select {
+ case log := <-logs:
+ // New log arrived, parse the event and forward to the user
+ event := new({{$contract.Type}}{{.Normalized.Name}})
+ if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
+ return err
+ }
+ event.Raw = log
+
+ select {
+ case sink <- event:
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ case err := <-sub.Err():
+ return err
+ case <-quit:
+ return nil
+ }
+ }
+ }), nil
+ }
+
+ // Parse{{.Normalized.Name}} is a log parse operation binding the contract event 0x{{printf "%x" .Original.ID}}.
+ //
+ // Solidity: {{.Original.String}}
+ func (_{{$contract.Type}} *{{$contract.Type}}Filterer) Parse{{.Normalized.Name}}(log types.Log) (*{{$contract.Type}}{{.Normalized.Name}}, error) {
+ event := new({{$contract.Type}}{{.Normalized.Name}})
+ if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil {
+ return nil, err
+ }
+ event.Raw = log
+ return event, nil
+ }
+
+ {{end}}
+{{end}}
\ No newline at end of file
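
The file above is a Go text/template that abigen expands once per contract. As a rough, self-contained sketch of the rendering mechanism only (toy data types, not abigen's real binder structures), a template using the same placeholder style can be expanded like this:

package main

import (
	"os"
	"text/template"
)

// contract is a toy stand-in for the per-contract data the real template receives.
type contract struct{ Type string }

// src uses the same {{range}}/{{.Field}} placeholder style as source.go.tpl,
// reduced to a single struct definition for brevity.
const src = `{{range $c := .Contracts}}
// {{$c.Type}} is an auto generated Go binding around an Ethereum contract.
type {{$c.Type}} struct {
	{{$c.Type}}Caller
	{{$c.Type}}Transactor
	{{$c.Type}}Filterer
}
{{end}}`

func main() {
	tmpl := template.Must(template.New("binding").Parse(src))
	data := struct{ Contracts []contract }{Contracts: []contract{{Type: "Token"}}}
	// Expands every {{$c.Type}} occurrence to "Token" and writes the result to stdout.
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
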
diff --git a/execution/abi/bind/util.go b/execution/abi/bind/util.go
index 9820c3d6af7..929baf08750 100644
--- a/execution/abi/bind/util.go
+++ b/execution/abi/bind/util.go
@@ -22,6 +22,7 @@ package bind
import (
"context"
"errors"
+ "fmt"
"time"
"github.com/erigontech/erigon/common"
@@ -77,3 +78,24 @@ func WaitDeployed(ctx context.Context, b DeployBackend, txn types.Transaction) (
}
return receipt.ContractAddress, err
}
+
+// ResolveNameConflict returns the next available name for a given thing.
+// This helper can be used for lots of purposes:
+//
+// - In Solidity, function overloading is supported, and this helper can resolve
+// the resulting name conflicts of overloaded functions.
+// - In Go binding generation, parameter names (in function, event, error,
+// and struct definitions) are converted to camel case, which may lead to
+// name conflicts.
+//
+// Name conflicts are resolved by appending a numeric suffix: for input "send",
+// the helper tries "send0", "send1", ... until an unused name is found.
+func ResolveNameConflict(rawName string, used func(string) bool) string {
+ name := rawName
+ ok := used(name)
+ for idx := 0; ok; idx++ {
+ name = fmt.Sprintf("%s%d", rawName, idx)
+ ok = used(name)
+ }
+ return name
+}
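
As a quick illustration of the suffixing behaviour documented above (a standalone sketch; the `used` predicate is a hypothetical stand-in for abigen's name registry):

package main

import "fmt"

// resolveNameConflict copies the logic of the ResolveNameConflict helper added
// above: append an increasing numeric suffix until the predicate reports the
// name as free.
func resolveNameConflict(rawName string, used func(string) bool) string {
	name := rawName
	ok := used(name)
	for idx := 0; ok; idx++ {
		name = fmt.Sprintf("%s%d", rawName, idx)
		ok = used(name)
	}
	return name
}

func main() {
	taken := map[string]bool{"send": true, "send0": true}
	// "send" and "send0" are taken, so the first free candidate is "send1".
	fmt.Println(resolveNameConflict("send", func(s string) bool { return taken[s] }))
}
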
diff --git a/execution/chain/chain_config.go b/execution/chain/chain_config.go
index 1f8752b5079..931fbf23382 100644
--- a/execution/chain/chain_config.go
+++ b/execution/chain/chain_config.go
@@ -25,10 +25,12 @@ import (
"sync"
"time"
+ "github.com/erigontech/erigon/arb/chain/types"
+ "github.com/erigontech/erigon/arb/osver"
"github.com/erigontech/erigon/common"
"github.com/erigontech/erigon/common/generics"
"github.com/erigontech/erigon/common/hexutil"
- "github.com/erigontech/erigon/execution/protocol/params"
+ "github.com/erigontech/erigon/execution/chain/params"
"github.com/erigontech/erigon/execution/types/accounts"
)
@@ -116,6 +118,8 @@ type Config struct {
// Account Abstraction
AllowAA bool
+
+ ArbitrumChainParams types.ArbitrumChainParams `json:"arbitrum,omitempty"`
}
var (
@@ -325,6 +329,9 @@ func (c *Config) IsBerlin(num uint64) bool {
// IsLondon returns whether num is either equal to the London fork block or greater.
func (c *Config) IsLondon(num uint64) bool {
+ if c.IsArbitrum() {
+ return isBlockForked(new(big.Int).SetUint64(c.ArbitrumChainParams.GenesisBlockNum), big.NewInt(int64(num)))
+ }
return isForked(c.LondonBlock, num)
}
@@ -339,7 +346,10 @@ func (c *Config) IsGrayGlacier(num uint64) bool {
}
// IsShanghai returns whether time is either equal to the Shanghai fork time or greater.
-func (c *Config) IsShanghai(time uint64) bool {
+func (c *Config) IsShanghai(time uint64, currentArbosVersion uint64) bool {
+ if c.IsArbitrum() {
+ return currentArbosVersion >= osver.ArbosVersion_11
+ }
return isForked(c.ShanghaiTime, time)
}
@@ -362,7 +372,10 @@ func (c *Config) IsBhilai(num uint64) bool {
}
// IsCancun returns whether time is either equal to the Cancun fork time or greater.
-func (c *Config) IsCancun(time uint64) bool {
+func (c *Config) IsCancun(time, currentArbosVersion uint64) bool {
+ if c.IsArbitrum() {
+ return currentArbosVersion >= osver.ArbosVersion_20
+ }
return isForked(c.CancunTime, time)
}
@@ -372,13 +385,19 @@ func (c *Config) IsAmsterdam(time uint64) bool {
}
// IsPrague returns whether time is either equal to the Prague fork time or greater.
-func (c *Config) IsPrague(time uint64) bool {
+func (c *Config) IsPrague(time uint64, currentArbosVersion uint64) bool {
+ if c.IsArbitrum() {
+ return currentArbosVersion >= osver.ArbosVersion_40
+ }
return isForked(c.PragueTime, time)
}
// IsOsaka returns whether time is either equal to the Osaka fork time or greater.
-func (c *Config) IsOsaka(time uint64) bool {
- return isForked(c.OsakaTime, time)
+func (c *Config) IsOsaka(num, time, currentArbosVersion uint64) bool {
+ if c.IsArbitrum() {
+ return currentArbosVersion >= osver.ArbosVersion_50
+ }
+ return c.IsLondon(num) && isForked(c.OsakaTime, time)
}
func (c *Config) GetBurntContract(num uint64) accounts.Address {
@@ -396,7 +415,7 @@ func (c *Config) GetMinBlobGasPrice() uint64 {
return 1 // MIN_BLOB_GASPRICE (EIP-4844)
}
-func (c *Config) GetBlobConfig(time uint64) *params.BlobConfig {
+func (c *Config) GetBlobConfig(time uint64, currentArbosVer uint64) *params.BlobConfig {
c.parseBlobScheduleOnce.Do(func() {
// Populate with default values
c.parsedBlobSchedule = make(map[uint64]*params.BlobConfig)
@@ -404,7 +423,9 @@ func (c *Config) GetBlobConfig(time uint64) *params.BlobConfig {
c.parsedBlobSchedule[c.CancunTime.Uint64()] = &params.DefaultCancunBlobConfig
}
if c.PragueTime != nil {
- c.parsedBlobSchedule[c.PragueTime.Uint64()] = &params.DefaultPragueBlobConfig
+ if c.IsPrague(time, currentArbosVer) {
+ c.parsedBlobSchedule[c.PragueTime.Uint64()] = &params.DefaultPragueBlobConfig
+ }
}
// Override with supplied values
@@ -449,33 +470,34 @@ func (c *Config) GetBlobConfig(time uint64) *params.BlobConfig {
return ConfigValueLookup(c.parsedBlobSchedule, time)
}
-func (c *Config) GetMaxBlobsPerBlock(time uint64) uint64 {
- if blobConfig := c.GetBlobConfig(time); blobConfig != nil {
+func (c *Config) GetMaxBlobsPerBlock(time uint64, currentArbosVer uint64) uint64 {
+ if blobConfig := c.GetBlobConfig(time, currentArbosVer); blobConfig != nil {
return blobConfig.Max
}
return 0
}
-func (c *Config) GetMaxBlobGasPerBlock(time uint64) uint64 {
- return c.GetMaxBlobsPerBlock(time) * params.GasPerBlob
+func (c *Config) GetMaxBlobGasPerBlock(time uint64, currentArbosVer uint64) uint64 {
+ return c.GetMaxBlobsPerBlock(time, currentArbosVer) * params.GasPerBlob
}
-func (c *Config) GetTargetBlobsPerBlock(time uint64) uint64 {
- if blobConfig := c.GetBlobConfig(time); blobConfig != nil {
+func (c *Config) GetTargetBlobsPerBlock(time uint64, currentArbosVer uint64) uint64 {
+ if blobConfig := c.GetBlobConfig(time, currentArbosVer); blobConfig != nil {
return blobConfig.Target
}
return 0
}
-func (c *Config) GetBlobGasPriceUpdateFraction(time uint64) uint64 {
- if blobConfig := c.GetBlobConfig(time); blobConfig != nil {
+func (c *Config) GetBlobGasPriceUpdateFraction(time uint64, currentArbosVer uint64) uint64 {
+ if blobConfig := c.GetBlobConfig(time, currentArbosVer); blobConfig != nil {
return blobConfig.BaseFeeUpdateFraction
}
return 0
}
func (c *Config) GetMaxRlpBlockSize(time uint64) int {
- if c.IsOsaka(time) {
+ // TODO arbitrum fields
+ if c.IsOsaka(0, time, 0) {
return params.MaxRlpBlockSize
}
return math.MaxInt
@@ -493,10 +515,10 @@ func (c *Config) SecondsPerSlot() uint64 {
func (c *Config) SystemContracts(time uint64) map[string]accounts.Address {
contracts := map[string]accounts.Address{}
- if c.IsCancun(time) {
+ if c.IsCancun(time, 0 /* currentArbosVersion */) {
contracts["BEACON_ROOTS_ADDRESS"] = params.BeaconRootsAddress
}
- if c.IsPrague(time) {
+ if c.IsPrague(time, 0 /* currentArbosVersion */) {
contracts["CONSOLIDATION_REQUEST_PREDEPLOY_ADDRESS"] = params.ConsolidationRequestAddress
contracts["DEPOSIT_CONTRACT_ADDRESS"] = accounts.InternAddress(c.DepositContract)
contracts["HISTORY_STORAGE_ADDRESS"] = params.HistoryStorageAddress
@@ -730,6 +752,40 @@ type Rules struct {
IsCancun, IsNapoli, IsBhilai bool
IsPrague, IsOsaka, IsAmsterdam bool
IsAura bool
+ IsArbitrum, IsStylus, IsDia bool
+ ArbOSVersion uint64
+}
+
+// ChainRules ensures c's ChainID is not nil and returns a new Rules instance.
+func (c *Config) ChainRules(num uint64, time, currentArbosVersion uint64) *Rules {
+ chainID := c.ChainID
+ if chainID == nil {
+ chainID = new(big.Int)
+ }
+
+ return &Rules{
+ ChainID: new(big.Int).Set(chainID),
+ IsHomestead: c.IsHomestead(num),
+ IsTangerineWhistle: c.IsTangerineWhistle(num),
+ IsSpuriousDragon: c.IsSpuriousDragon(num),
+ IsByzantium: c.IsByzantium(num),
+ IsConstantinople: c.IsConstantinople(num),
+ IsPetersburg: c.IsPetersburg(num),
+ IsIstanbul: c.IsIstanbul(num),
+ IsBerlin: c.IsBerlin(num),
+ IsLondon: c.IsLondon(num),
+ IsShanghai: c.IsShanghai(time, currentArbosVersion) || c.IsAgra(num),
+ IsCancun: c.IsCancun(time, currentArbosVersion),
+ IsNapoli: c.IsNapoli(num),
+ IsBhilai: c.IsBhilai(num),
+ IsPrague: c.IsPrague(time, currentArbosVersion) || c.IsBhilai(num),
+ IsOsaka: c.IsOsaka(num, time, currentArbosVersion),
+ IsAura: c.Aura != nil,
+ ArbOSVersion: currentArbosVersion,
+ IsArbitrum: c.IsArbitrum(),
+ IsStylus: c.IsArbitrum() && currentArbosVersion >= osver.ArbosVersion_Stylus,
+ IsDia: c.IsArbitrum() && currentArbosVersion >= osver.ArbosVersion_50,
+ }
}
// isForked returns whether a fork scheduled at block s is active at the given head block.
@@ -743,3 +799,128 @@ func isForked(s *big.Int, head uint64) bool {
func (c *Config) IsPreMerge(blockNumber uint64) bool {
return c.MergeHeight != nil && blockNumber < c.MergeHeight.Uint64()
}
+
+func (c *Config) IsArbitrum() bool {
+ return c.ArbitrumChainParams.EnableArbOS
+}
+
+func (c *Config) IsArbitrumNitro(num *big.Int) bool {
+ return c.IsArbitrum() && isBlockForked(new(big.Int).SetUint64(c.ArbitrumChainParams.GenesisBlockNum), num)
+}
+
+// isBlockForked returns whether a fork scheduled at block s is active at the
+// given head block. Whilst this method mirrors the existing isForked helper,
+// the two are kept explicitly separate for clearer reading.
+func isBlockForked(s, head *big.Int) bool {
+ if s == nil || head == nil {
+ return false
+ }
+ return s.Cmp(head) <= 0
+}
+
+func (c *Config) MaxCodeSize() uint64 {
+ if c.ArbitrumChainParams.MaxCodeSize == 0 {
+ return 24576
+ }
+ return c.ArbitrumChainParams.MaxCodeSize
+}
+
+func (c *Config) MaxInitCodeSize() uint64 {
+ if c.ArbitrumChainParams.MaxInitCodeSize == 0 {
+ return c.MaxCodeSize() * 2
+ }
+ return c.ArbitrumChainParams.MaxInitCodeSize
+}
+
+func (c *Config) DebugMode() bool {
+ return c.ArbitrumChainParams.AllowDebugPrecompiles
+}
+
+func newBlockCompatError(what string, storedblock, newblock *big.Int) *ConfigCompatError {
+ var rew *big.Int
+ switch {
+ case storedblock == nil:
+ rew = newblock
+ case newblock == nil || storedblock.Cmp(newblock) < 0:
+ rew = storedblock
+ default:
+ rew = newblock
+ }
+ err := &ConfigCompatError{
+ What: what,
+ StoredConfig: storedblock,
+ NewConfig: newblock,
+ RewindTo: 0,
+ }
+ if rew != nil && rew.Sign() > 0 {
+ err.RewindTo = rew.Uint64() - 1
+ }
+ return err
+}
+
+func (c *Config) checkArbitrumCompatible(newcfg *Config, head *big.Int) *ConfigCompatError {
+ if c.IsArbitrum() != newcfg.IsArbitrum() {
+ // This difference applies to the entire chain, so report that the genesis block is where the difference appears.
+ return newBlockCompatError("isArbitrum", common.Big0, common.Big0)
+ }
+ if !c.IsArbitrum() {
+ return nil
+ }
+ cArb := &c.ArbitrumChainParams
+ newArb := &newcfg.ArbitrumChainParams
+ if cArb.GenesisBlockNum != newArb.GenesisBlockNum {
+ return newBlockCompatError("genesisblocknum", new(big.Int).SetUint64(cArb.GenesisBlockNum), new(big.Int).SetUint64(newArb.GenesisBlockNum))
+ }
+ return nil
+}
+
+// DefaultCacheConfigWithScheme returns a deep copied default cache config with
+// a provided trie node scheme.
+func DefaultCacheConfigWithScheme(scheme string) *CacheConfig {
+ config := *defaultCacheConfig
+ config.StateScheme = scheme
+ return &config
+}
+
+// defaultCacheConfig are the default caching values if none are specified by the
+// user (also used during testing).
+var defaultCacheConfig = &CacheConfig{
+ // Arbitrum Config Options TODO remove
+ TriesInMemory: 128,
+ TrieRetention: 30 * time.Minute,
+ MaxNumberOfBlocksToSkipStateSaving: 0,
+ MaxAmountOfGasToSkipStateSaving: 0,
+
+ TrieCleanLimit: 256,
+ TrieDirtyLimit: 256,
+ TrieTimeLimit: 5 * time.Minute,
+ SnapshotLimit: 256,
+ SnapshotWait: true,
+ // StateScheme: rawdb.HashScheme,
+}
+
+// CacheConfig contains the configuration values for the trie database
+// and state snapshots that are resident in a blockchain.
+type CacheConfig struct {
+ TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory
+ TrieCleanNoPrefetch bool // Whether to disable heuristic state prefetching for followup blocks
+ TrieDirtyLimit int // Memory limit (MB) at which to start flushing dirty trie nodes to disk
+ TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node)
+ TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk
+ SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory
+ Preimages bool // Whether to store preimage of trie key to the disk
+ StateHistory uint64 // Number of blocks from head whose state histories are reserved.
+ StateScheme string // Scheme used to store ethereum states and merkle tree nodes on top
+
+ SnapshotRestoreMaxGas uint64 // Rollback up to this much gas to restore snapshot (otherwise snapshot recalculated from nothing)
+
+ // Arbitrum: configure GC window
+ TriesInMemory uint64 // Height difference before which a trie may not be garbage-collected
+ TrieRetention time.Duration // Time limit before which a trie may not be garbage-collected
+
+ MaxNumberOfBlocksToSkipStateSaving uint32
+ MaxAmountOfGasToSkipStateSaving uint64
+
+ SnapshotNoBuild bool // Whether the background generation is allowed
+ SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it
+}
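
The pattern introduced above is that, on Arbitrum chains, post-merge forks activate on ArbOS version rather than on block timestamp. A minimal sketch of that gating, with assumed numeric version values standing in for the osver constants and an example timestamp:

package main

import "fmt"

const arbosVersion11 = 11 // assumed value; stands in for osver.ArbosVersion_11 (Shanghai-equivalent)

type config struct {
	enableArbOS  bool   // mirrors ArbitrumChainParams.EnableArbOS
	shanghaiTime uint64 // fork timestamp used by non-Arbitrum chains
}

// isShanghai mirrors Config.IsShanghai above: Arbitrum chains key off the
// current ArbOS version, all other chains key off the fork timestamp.
func (c config) isShanghai(time, currentArbosVersion uint64) bool {
	if c.enableArbOS {
		return currentArbosVersion >= arbosVersion11
	}
	return time >= c.shanghaiTime
}

func main() {
	arb := config{enableArbOS: true}
	eth := config{shanghaiTime: 1_700_000_000} // example timestamp, not a real network value
	fmt.Println(arb.isShanghai(0, 11)) // true: ArbOS 11 enables Shanghai semantics regardless of time
	fmt.Println(eth.isShanghai(0, 0))  // false: before the configured fork timestamp
}
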
diff --git a/execution/chain/chain_config_test.go b/execution/chain/chain_config_test.go
index 2b9770488ea..73dca123a33 100644
--- a/execution/chain/chain_config_test.go
+++ b/execution/chain/chain_config_test.go
@@ -96,19 +96,19 @@ func TestNilBlobSchedule(t *testing.T) {
c.PragueTime = big.NewInt(2)
// Everything should be 0 before Cancun
- assert.Equal(t, uint64(0), c.GetTargetBlobsPerBlock(0))
- assert.Equal(t, uint64(0), c.GetMaxBlobsPerBlock(0))
- assert.Equal(t, uint64(0), c.GetBlobGasPriceUpdateFraction(0))
+ assert.Equal(t, uint64(0), c.GetTargetBlobsPerBlock(0, 0))
+ assert.Equal(t, uint64(0), c.GetMaxBlobsPerBlock(0, 0))
+ assert.Equal(t, uint64(0), c.GetBlobGasPriceUpdateFraction(0, 0))
// Original EIP-4844 values
- assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(1))
- assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(1))
- assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(1))
+ assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(1, 0))
+ assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(1, 0))
+ assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(1, 0))
// EIP-7691: Blob throughput increase
- assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(2))
- assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(2))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(2))
+ assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(2, 0))
+ assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(2, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(2, 0))
}
// EIP-7892
@@ -148,49 +148,49 @@ func TestBlobParameterOnlyHardforks(t *testing.T) {
}
time := uint64(0)
- assert.Equal(t, uint64(0), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(0), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(0), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(0), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(0), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(0), c.GetBlobGasPriceUpdateFraction(time, 0))
time = cancunTime
- assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(time, 0))
time = (cancunTime + pragueTime) / 2
- assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(time, 0))
time = pragueTime
- assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
time = (pragueTime + bpo1time) / 2
- assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
time = bpo1time
- assert.Equal(t, uint64(24), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(48), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(24), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(48), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
time = (bpo1time + bpo2time) / 2
- assert.Equal(t, uint64(24), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(48), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(24), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(48), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
time = bpo2time
- assert.Equal(t, uint64(36), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(56), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(36), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(56), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
time = bpo2time * 2
- assert.Equal(t, uint64(36), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(56), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(36), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(56), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
}
func TestBlobParameterInactiveHardfork(t *testing.T) {
@@ -221,9 +221,9 @@ func TestBlobParameterInactiveHardfork(t *testing.T) {
}
time := pragueTime * 2
- assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
}
func TestBlobParameterDencunAndPectraAtGenesis(t *testing.T) {
@@ -245,7 +245,44 @@ func TestBlobParameterDencunAndPectraAtGenesis(t *testing.T) {
}
// Prague should take priority
- assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(0))
- assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(0))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(0))
+ assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(0, 0))
+ assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(0, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(0, 0))
+}
+
+func TestArbitrumSpecifics(t *testing.T) {
+ cfg := &Config{
+ ChainID: big.NewInt(1337),
+ Consensus: EtHashConsensus,
+ HomesteadBlock: big.NewInt(0),
+ TangerineWhistleBlock: big.NewInt(0),
+ SpuriousDragonBlock: big.NewInt(0),
+ ByzantiumBlock: big.NewInt(0),
+ ConstantinopleBlock: big.NewInt(0),
+ PetersburgBlock: big.NewInt(0),
+ IstanbulBlock: big.NewInt(0),
+ MuirGlacierBlock: big.NewInt(0),
+ BerlinBlock: big.NewInt(0),
+ Ethash: new(EthashConfig),
+ ArbitrumChainParams: ArbitrumChainParams{EnableArbOS: true},
+ }
+
+ // Check if this is Arbitrum in the end
+ assert.True(t, cfg.IsArbitrum())
+
+ // London is enabled since genesis
+ assert.True(t, cfg.IsLondon(0))
+
+ // Shanghai based on Arbos
+ assert.False(t, cfg.IsShanghai(0, ArbosVersion_10))
+ assert.True(t, cfg.IsShanghai(0, ArbosVersion_11))
+
+ // Cancun based on Arbos
+ assert.False(t, cfg.IsCancun(0, ArbosVersion_11))
+ assert.True(t, cfg.IsCancun(0, ArbosVersion_20))
+
+ // Prague based on Arbos
+ assert.False(t, cfg.IsPrague(0, ArbosVersion_20))
+ assert.True(t, cfg.IsPrague(0, ArbosVersion_40))
+
}
diff --git a/execution/chain/networkname/network_name.go b/execution/chain/networkname/network_name.go
index 990922dd8b1..0c3e23b446d 100644
--- a/execution/chain/networkname/network_name.go
+++ b/execution/chain/networkname/network_name.go
@@ -33,6 +33,8 @@ const (
Gnosis = "gnosis"
BorE2ETestChain2Val = "bor-e2e-test-2Val"
Chiado = "chiado"
+ ArbitrumMainnet = "arb1"
+ ArbitrumSepolia = "arb-sepolia"
Test = "test"
)
@@ -46,6 +48,8 @@ var All = []string{
BorDevnet,
Gnosis,
Chiado,
+ ArbitrumMainnet,
+ ArbitrumSepolia,
Test,
}
diff --git a/execution/chain/params/protocol.go b/execution/chain/params/protocol.go
new file mode 100644
index 00000000000..610a968c1d5
--- /dev/null
+++ b/execution/chain/params/protocol.go
@@ -0,0 +1,266 @@
+// Copyright 2015 The go-ethereum Authors
+// (original work)
+// Copyright 2024 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package params
+
+import (
+ "math/big"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/types/accounts"
+)
+
+const (
+ GasLimitBoundDivisor uint64 = 1024 // The bound divisor of the gas limit, used in update calculations.
+ MinBlockGasLimit uint64 = 5000 // Minimum the block gas limit may ever be.
+ MaxBlockGasLimit uint64 = 0x7fffffffffffffff // Maximum the block gas limit may ever be.
+ MaxTxnGasLimit uint64 = 16_777_216 // See EIP-7825: Transaction Gas Limit Cap.
+ GenesisGasLimit uint64 = 4712388 // Gas limit of the Genesis block.
+
+ MaximumExtraDataSize uint64 = 32 // Maximum size extra data may be after Genesis.
+ CallValueTransferGas uint64 = 9000 // Paid for CALL when the value transfer is non-zero.
+ CallNewAccountGas uint64 = 25000 // Paid for CALL when the destination address didn't exist prior.
+ TxGas uint64 = 21000 // Per transaction not creating a contract. NOTE: Not payable on data of calls between transactions.
+ TxGasContractCreation uint64 = 53000 // Per transaction that creates a contract. NOTE: Not payable on data of calls between transactions.
+ TxAAGas uint64 = 15000 // Per account abstraction transaction
+ TxDataZeroGas uint64 = 4 // Per byte of data attached to a transaction that equals zero. NOTE: Not payable on data of calls between transactions.
+
+ QuadCoeffDiv uint64 = 512 // Divisor for the quadratic particle of the memory cost equation.
+ LogDataGas uint64 = 8 // Per byte in a LOG* operation's data.
+ CallStipend uint64 = 2300 // Free gas given at beginning of call.
+
+ Keccak256Gas uint64 = 30 // Once per KECCAK256 operation.
+ Keccak256WordGas uint64 = 6 // Once per word of the KECCAK256 operation's data.
+ InitCodeWordGas uint64 = 2 // Once per word of the init code when creating a contract.
+
+ SstoreSetGas uint64 = 20000 // Once per SSTORE operation that sets a storage slot from zero to non-zero.
+ SstoreResetGas uint64 = 5000 // Once per SSTORE operation if the zeroness changes from zero.
+ SstoreClearGas uint64 = 5000 // Once per SSTORE operation if the zeroness doesn't change.
+ SstoreRefundGas uint64 = 15000 // Once per SSTORE operation if the zeroness changes to zero.
+
+ NetSstoreNoopGas uint64 = 200 // Once per SSTORE operation if the value doesn't change.
+ NetSstoreInitGas uint64 = 20000 // Once per SSTORE operation from clean zero.
+ NetSstoreCleanGas uint64 = 5000 // Once per SSTORE operation from clean non-zero.
+ NetSstoreDirtyGas uint64 = 200 // Once per SSTORE operation from dirty.
+
+ NetSstoreClearRefund uint64 = 15000 // Once per SSTORE operation for clearing an originally existing storage slot
+ NetSstoreResetRefund uint64 = 4800 // Once per SSTORE operation for resetting to the original non-zero value
+ NetSstoreResetClearRefund uint64 = 19800 // Once per SSTORE operation for resetting to the original zero value
+
+ LogTopicBytes uint64 = 32 // 32 bytes per topic represents the hash size that gets stored in history.
+ LogTopicHistoryGas uint64 = LogDataGas * LogTopicBytes // History growth gas per topic
+ LogTopicComputationGas uint64 = LogTopicGas - LogTopicHistoryGas // Computation gas per topic
+
+ SstoreSentryGasEIP2200 uint64 = 2300 // Minimum gas required to be present for an SSTORE call, not consumed
+ SstoreSetGasEIP2200 uint64 = 20000 // Once per SSTORE operation from clean zero to non-zero
+ SstoreResetGasEIP2200 uint64 = 5000 // Once per SSTORE operation from clean non-zero to something else
+ SstoreClearsScheduleRefundEIP2200 uint64 = 15000 // Once per SSTORE operation for clearing an originally existing storage slot
+
+ ColdAccountAccessCostEIP2929 = uint64(2600) // COLD_ACCOUNT_ACCESS_COST
+ ColdSloadCostEIP2929 = uint64(2100) // COLD_SLOAD_COST
+ WarmStorageReadCostEIP2929 = uint64(100) // WARM_STORAGE_READ_COST
+
+ // In EIP-2200: SstoreResetGas was 5000.
+ // In EIP-2929: SstoreResetGas was changed to '5000 - COLD_SLOAD_COST'.
+ // In EIP-3529: SSTORE_CLEARS_SCHEDULE is defined as SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST
+ // Which becomes: 5000 - 2100 + 1900 = 4800
+ SstoreClearsScheduleRefundEIP3529 = SstoreResetGasEIP2200 - ColdSloadCostEIP2929 + TxAccessListStorageKeyGas
+
+ JumpdestGas uint64 = 1 // Once per JUMPDEST operation.
+ EpochDuration uint64 = 30000 // Duration between proof-of-work epochs.
+
+ CreateDataGas uint64 = 200 //
+ CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack.
+ ExpGas uint64 = 10 // Once per EXP instruction
+ LogGas uint64 = 375 // Per LOG* operation.
+ CopyGas uint64 = 3 //
+ StackLimit uint64 = 1024 // Maximum size of VM stack allowed.
+ TierStepGas uint64 = 0 // Once per operation, for a selection of them.
+ LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas.
+ CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction.
+ Create2Gas uint64 = 32000 // Once per CREATE2 operation
+ SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation.
+ MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL.
+
+ TxDataNonZeroGasFrontier uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions.
+ TxDataNonZeroGasEIP2028 uint64 = 16 // Per byte of non zero data attached to a transaction after EIP 2028 (part in Istanbul)
+ TxAccessListAddressGas uint64 = 2400 // Per address specified in EIP 2930 access list
+ TxAccessListStorageKeyGas uint64 = 1900 // Per storage key specified in EIP 2930 access list
+ TxTotalCostFloorPerToken uint64 = 10 // Per token of calldata in a transaction, as a minimum the txn must pay (EIP-7623)
+
+ // These have been changed during the course of the chain
+ CallGasFrontier uint64 = 40 // Once per CALL operation & message call transaction.
+ CallGasEIP150 uint64 = 700 // Static portion of gas for CALL-derivatives after EIP 150 (Tangerine)
+ BalanceGasFrontier uint64 = 20 // The cost of a BALANCE operation
+ BalanceGasEIP150 uint64 = 400 // The cost of a BALANCE operation after Tangerine
+ BalanceGasEIP1884 uint64 = 700 // The cost of a BALANCE operation after EIP 1884 (part of Istanbul)
+ ExtcodeSizeGasFrontier uint64 = 20 // Cost of EXTCODESIZE before EIP 150 (Tangerine)
+ ExtcodeSizeGasEIP150 uint64 = 700 // Cost of EXTCODESIZE after EIP 150 (Tangerine)
+ SloadGasFrontier uint64 = 50
+ SloadGasEIP150 uint64 = 200
+ SloadGasEIP1884 uint64 = 800 // Cost of SLOAD after EIP 1884 (part of Istanbul)
+ SloadGasEIP2200 uint64 = 800 // Cost of SLOAD after EIP 2200 (part of Istanbul)
+ ExtcodeHashGasConstantinople uint64 = 400 // Cost of EXTCODEHASH (introduced in Constantinople)
+ ExtcodeHashGasEIP1884 uint64 = 700 // Cost of EXTCODEHASH after EIP 1884 (part in Istanbul)
+ SelfdestructGasEIP150 uint64 = 5000 // Cost of SELFDESTRUCT post EIP 150 (Tangerine)
+
+ // EXP has a dynamic portion depending on the size of the exponent
+ ExpByteFrontier uint64 = 10 // was set to 10 in Frontier
+ ExpByteEIP160 uint64 = 50 // was raised to 50 during EIP 160 (Spurious Dragon)
+
+ // Extcodecopy has a dynamic AND a static cost. This represents only the
+ // static portion of the gas. It was changed during EIP 150 (Tangerine)
+ ExtcodeCopyBaseFrontier uint64 = 20
+ ExtcodeCopyBaseEIP150 uint64 = 700
+
+ // CreateBySelfdestructGas is used when the refunded account is one that does
+ // not exist. This logic is similar to call.
+ // Introduced in Tangerine Whistle (Eip 150)
+ CreateBySelfdestructGas uint64 = 25000
+
+ BaseFeeChangeDenominator = 8 // Bounds the amount the base fee can change between blocks.
+ BaseFeeChangeDenominatorPostDelhi = 16 // Bounds the amount the base fee can change between blocks post delhi hard fork for polygon networks.
+ BaseFeeChangeDenominatorPostBhilai = 64 // Bounds the amount the base fee can change between blocks post bhilai hard fork for polygon networks.
+ ElasticityMultiplier = 2 // Bounds the maximum gas limit an EIP-1559 block may have.
+ InitialBaseFee = 1000000000 // Initial base fee for EIP-1559 blocks.
+
+ MaxCodeSize = 24576 // Maximum bytecode to permit for a contract
+ MaxCodeSizePostAhmedabad = 32768 // Maximum bytecode to permit for a contract post Ahmedabad hard fork (bor / polygon pos) (32KB)
+ MaxInitCodeSize = 2 * MaxCodeSize // Maximum initcode to permit in a creation transaction and create instructions
+
+ // Precompiled contract gas prices
+
+ TendermintHeaderValidateGas uint64 = 3000 // Gas to validate a Tendermint consensus state
+ IAVLMerkleProofValidateGas uint64 = 3000 // Gas to validate an IAVL Merkle proof
+
+ EcrecoverGas uint64 = 3000 // Elliptic curve sender recovery gas price
+ Sha256BaseGas uint64 = 60 // Base price for a SHA256 operation
+ Sha256PerWordGas uint64 = 12 // Per-word price for a SHA256 operation
+ Ripemd160BaseGas uint64 = 600 // Base price for a RIPEMD160 operation
+ Ripemd160PerWordGas uint64 = 120 // Per-word price for a RIPEMD160 operation
+ IdentityBaseGas uint64 = 15 // Base price for a data copy operation
+ IdentityPerWordGas uint64 = 3 // Per-word price for a data copy operation
+
+ Bn254AddGasByzantium uint64 = 500 // Byzantium gas needed for an elliptic curve addition
+ Bn254AddGasIstanbul uint64 = 150 // Gas needed for an elliptic curve addition
+ Bn254ScalarMulGasByzantium uint64 = 40000 // Byzantium gas needed for an elliptic curve scalar multiplication
+ Bn254ScalarMulGasIstanbul uint64 = 6000 // Gas needed for an elliptic curve scalar multiplication
+ Bn254PairingBaseGasByzantium uint64 = 100000 // Byzantium base price for an elliptic curve pairing check
+ Bn254PairingBaseGasIstanbul uint64 = 45000 // Base price for an elliptic curve pairing check
+ Bn254PairingPerPointGasByzantium uint64 = 80000 // Byzantium per-point price for an elliptic curve pairing check
+ Bn254PairingPerPointGasIstanbul uint64 = 34000 // Per-point price for an elliptic curve pairing check
+
+ Bls12381G1AddGas uint64 = 375 // Price for BLS12-381 elliptic curve G1 point addition
+ Bls12381G1MulGas uint64 = 12000 // Price for BLS12-381 elliptic curve G1 point scalar multiplication
+ Bls12381G2AddGas uint64 = 600 // Price for BLS12-381 elliptic curve G2 point addition
+ Bls12381G2MulGas uint64 = 22500 // Price for BLS12-381 elliptic curve G2 point scalar multiplication
+ Bls12381PairingBaseGas uint64 = 37700 // Base gas price for BLS12-381 elliptic curve pairing check
+ Bls12381PairingPerPairGas uint64 = 32600 // Per-point pair gas price for BLS12-381 elliptic curve pairing check
+ Bls12381MapFpToG1Gas uint64 = 5500 // Gas price for BLS12-381 mapping field element to G1 operation
+ Bls12381MapFp2ToG2Gas uint64 = 23800 // Gas price for BLS12-381 mapping field element to G2 operation
+
+ // The Refund Quotient is the cap on how much of the used gas can be refunded. Before EIP-3529,
+ // up to half the consumed gas could be refunded.
+ RefundQuotient uint64 = 2
+ RefundQuotientEIP3529 uint64 = 5 // After EIP-3529: refunds are capped to gasUsed / 5
+
+ // EIP-4844: Shard Blob Transactions
+ PointEvaluationGas uint64 = 50000
+ FieldElementsPerBlob = 4096 // each field element is 32 bytes
+ BlobSize = FieldElementsPerBlob * 32
+ GasPerBlob uint64 = 1 << 17
+ BlobBaseCost uint64 = 1 << 13 // EIP-7918: Blob base fee bounded by execution cost
+
+ // EIP-7594: PeerDAS - Peer Data Availability Sampling
+ // See https://github.com/ethereum/consensus-specs/blob/dev/specs/fulu/polynomial-commitments-sampling.md
+ FieldElementsPerExtBlob = 2 * FieldElementsPerBlob // Number of field elements in a Reed-Solomon extended blob
+ FieldElementsPerCell uint64 = 64 // Number of Field elements in a cell
+ BytesPerCell = FieldElementsPerCell * 32 // The number of bytes in a cell
+ CellsPerExtBlob = FieldElementsPerExtBlob / FieldElementsPerCell // The number of cells in an extended blob
+ MaxBlobsPerTxn = 6 // https://github.com/ethereum/EIPs/pull/9981
+
+ // PIP-27: secp256r1 elliptic curve signature verifier gas price
+ P256VerifyGas uint64 = 3450
+ P256VerifyGasEIP7951 uint64 = 6900
+
+ // EIP-2935: Historical block hashes in state
+ BlockHashHistoryServeWindow uint64 = 8191
+ BlockHashOldWindow uint64 = 256
+
+ // EIP-7702: Set EOA account code
+ SetCodeMagicPrefix = byte(0x05)
+ PerEmptyAccountCost = 25000
+ PerAuthBaseCost = 12500
+
+ // EIP-7934: RLP Execution Block Size Limit
+ MaxBlockSize = 10_485_760 // 10 MiB
+ MaxBlockSizeSafetyMargin = 2_097_152 // 2 MiB
+ MaxRlpBlockSize = MaxBlockSize - MaxBlockSizeSafetyMargin
+)
+
+var (
+ // EIP-7702: Set EOA account code
+ DelegatedDesignationPrefix = []byte{0xef, 0x01, 0x00}
+ DelegatedCodeHash = common.HexToHash("0xeadcdba66a79ab5dce91622d1d75c8cff5cff0b96944c3bf1072cd08ce018329")
+)
+
+// EIP-4788: Beacon block root in the EVM
+var BeaconRootsAddress = accounts.InternAddress(common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02"))
+
+// EIP-2935: Historical block hashes in state
+var HistoryStorageAddress = accounts.InternAddress(common.HexToAddress("0x0000F90827F1C53a10cb7A02335B175320002935"))
+
+// EIP-7002: Execution layer triggerable withdrawals
+var WithdrawalRequestAddress = accounts.InternAddress(common.HexToAddress("0x00000961Ef480Eb55e80D19ad83579A64c007002"))
+
+// EIP-7251
+var ConsolidationRequestAddress = accounts.InternAddress(common.HexToAddress("0x0000BBdDc7CE488642fb579F8B00f3a590007251"))
+
+var (
+ // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
+ Bls12381MSMDiscountTableG1 = [128]uint64{1000, 949, 848, 797, 764, 750, 738, 728, 719, 712, 705, 698, 692, 687, 682, 677, 673, 669, 665, 661, 658, 654, 651, 648, 645, 642, 640, 637, 635, 632, 630, 627, 625, 623, 621, 619, 617, 615, 613, 611, 609, 608, 606, 604, 603, 601, 599, 598, 596, 595, 593, 592, 591, 589, 588, 586, 585, 584, 582, 581, 580, 579, 577, 576, 575, 574, 573, 572, 570, 569, 568, 567, 566, 565, 564, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 551, 550, 549, 548, 547, 547, 546, 545, 544, 543, 542, 541, 540, 540, 539, 538, 537, 536, 536, 535, 534, 533, 532, 532, 531, 530, 529, 528, 528, 527, 526, 525, 525, 524, 523, 522, 522, 521, 520, 520, 519}
+
+ Bls12381MSMDiscountTableG2 = [128]uint64{1000, 1000, 923, 884, 855, 832, 812, 796, 782, 770, 759, 749, 740, 732, 724, 717, 711, 704, 699, 693, 688, 683, 679, 674, 670, 666, 663, 659, 655, 652, 649, 646, 643, 640, 637, 634, 632, 629, 627, 624, 622, 620, 618, 615, 613, 611, 609, 607, 606, 604, 602, 600, 598, 597, 595, 593, 592, 590, 589, 587, 586, 584, 583, 582, 580, 579, 578, 576, 575, 574, 573, 571, 570, 569, 568, 567, 566, 565, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 552, 551, 550, 549, 548, 547, 546, 545, 545, 544, 543, 542, 541, 541, 540, 539, 538, 537, 537, 536, 535, 535, 534, 533, 532, 532, 531, 530, 530, 529, 528, 528, 527, 526, 526, 525, 524, 524}
+)
+
+var (
+ DifficultyBoundDivisor = big.NewInt(2048) // The bound divisor of the difficulty, used in the update calculations.
+ GenesisDifficulty = big.NewInt(131072) // Difficulty of the Genesis block.
+ MinimumDifficulty = big.NewInt(131072) // The minimum that the difficulty may ever be.
+ DurationLimit = big.NewInt(13) // The decision boundary on the blocktime duration used to determine whether difficulty should go up or not.
+)
+
+// See EIP-7840: Add blob schedule to EL config files
+type BlobConfig struct {
+ BaseFeeUpdateFraction uint64 `json:"baseFeeUpdateFraction"`
+ Max uint64 `json:"max"`
+ Target uint64 `json:"target"`
+}
+
+var DefaultCancunBlobConfig = BlobConfig{
+ Target: 3,
+ Max: 6,
+ BaseFeeUpdateFraction: 3338477,
+}
+
+var DefaultPragueBlobConfig = BlobConfig{
+ Target: 6,
+ Max: 9,
+ BaseFeeUpdateFraction: 5007716,
+}
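
The constants above feed the EIP-4844 blob-fee math used elsewhere in this change: GasPerBlob (1 << 17 = 131072) deliberately equals BlobSize, so the per-block blob budget is simply Max * GasPerBlob, and the blob base fee follows the spec's fake exponential with BaseFeeUpdateFraction as the denominator. A minimal standalone sketch of that arithmetic (it mirrors the EIP-4844 pseudocode rather than Erigon's FakeExponential helper, and assumes the spec's minimum blob gas price of 1):

	package main

	import (
		"fmt"
		"math/big"
	)

	// fakeExponential approximates factor * e^(numerator/denominator) using the
	// integer-only Taylor-series loop from the EIP-4844 spec.
	func fakeExponential(factor, numerator, denominator *big.Int) *big.Int {
		output := new(big.Int)
		accum := new(big.Int).Mul(factor, denominator)
		for i := int64(1); accum.Sign() > 0; i++ {
			output.Add(output, accum)
			accum.Mul(accum, numerator)
			accum.Div(accum, new(big.Int).Mul(denominator, big.NewInt(i)))
		}
		return output.Div(output, denominator)
	}

	func main() {
		const gasPerBlob = 1 << 17                 // 131072, equal to BlobSize = 4096 * 32 bytes
		maxBlobGasCancun := uint64(6) * gasPerBlob // DefaultCancunBlobConfig.Max blobs per block
		fmt.Println("max blob gas per block (Cancun):", maxBlobGasCancun) // 786432

		// Blob base fee with an excess of three blobs' worth of gas.
		minBlobGasPrice := big.NewInt(1)      // assumed EIP-4844 minimum
		updateFraction := big.NewInt(3338477) // DefaultCancunBlobConfig.BaseFeeUpdateFraction
		excess := big.NewInt(3 * gasPerBlob)
		fmt.Println("blob base fee:", fakeExponential(minBlobGasPrice, excess, updateFraction))
	}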
diff --git a/execution/chain/rules.go b/execution/chain/rules.go
index aece1b751f7..e782582ded1 100644
--- a/execution/chain/rules.go
+++ b/execution/chain/rules.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Erigon Authors
+// Copyright 2025 The Erigon Authors
// This file is part of Erigon.
//
// Erigon is free software: you can redistribute it and/or modify
diff --git a/execution/chain/spec/config_test.go b/execution/chain/spec/config_test.go
index e0821d1e421..1cc7e6f9216 100644
--- a/execution/chain/spec/config_test.go
+++ b/execution/chain/spec/config_test.go
@@ -108,15 +108,15 @@ func TestMainnetBlobSchedule(t *testing.T) {
c := Mainnet.Config
// Original EIP-4844 values
time := c.CancunTime.Uint64()
- assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(6), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(3), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(3338477), c.GetBlobGasPriceUpdateFraction(time, 0))
// EIP-7691: Blob throughput increase
time = c.PragueTime.Uint64()
- assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(9), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(6), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(5007716), c.GetBlobGasPriceUpdateFraction(time, 0))
}
func TestGnosisBlobSchedule(t *testing.T) {
@@ -124,12 +124,12 @@ func TestGnosisBlobSchedule(t *testing.T) {
// Cancun values
time := c.CancunTime.Uint64()
- assert.Equal(t, uint64(2), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(1), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(1112826), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(2), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(1), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(1112826), c.GetBlobGasPriceUpdateFraction(time, 0))
// should remain the same in Pectra for Gnosis
- assert.Equal(t, uint64(2), c.GetMaxBlobsPerBlock(time))
- assert.Equal(t, uint64(1), c.GetTargetBlobsPerBlock(time))
- assert.Equal(t, uint64(1112826), c.GetBlobGasPriceUpdateFraction(time))
+ assert.Equal(t, uint64(2), c.GetMaxBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(1), c.GetTargetBlobsPerBlock(time, 0))
+ assert.Equal(t, uint64(1112826), c.GetBlobGasPriceUpdateFraction(time, 0))
}
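
The extra argument threaded through these getters is the ArbOS version; non-Arbitrum chains pass 0, while Arbitrum call sites later in this diff derive it from the header extra data. A short sketch of that calling convention (helper names taken from this diff, imports omitted):

	// maxBlobsFor is a sketch of how callers pick the second argument.
	func maxBlobsFor(cfg *chain.Config, header *types.Header) uint64 {
		var arbosVersion uint64
		if cfg.IsArbitrum() {
			arbosVersion = types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
		}
		return cfg.GetMaxBlobsPerBlock(header.Time, arbosVersion)
	}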
diff --git a/execution/engineapi/engine_server.go b/execution/engineapi/engine_server.go
index 4974ff75bde..29f548ce533 100644
--- a/execution/engineapi/engine_server.go
+++ b/execution/engineapi/engine_server.go
@@ -168,10 +168,11 @@ func (e *EngineServer) Start(
}
func (s *EngineServer) checkWithdrawalsPresence(time uint64, withdrawals types.Withdrawals) error {
- if !s.config.IsShanghai(time) && withdrawals != nil {
+ var arbosVersion uint64
+ if !s.config.IsShanghai(time, arbosVersion) && withdrawals != nil {
return &rpc.InvalidParamsError{Message: "withdrawals before Shanghai"}
}
- if s.config.IsShanghai(time) && withdrawals == nil {
+ if s.config.IsShanghai(time, arbosVersion) && withdrawals == nil {
return &rpc.InvalidParamsError{Message: "missing withdrawals list"}
}
return nil
@@ -308,10 +309,10 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi
}
log.Debug(fmt.Sprintf("bal from header: %s", blockAccessList.DebugString()))
- if (!s.config.IsCancun(header.Time) && version >= clparams.DenebVersion) ||
- (s.config.IsCancun(header.Time) && version < clparams.DenebVersion) ||
- (!s.config.IsPrague(header.Time) && version >= clparams.ElectraVersion) ||
- (s.config.IsPrague(header.Time) && version < clparams.ElectraVersion) || // osaka has no new newPayload method
+ if (!s.config.IsCancun(header.Time, 0) && version >= clparams.DenebVersion) ||
+ (s.config.IsCancun(header.Time, 0) && version < clparams.DenebVersion) ||
+ (!s.config.IsPrague(header.Time, 0) && version >= clparams.ElectraVersion) ||
+ (s.config.IsPrague(header.Time, 0) && version < clparams.ElectraVersion) || // osaka has no new newPayload method
(!s.config.IsAmsterdam(header.Time) && version >= clparams.GloasVersion) ||
(s.config.IsAmsterdam(header.Time) && version < clparams.GloasVersion) {
return nil, &rpc.UnsupportedForkError{Message: "Unsupported fork"}
@@ -352,7 +353,8 @@ func (s *EngineServer) newPayload(ctx context.Context, req *engine_types.Executi
}
if version >= clparams.DenebVersion {
- err := misc.ValidateBlobs(req.BlobGasUsed.Uint64(), s.config.GetMaxBlobGasPerBlock(header.Time), s.config.GetMaxBlobsPerBlock(header.Time), expectedBlobHashes, &transactions)
+ arbOsVersion := types.GetArbOSVersion(&header, s.config)
+ err := misc.ValidateBlobs(req.BlobGasUsed.Uint64(), s.config.GetMaxBlobGasPerBlock(header.Time, arbOsVersion), s.config.GetMaxBlobsPerBlock(header.Time, arbOsVersion), expectedBlobHashes, &transactions)
if errors.Is(err, misc.ErrNilBlobHashes) {
return nil, &rpc.InvalidParamsError{Message: "nil blob hashes array"}
}
@@ -589,13 +591,15 @@ func (s *EngineServer) getPayload(ctx context.Context, payloadId uint64, version
}
}
+ bn := data.ExecutionPayload.BlockNumber
ts := data.ExecutionPayload.Timestamp
- if (!s.config.IsCancun(ts) && version >= clparams.DenebVersion) ||
- (s.config.IsCancun(ts) && version < clparams.DenebVersion) ||
- (!s.config.IsPrague(ts) && version >= clparams.ElectraVersion) ||
- (s.config.IsPrague(ts) && version < clparams.ElectraVersion) ||
- (!s.config.IsOsaka(ts) && version >= clparams.FuluVersion) ||
- (s.config.IsOsaka(ts) && version < clparams.FuluVersion) ||
+ var arbosVersion uint64
+ if (!s.config.IsCancun(ts, arbosVersion) && version >= clparams.DenebVersion) ||
+ (s.config.IsCancun(ts, arbosVersion) && version < clparams.DenebVersion) ||
+ (!s.config.IsPrague(ts, arbosVersion) && version >= clparams.ElectraVersion) ||
+ (s.config.IsPrague(ts, arbosVersion) && version < clparams.ElectraVersion) ||
+ (!s.config.IsOsaka(ts, arbosVersion) && version >= clparams.FuluVersion) ||
+ (s.config.IsOsaka(ts, arbosVersion) && version < clparams.FuluVersion) ||
(!s.config.IsAmsterdam(ts) && version >= clparams.GloasVersion) ||
(s.config.IsAmsterdam(ts) && version < clparams.GloasVersion) {
return nil, &rpc.UnsupportedForkError{Message: "Unsupported fork"}
@@ -686,10 +690,11 @@ func (s *EngineServer) forkchoiceUpdated(ctx context.Context, forkchoiceState *e
}
timestamp := uint64(payloadAttributes.Timestamp)
- if !s.config.IsCancun(timestamp) && version >= clparams.DenebVersion { // V3 before cancun
+ var arbosVersion uint64
+ if !s.config.IsCancun(timestamp, arbosVersion) && version >= clparams.DenebVersion { // V3 before cancun
return nil, &rpc.UnsupportedForkError{Message: "Unsupported fork"}
}
- if s.config.IsCancun(timestamp) && version < clparams.DenebVersion { // Not V3 after cancun
+ if s.config.IsCancun(timestamp, arbosVersion) && version < clparams.DenebVersion { // Not V3 after cancun
return nil, &rpc.UnsupportedForkError{Message: "Unsupported fork"}
}
diff --git a/execution/exec/historical_trace_worker.go b/execution/exec/historical_trace_worker.go
index 7d4ff4ada87..9bbf742eca3 100644
--- a/execution/exec/historical_trace_worker.go
+++ b/execution/exec/historical_trace_worker.go
@@ -115,7 +115,7 @@ func NewHistoricalTraceWorker(
vmCfg: &vm.Config{},
}
ie.evm = vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, execArgs.ChainConfig, *ie.vmCfg)
- ie.taskGasPool.AddBlobGas(execArgs.ChainConfig.GetMaxBlobGasPerBlock(0))
+ ie.taskGasPool.AddBlobGas(execArgs.ChainConfig.GetMaxBlobGasPerBlock(0, ie.evm.Context.ArbOSVersion))
ie.ibs = state.New(ie.stateReader)
return ie
}
@@ -202,7 +202,7 @@ func (rw *HistoricalTraceWorker) RunTxTask(txTask *TxTask) *TxResult {
default:
tracer := calltracer.NewCallTracer(nil)
result.Err = func() error {
- rw.taskGasPool.Reset(txTask.Tx().GetGasLimit(), cc.GetMaxBlobGasPerBlock(header.Time))
+ rw.taskGasPool.Reset(txTask.Tx().GetGasLimit(), cc.GetMaxBlobGasPerBlock(header.Time, rw.evm.Context.ArbOSVersion))
rw.vmCfg.Tracer = tracer.Tracer().Hooks
ibs.SetTxContext(txTask.BlockNumber(), txTask.TxIndex)
txn := txTask.Tx()
diff --git a/execution/exec3/state.go b/execution/exec3/state.go
new file mode 100644
index 00000000000..248267b4010
--- /dev/null
+++ b/execution/exec3/state.go
@@ -0,0 +1,526 @@
+// Copyright 2024 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package exec3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+
+ "github.com/erigontech/nitro-erigon/arbos"
+ "github.com/erigontech/nitro-erigon/arbos/arbosState"
+ "github.com/erigontech/nitro-erigon/arbos/arbostypes"
+ "github.com/erigontech/nitro-erigon/gethhook"
+ "github.com/erigontech/nitro-erigon/statetransfer"
+
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/dbg"
+ "github.com/erigontech/erigon/core"
+ "github.com/erigontech/erigon/core/genesiswrite"
+ "github.com/erigontech/erigon/core/state"
+ "github.com/erigontech/erigon/core/tracing"
+ "github.com/erigontech/erigon/core/vm"
+ "github.com/erigontech/erigon/core/vm/evmtypes"
+ "github.com/erigontech/erigon/db/datadir"
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/eth/consensuschain"
+ "github.com/erigontech/erigon/execution/aa"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/consensus"
+ "github.com/erigontech/erigon/execution/exec3/calltracer"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/erigontech/erigon/log/v3"
+ "github.com/erigontech/erigon/turbo/services"
+ "github.com/erigontech/erigon/turbo/shards"
+)
+
+var arbTrace bool
+
+func init() {
+ gethhook.RequireHookedGeth()
+ arbTrace = dbg.EnvBool("ARB_TRACE", false)
+}
+
+var noop = state.NewNoopWriter()
+
+type Worker struct {
+ lock sync.Locker
+ logger log.Logger
+ chainDb kv.RoDB
+ chainTx kv.TemporalTx
+ background bool // if true - worker does manage RoTx (begin/rollback) in .ResetTx()
+ blockReader services.FullBlockReader
+ in *state.QueueWithRetry
+ rs *state.ParallelExecutionState
+ stateWriter *state.Writer
+ stateReader state.ResettableStateReader
+ historyMode bool // if true - stateReader is HistoryReaderV3, otherwise it's state reader
+ chainConfig *chain.Config
+
+ ctx context.Context
+ engine consensus.Engine
+ genesis *types.Genesis
+ resultCh *state.ResultsQueue
+ chain consensus.ChainReader
+
+ callTracer *calltracer.CallTracer
+ taskGasPool *core.GasPool
+ hooks *tracing.Hooks
+
+ evm *vm.EVM
+ ibs *state.IntraBlockState
+ vmCfg vm.Config
+
+ dirs datadir.Dirs
+
+ isMining bool
+}
+
+func NewWorker(lock sync.Locker, logger log.Logger, hooks *tracing.Hooks, ctx context.Context, background bool, chainDb kv.RoDB, in *state.QueueWithRetry, blockReader services.FullBlockReader, chainConfig *chain.Config, genesis *types.Genesis, results *state.ResultsQueue, engine consensus.Engine, dirs datadir.Dirs, isMining bool) *Worker {
+ w := &Worker{
+ lock: lock,
+ chainDb: chainDb,
+ in: in,
+
+ logger: logger,
+ ctx: ctx,
+
+ background: background,
+ blockReader: blockReader,
+
+ chainConfig: chainConfig,
+ genesis: genesis,
+ resultCh: results,
+ engine: engine,
+
+ evm: vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, chainConfig, vm.Config{}),
+ callTracer: calltracer.NewCallTracer(hooks),
+ taskGasPool: new(core.GasPool),
+ hooks: hooks,
+
+ dirs: dirs,
+
+ isMining: isMining,
+ }
+ w.vmCfg = vm.Config{Tracer: w.callTracer.Tracer().Hooks, NoBaseFee: true}
+ w.evm = vm.NewEVM(evmtypes.BlockContext{}, evmtypes.TxContext{}, nil, chainConfig, w.vmCfg)
+ arbOSVersion := w.evm.Context.ArbOSVersion
+ w.taskGasPool.AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(0, arbOSVersion))
+ w.ibs = state.New(w.stateReader)
+ return w
+}
+
+func (rw *Worker) LogLRUStats() { rw.evm.Config().JumpDestCache.LogStats() }
+
+func (rw *Worker) ResetState(rs *state.ParallelExecutionState, accumulator *shards.Accumulator) {
+ rw.rs = rs
+ if rw.background {
+ rw.SetReader(state.NewReaderParallelV3(rs.Domains()))
+ } else {
+ rw.SetReader(state.NewReaderV3(rs.TemporalGetter()))
+ }
+ rw.stateWriter = state.NewWriter(rs.TemporalPutDel(), accumulator, 0)
+}
+
+func (rw *Worker) SetGaspool(gp *core.GasPool) {
+ rw.taskGasPool = gp
+}
+
+func (rw *Worker) Tx() kv.TemporalTx { return rw.chainTx }
+func (rw *Worker) DiscardReadList() { rw.stateReader.DiscardReadList() }
+func (rw *Worker) ResetTx(chainTx kv.Tx) {
+ if rw.background && rw.chainTx != nil {
+ rw.chainTx.Rollback()
+ rw.chainTx = nil
+ }
+ if chainTx != nil {
+ rw.chainTx = chainTx.(kv.TemporalTx)
+ rw.stateReader.SetTx(rw.chainTx)
+ rw.chain = consensuschain.NewReader(rw.chainConfig, rw.chainTx, rw.blockReader, rw.logger)
+ }
+}
+
+func (rw *Worker) Run() (err error) {
+ defer func() { // convert panic to err - because it's background workers
+ if rec := recover(); rec != nil {
+ err = fmt.Errorf("exec3.Worker panic: %s, %s", rec, dbg.Stack())
+ }
+ }()
+
+ for txTask, ok := rw.in.Next(rw.ctx); ok; txTask, ok = rw.in.Next(rw.ctx) {
+ //fmt.Println("RTX", txTask.BlockNum, txTask.TxIndex, txTask.TxNum, txTask.Final)
+ rw.RunTxTask(txTask, rw.isMining)
+ if err := rw.resultCh.Add(rw.ctx, txTask); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (rw *Worker) RunTxTask(txTask *state.TxTask, isMining bool) {
+ rw.lock.Lock()
+ defer rw.lock.Unlock()
+ rw.RunTxTaskNoLock(txTask, isMining, false)
+}
+
+// SetReader is used when we need to replay a few transactions from the beginning of a block without breaking
+// processing, e.g. to compute the gas used so far and then switch back to the latest-state reader to continue.
+func (rw *Worker) SetReader(reader state.ResettableStateReader) {
+ rw.stateReader = reader
+ rw.stateReader.SetTx(rw.Tx())
+ rw.ibs.Reset()
+ rw.ibs = state.New(rw.stateReader)
+
+ switch reader.(type) {
+ case *state.HistoryReaderV3:
+ rw.historyMode = true
+ case *state.ReaderV3:
+ rw.historyMode = false
+ default:
+ rw.historyMode = false
+ //fmt.Printf("[worker] unknown reader %T: historyMode is set to disabled\n", reader)
+ }
+}
+
+func (rw *Worker) SetArbitrumWasmDB(wasmDB wasmdb.WasmIface) {
+ if rw.chainConfig.IsArbitrum() {
+ rw.ibs.SetWasmDB(wasmDB)
+ }
+}
+
+func (rw *Worker) RunTxTaskNoLock(txTask *state.TxTask, isMining, skipPostEvaluation bool) {
+ if txTask.HistoryExecution && !rw.historyMode {
+ // If execution was cancelled and commitment happened in the middle of a block, we have to re-process the block
+ // from the beginning up to the committed txNum and only then disable history mode.
+ // This is needed to correctly evaluate spent gas, among other things.
+ rw.SetReader(state.NewHistoryReaderV3())
+ } else if !txTask.HistoryExecution && rw.historyMode {
+ if rw.background {
+ rw.SetReader(state.NewReaderParallelV3(rw.rs.Domains()))
+ } else {
+ rw.SetReader(state.NewReaderV3(rw.rs.TemporalGetter()))
+ }
+ }
+ if rw.background && rw.chainTx == nil {
+ var err error
+ if rw.chainTx, err = rw.chainDb.(kv.TemporalRoDB).BeginTemporalRo(rw.ctx); err != nil {
+ panic(err)
+ }
+ rw.stateReader.SetTx(rw.chainTx)
+ rw.chain = consensuschain.NewReader(rw.chainConfig, rw.chainTx, rw.blockReader, rw.logger)
+ }
+ txTask.Error = nil
+
+ rw.stateReader.SetTxNum(txTask.TxNum)
+ rw.stateWriter.SetTxNum(txTask.TxNum)
+ rw.rs.Domains().SetTxNum(txTask.TxNum)
+ rw.stateReader.ResetReadSet()
+ rw.stateWriter.ResetWriteSet()
+ if rw.chainConfig.IsArbitrum() && txTask.BlockNum > 0 {
+ if rw.evm.ProcessingHookSet.CompareAndSwap(false, true) {
+ rw.evm.ProcessingHook = arbos.NewTxProcessorIBS(rw.evm, state.NewArbitrum(rw.ibs), txTask.TxAsMessage)
+ } else {
+ rw.evm.ProcessingHook.SetMessage(txTask.TxAsMessage, state.NewArbitrum(rw.ibs))
+ }
+ }
+
+ rw.ibs.Reset()
+ ibs, hooks, cc := rw.ibs, rw.hooks, rw.chainConfig
+ rw.ibs.SetTrace(arbTrace)
+ ibs.SetHooks(hooks)
+
+ var err error
+ rules, header := txTask.Rules, txTask.Header
+ if arbTrace {
+ fmt.Printf("txNum=%d blockNum=%d history=%t\n", txTask.TxNum, txTask.BlockNum, txTask.HistoryExecution)
+ }
+
+ switch {
+ case txTask.TxIndex == -1:
+ if txTask.BlockNum == 0 {
+
+ //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum)
+ _, ibs, err = genesiswrite.GenesisToBlock(nil, rw.genesis, rw.dirs, rw.logger)
+ if err != nil {
+ panic(err)
+ }
+ // For Genesis, rules should be empty, so that empty accounts can be included
+ rules = &chain.Rules{}
+
+ if rw.chainConfig.IsArbitrum() { // initialize arbos once
+ ibsa := state.NewArbitrum(rw.ibs)
+ accountsPerSync := uint(100000) // const for sep-rollup
+ initMessage, err := arbostypes.GetSepoliaRollupInitMessage()
+ if err != nil {
+ rw.logger.Error("Failed to get Sepolia Rollup init message", "err", err)
+ return
+ }
+
+ initData := statetransfer.ArbosInitializationInfo{
+ NextBlockNumber: 0,
+ }
+ initReader := statetransfer.NewMemoryInitDataReader(&initData)
+ stateRoot, err := arbosState.InitializeArbosInDatabase(ibsa, rw.rs.Domains(), rw.rs.TemporalPutDel(), initReader, rw.chainConfig, initMessage, rw.evm.Context.Time, accountsPerSync)
+ if err != nil {
+ rw.logger.Error("Failed to init ArbOS", "err", err)
+ return
+ }
+ _ = stateRoot
+ rw.logger.Info("ArbOS initialized", "stateRoot", stateRoot) // todo this produces invalid state isnt it?
+ }
+ break
+ }
+
+ // Block initialisation
+ //fmt.Printf("txNum=%d, blockNum=%d, initialisation of the block\n", txTask.TxNum, txTask.BlockNum)
+ syscall := func(contract common.Address, data []byte, ibs *state.IntraBlockState, header *types.Header, constCall bool) ([]byte, error) {
+ ret, err := core.SysCallContract(contract, data, cc, ibs, header, rw.engine, constCall /* constCall */, rw.vmCfg)
+ return ret, err
+ }
+ rw.engine.Initialize(cc, rw.chain, header, ibs, syscall, rw.logger, hooks)
+ txTask.Error = ibs.FinalizeTx(rules, noop)
+ case txTask.Final:
+ if txTask.BlockNum == 0 {
+ break
+ }
+
+ if arbTrace {
+ fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum)
+ }
+ rw.callTracer.Reset()
+ ibs.SetTxContext(txTask.BlockNum, txTask.TxIndex)
+
+ // End of block transaction in a block
+ syscall := func(contract common.Address, data []byte) ([]byte, error) {
+ ret, err := core.SysCallContract(contract, data, cc, ibs, header, rw.engine, false /* constCall */, rw.vmCfg)
+ txTask.Logs = append(txTask.Logs, ibs.GetRawLogs(txTask.TxIndex)...)
+ return ret, err
+ }
+
+ if isMining {
+ _, _, err = rw.engine.FinalizeAndAssemble(cc, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, nil, rw.logger)
+ } else {
+ _, err = rw.engine.Finalize(cc, types.CopyHeader(header), ibs, txTask.Txs, txTask.Uncles, txTask.BlockReceipts, txTask.Withdrawals, rw.chain, syscall, skipPostEvaluation, rw.logger)
+ }
+ if err != nil {
+ txTask.Error = err
+ } else {
+ txTask.TraceFroms = rw.callTracer.Froms()
+ txTask.TraceTos = rw.callTracer.Tos()
+ if txTask.TraceFroms == nil {
+ txTask.TraceFroms = map[common.Address]struct{}{}
+ }
+ if txTask.TraceTos == nil {
+ txTask.TraceTos = map[common.Address]struct{}{}
+ }
+ txTask.TraceTos[txTask.Coinbase] = struct{}{}
+ for _, uncle := range txTask.Uncles {
+ txTask.TraceTos[uncle.Coinbase] = struct{}{}
+ }
+ }
+ default:
+ rw.taskGasPool.Reset(txTask.Tx.GetGasLimit(), rw.chainConfig.GetMaxBlobGasPerBlock(header.Time, rules.ArbOSVersion)) // ARBITRUM only
+
+ rw.callTracer.Reset()
+ ibs.SetTxContext(txTask.BlockNum, txTask.TxIndex)
+ txn := txTask.Tx
+
+ if txTask.Tx.Type() == types.AccountAbstractionTxType {
+ if !cc.AllowAA {
+ txTask.Error = errors.New("account abstraction transactions are not allowed")
+ break
+ }
+
+ msg, err := txn.AsMessage(types.Signer{}, nil, nil)
+ if err != nil {
+ txTask.Error = err
+ break
+ }
+
+ rw.evm.ResetBetweenBlocks(txTask.EvmBlockContext, core.NewEVMTxContext(msg), ibs, rw.vmCfg, rules)
+ rw.execAATxn(txTask)
+ break
+ }
+
+ msg := txTask.TxAsMessage
+ rw.evm.ResetBetweenBlocks(txTask.EvmBlockContext, core.NewEVMTxContext(msg), ibs, rw.vmCfg, rules)
+
+ if hooks != nil && hooks.OnTxStart != nil {
+ hooks.OnTxStart(rw.evm.GetVMContext(), txn, msg.From())
+ }
+ // MA applytx
+ applyRes, err := core.ApplyMessage(rw.evm, msg, rw.taskGasPool, true /* refunds */, false /* gasBailout */, rw.engine)
+ if err != nil {
+ txTask.Error = err
+ if hooks != nil && hooks.OnTxEnd != nil {
+ hooks.OnTxEnd(nil, err)
+ }
+ } else {
+ txTask.Failed = applyRes.Failed()
+ txTask.GasUsed = applyRes.GasUsed
+ // Update the state with pending changes
+ ibs.SoftFinalise()
+ //txTask.Error = ibs.FinalizeTx(rules, noop)
+ txTask.Logs = ibs.GetRawLogs(txTask.TxIndex)
+ txTask.TraceFroms = rw.callTracer.Froms()
+ txTask.TraceTos = rw.callTracer.Tos()
+
+ txTask.CreateReceipt(rw.Tx())
+ if hooks != nil && hooks.OnTxEnd != nil {
+ hooks.OnTxEnd(txTask.BlockReceipts[txTask.TxIndex], nil)
+ }
+ }
+ }
+ if arbTrace {
+ fmt.Printf("---- txnIdx %d block %d DONE------\n", txTask.TxIndex, txTask.BlockNum)
+ }
+ // Prepare read set, write set and balanceIncrease set and send for serialisation
+ if txTask.Error == nil {
+ txTask.BalanceIncreaseSet = ibs.BalanceIncreaseSet()
+ if arbTrace {
+ for addr, bal := range txTask.BalanceIncreaseSet {
+ fmt.Printf("BalanceIncreaseSet [%x]=>[%d]\n", addr, &(bal.Amount))
+ }
+ }
+ if err = ibs.MakeWriteSet(rules, rw.stateWriter); err != nil {
+ panic(err)
+ }
+ txTask.ReadLists = rw.stateReader.ReadSet()
+ txTask.WriteLists = rw.stateWriter.WriteSet()
+ txTask.AccountPrevs, txTask.AccountDels, txTask.StoragePrevs, txTask.CodePrevs = rw.stateWriter.PrevAndDels()
+ }
+}
+
+func (rw *Worker) execAATxn(txTask *state.TxTask) {
+ if !txTask.InBatch {
+ // this is the first transaction in an AA transaction batch: run all validation frames, then execute each execution frame in its own txtask
+ startIdx := uint64(txTask.TxIndex)
+ endIdx := startIdx + txTask.AAValidationBatchSize
+
+ validationResults := make([]state.AAValidationResult, txTask.AAValidationBatchSize+1)
+ log.Info("🕵️♂️[aa] found AA bundle", "startIdx", startIdx, "endIdx", endIdx)
+
+ var outerErr error
+ for i := startIdx; i <= endIdx; i++ {
+ rw.evm.ResetBetweenBlocks(txTask.EvmBlockContext, core.NewEVMTxContext(txTask.TxAsMessage), rw.ibs, rw.vmCfg, txTask.Rules)
+ // check if next n transactions are AA transactions and run validation
+ if txTask.Txs[i].Type() == types.AccountAbstractionTxType {
+ aaTxn, ok := txTask.Txs[i].(*types.AccountAbstractionTransaction)
+ if !ok {
+ outerErr = fmt.Errorf("invalid transaction type, expected AccountAbstractionTx, got %T", txTask.Tx)
+ break
+ }
+
+ paymasterContext, validationGasUsed, err := aa.ValidateAATransaction(aaTxn, rw.ibs, rw.taskGasPool, txTask.Header, rw.evm, rw.chainConfig)
+ if err != nil {
+ outerErr = err
+ break
+ }
+
+ validationResults[i-startIdx] = state.AAValidationResult{
+ PaymasterContext: paymasterContext,
+ GasUsed: validationGasUsed,
+ }
+ } else {
+ outerErr = fmt.Errorf("invalid txcount, expected txn %d to be type %d", i, types.AccountAbstractionTxType)
+ break
+ }
+ }
+
+ if outerErr != nil {
+ txTask.Error = outerErr
+ return
+ }
+ log.Info("✅[aa] validated AA bundle", "len", endIdx-startIdx+1)
+
+ txTask.ValidationResults = validationResults
+ }
+
+ if len(txTask.ValidationResults) == 0 {
+ txTask.Error = fmt.Errorf("found RIP-7560 but no remaining validation results, txIndex %d", txTask.TxIndex)
+ return
+ }
+
+ aaTxn := txTask.Tx.(*types.AccountAbstractionTransaction) // type cast checked earlier
+ validationRes := txTask.ValidationResults[0]
+ txTask.ValidationResults = txTask.ValidationResults[1:]
+
+ rw.evm.ResetBetweenBlocks(txTask.EvmBlockContext, core.NewEVMTxContext(txTask.TxAsMessage), rw.ibs, rw.vmCfg, txTask.Rules)
+ status, gasUsed, err := aa.ExecuteAATransaction(aaTxn, validationRes.PaymasterContext, validationRes.GasUsed, rw.taskGasPool, rw.evm, txTask.Header, rw.ibs)
+ if err != nil {
+ txTask.Error = err
+ return
+ }
+
+ txTask.Failed = status != 0
+ txTask.GasUsed = gasUsed
+ // Update the state with pending changes
+ rw.ibs.SoftFinalise()
+ txTask.Logs = rw.ibs.GetLogs(txTask.TxIndex, txTask.Tx.Hash(), txTask.BlockNum, txTask.BlockHash)
+ txTask.TraceFroms = rw.callTracer.Froms()
+ txTask.TraceTos = rw.callTracer.Tos()
+ txTask.CreateReceipt(rw.Tx())
+
+ log.Info("🚀[aa] executed AA bundle transaction", "txIndex", txTask.TxIndex, "status", status, "gasUsed", gasUsed)
+}
+
+func NewWorkersPool(lock sync.Locker, accumulator *shards.Accumulator, logger log.Logger, hooks *tracing.Hooks, ctx context.Context, background bool, chainDb kv.RoDB, rs *state.ParallelExecutionState, in *state.QueueWithRetry, blockReader services.FullBlockReader, chainConfig *chain.Config, genesis *types.Genesis, engine consensus.Engine, workerCount int, dirs datadir.Dirs, isMining bool) (reconWorkers []*Worker, applyWorker *Worker, rws *state.ResultsQueue, clear func(), wait func()) {
+ reconWorkers = make([]*Worker, workerCount)
+
+ resultChSize := workerCount * 8
+ rws = state.NewResultsQueue(resultChSize, workerCount)
+ {
+ // we swallow all errors in background workers (except ctx.Cancel), because applyLoop will detect them anyway,
+ // and in applyLoop all errors are critical
+ ctx, cancel := context.WithCancel(ctx)
+ g, ctx := errgroup.WithContext(ctx)
+ for i := 0; i < workerCount; i++ {
+ reconWorkers[i] = NewWorker(lock, logger, hooks, ctx, background, chainDb, in, blockReader, chainConfig, genesis, rws, engine, dirs, isMining)
+ reconWorkers[i].ResetState(rs, accumulator)
+ }
+ if background {
+ for i := 0; i < workerCount; i++ {
+ i := i
+ g.Go(func() error {
+ return reconWorkers[i].Run()
+ })
+ }
+ wait = func() { g.Wait() }
+ }
+
+ var clearDone bool
+ clear = func() {
+ if clearDone {
+ return
+ }
+ clearDone = true
+ cancel()
+ g.Wait()
+ for _, w := range reconWorkers {
+ w.ResetTx(nil)
+ }
+ //applyWorker.ResetTx(nil)
+ }
+ }
+ applyWorker = NewWorker(lock, logger, hooks, ctx, false, chainDb, in, blockReader, chainConfig, genesis, rws, engine, dirs, isMining)
+
+ return reconWorkers, applyWorker, rws, clear, wait
+}
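
NewWorkersPool hands back clear and wait closures so callers can tear the background workers down exactly once. A self-contained sketch of that cancel-then-wait pattern (generic; it does not use Erigon's queue or state types):

	package main

	import (
		"context"
		"fmt"

		"golang.org/x/sync/errgroup"
	)

	func newPool(ctx context.Context, workers int, work func(context.Context, int) error) (clear func(), wait func() error) {
		ctx, cancel := context.WithCancel(ctx)
		g, ctx := errgroup.WithContext(ctx)
		for i := 0; i < workers; i++ {
			i := i
			g.Go(func() error { return work(ctx, i) })
		}
		var cleared bool
		clear = func() {
			if cleared { // idempotent, like the clear() closure above
				return
			}
			cleared = true
			cancel()
			_ = g.Wait()
		}
		return clear, g.Wait
	}

	func main() {
		clear, wait := newPool(context.Background(), 4, func(ctx context.Context, id int) error {
			<-ctx.Done() // a real worker would drain its input queue here
			return nil
		})
		clear()
		fmt.Println("workers stopped:", wait())
	}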
diff --git a/execution/execmodule/block_building.go b/execution/execmodule/block_building.go
index 454ed27fb5d..bca37a697d1 100644
--- a/execution/execmodule/block_building.go
+++ b/execution/execmodule/block_building.go
@@ -35,10 +35,10 @@ import (
)
func (e *EthereumExecutionModule) checkWithdrawalsPresence(time uint64, withdrawals []*types.Withdrawal) error {
- if !e.config.IsShanghai(time) && withdrawals != nil {
+ if !e.config.IsShanghai(time, 0) && withdrawals != nil {
return &rpc.InvalidParamsError{Message: "withdrawals before shanghai"}
}
- if e.config.IsShanghai(time) && withdrawals == nil {
+ if e.config.IsShanghai(time, 0) && withdrawals == nil {
return &rpc.InvalidParamsError{Message: "missing withdrawals list"}
}
return nil
diff --git a/execution/fixedgas/intrinsic_gas.go b/execution/fixedgas/intrinsic_gas.go
new file mode 100644
index 00000000000..8a9b1bbf40c
--- /dev/null
+++ b/execution/fixedgas/intrinsic_gas.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The go-ethereum Authors
+// (original work)
+// Copyright 2024 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package fixedgas
+
+import (
+ "github.com/erigontech/erigon/common/math"
+ "github.com/erigontech/erigon/execution/protocol/params"
+)
+
+// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
+// TODO: convert the input to a struct
+func IntrinsicGas(data []byte, accessListLen, storageKeysLen uint64, isContractCreation bool, isEIP2, isEIP2028, isEIP3860, isEIP7623, isAATxn bool, authorizationsLen uint64) (uint64, uint64, bool) {
+ // Zero and non-zero bytes are priced differently
+ dataLen := uint64(len(data))
+ dataNonZeroLen := uint64(0)
+ for _, byt := range data {
+ if byt != 0 {
+ dataNonZeroLen++
+ }
+ }
+
+ return CalcIntrinsicGas(dataLen, dataNonZeroLen, authorizationsLen, accessListLen, storageKeysLen, isContractCreation, isEIP2, isEIP2028, isEIP3860, isEIP7623, isAATxn)
+}
+
+// CalcIntrinsicGas computes the 'intrinsic gas' for a message with the given data.
+func CalcIntrinsicGas(dataLen, dataNonZeroLen, authorizationsLen, accessListLen, storageKeysLen uint64, isContractCreation, isEIP2, isEIP2028, isEIP3860, isEIP7623, isAATxn bool) (gas uint64, floorGas7623 uint64, overflow bool) {
+ // Set the starting gas for the raw transaction
+ if isContractCreation && isEIP2 {
+ gas = params.TxGasContractCreation
+ } else if isAATxn {
+ gas = params.TxAAGas
+ } else {
+ gas = params.TxGas
+ }
+ floorGas7623 = params.TxGas
+ // Bump the required gas by the amount of transactional data
+ if dataLen > 0 {
+ // Zero and non-zero bytes are priced differently
+ nz := dataNonZeroLen
+ // Make sure we don't exceed uint64 for all data combinations
+ nonZeroGas := params.TxDataNonZeroGasFrontier
+ if isEIP2028 {
+ nonZeroGas = params.TxDataNonZeroGasEIP2028
+ }
+
+ product, overflow := math.SafeMul(nz, nonZeroGas)
+ if overflow {
+ return 0, 0, true
+ }
+ gas, overflow = math.SafeAdd(gas, product)
+ if overflow {
+ return 0, 0, true
+ }
+
+ z := dataLen - nz
+
+ product, overflow = math.SafeMul(z, params.TxDataZeroGas)
+ if overflow {
+ return 0, 0, true
+ }
+ gas, overflow = math.SafeAdd(gas, product)
+ if overflow {
+ return 0, 0, true
+ }
+
+ if isContractCreation && isEIP3860 {
+ numWords := toWordSize(dataLen)
+ product, overflow = math.SafeMul(numWords, params.InitCodeWordGas)
+ if overflow {
+ return 0, 0, true
+ }
+ gas, overflow = math.SafeAdd(gas, product)
+ if overflow {
+ return 0, 0, true
+ }
+ }
+
+ if isEIP7623 {
+ tokenLen := dataLen + 3*nz
+ dataGas, overflow := math.SafeMul(tokenLen, params.TxTotalCostFloorPerToken)
+ if overflow {
+ return 0, 0, true
+ }
+ floorGas7623, overflow = math.SafeAdd(floorGas7623, dataGas)
+ if overflow {
+ return 0, 0, true
+ }
+ }
+ }
+ if accessListLen > 0 {
+ product, overflow := math.SafeMul(accessListLen, params.TxAccessListAddressGas)
+ if overflow {
+ return 0, 0, true
+ }
+ gas, overflow = math.SafeAdd(gas, product)
+ if overflow {
+ return 0, 0, true
+ }
+
+ product, overflow = math.SafeMul(storageKeysLen, params.TxAccessListStorageKeyGas)
+ if overflow {
+ return 0, 0, true
+ }
+ gas, overflow = math.SafeAdd(gas, product)
+ if overflow {
+ return 0, 0, true
+ }
+ }
+
+ // Add the cost of authorizations
+ product, overflow := math.SafeMul(authorizationsLen, params.PerEmptyAccountCost)
+ if overflow {
+ return 0, 0, true
+ }
+
+ gas, overflow = math.SafeAdd(gas, product)
+ if overflow {
+ return 0, 0, true
+ }
+
+ return gas, floorGas7623, false
+}
+
+// toWordSize returns the ceiled word size required for memory expansion.
+func toWordSize(size uint64) uint64 {
+ if size > math.MaxUint64-31 {
+ return math.MaxUint64/32 + 1
+ }
+ return (size + 31) / 32
+}
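
For orientation, the two gas values returned here diverge for data-heavy transactions: with the well-known parameters (21000 base gas, 16/4 gas per non-zero/zero calldata byte, 10 gas per EIP-7623 token), 100 bytes of calldata with 20 non-zero bytes gives gas = 21000 + 20*16 + 80*4 = 21640, while floorGas7623 = 21000 + (100 + 3*20)*10 = 22600, so the EIP-7623 floor dominates. A minimal usage sketch (package path taken from this diff; the expected output assumes those parameter values):

	package main

	import (
		"fmt"

		"github.com/erigontech/erigon/execution/fixedgas"
	)

	func main() {
		data := make([]byte, 100)
		for i := 0; i < 20; i++ {
			data[i] = 0xff // 20 non-zero bytes, 80 zero bytes
		}
		gas, floor, overflow := fixedgas.IntrinsicGas(
			data,
			0, 0,  // no access-list addresses / storage keys
			false, // not a contract creation
			true,  // Homestead (EIP-2)
			true,  // EIP-2028 calldata pricing
			true,  // EIP-3860 initcode metering
			true,  // EIP-7623 calldata floor
			false, // not an AA transaction
			0,     // no EIP-7702 authorizations
		)
		fmt.Println(gas, floor, overflow) // expected: 21640 22600 false
	}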
diff --git a/execution/protocol/block_exec.go b/execution/protocol/block_exec.go
index 5fa48e318a2..606d31fe1a6 100644
--- a/execution/protocol/block_exec.go
+++ b/execution/protocol/block_exec.go
@@ -97,7 +97,8 @@ func ExecuteBlockEphemerally(
gasUsed := new(uint64)
usedBlobGas := new(uint64)
gp := new(GasPool)
- gp.AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(block.Time()))
+ arbOsVersion := types.GetArbOSVersion(header, chainConfig)
+ gp.AddGas(block.GasLimit()).AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(block.Time(), arbOsVersion))
if vmConfig.Tracer != nil && vmConfig.Tracer.OnBlockStart != nil {
td := chainReader.GetTd(block.ParentHash(), block.NumberU64()-1)
@@ -451,3 +452,56 @@ func BlockPostValidation(gasUsed, blobGasUsed uint64, checkReceipts bool, receip
return nil
}
+
+// TODO: Arbitrum remnants?
+// WriteStatus status of write
+type WriteStatus byte
+
+const (
+ NonStatTx WriteStatus = iota
+ CanonStatTx
+ SideStatTx
+)
+
+// // WriteBlockAndSetHeadWithTime also counts processTime, which will cause intermittent TrieDirty cache writes
+// func (bc *BlockChain) WriteBlockAndSetHeadWithTime(block *types.Block, receipts []*types.Receipt, logs []*types.Log, state *state.IntraBlockState, emitHeadEvent bool, processTime time.Duration) (status WriteStatus, err error) {
+// if !bc.chainmu.TryLock() {
+// return NonStatTy, errChainStopped
+// }
+// defer bc.chainmu.Unlock()
+// bc.gcproc += processTime
+// return bc.writeBlockAndSetHead(block, receipts, logs, state, emitHeadEvent)
+// }
+
+// func (bc *BlockChain) ReorgToOldBlock(newHead *types.Block) error {
+// bc.wg.Add(1)
+// defer bc.wg.Done()
+// if _, err := bc.SetCanonical(newHead); err != nil {
+// return fmt.Errorf("error reorging to old block: %w", err)
+// }
+// return nil
+// }
+
+// func (bc *BlockChain) ClipToPostNitroGenesis(blockNum rpc.BlockNumber) (rpc.BlockNumber, rpc.BlockNumber) {
+// currentBlock := rpc.BlockNumber(bc.CurrentBlock().Number.Uint64())
+// nitroGenesis := rpc.BlockNumber(bc.Config().ArbitrumChainParams.GenesisBlockNum)
+// if blockNum == rpc.LatestBlockNumber || blockNum == rpc.PendingBlockNumber {
+// blockNum = currentBlock
+// }
+// if blockNum > currentBlock {
+// blockNum = currentBlock
+// }
+// if blockNum < nitroGenesis {
+// blockNum = nitroGenesis
+// }
+// return blockNum, currentBlock
+// }
+
+// func (bc *BlockChain) RecoverState(block *types.Block) error {
+// if bc.HasState(block.Root()) {
+// return nil
+// }
+// log.Warn("recovering block state", "num", block.Number(), "hash", block.Hash(), "root", block.Root())
+// _, err := bc.recoverAncestors(block)
+// return err
+// }
diff --git a/execution/protocol/evm.go b/execution/protocol/evm.go
index a3ab942e7c1..e96b4b2952c 100644
--- a/execution/protocol/evm.go
+++ b/execution/protocol/evm.go
@@ -83,13 +83,19 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) (comm
var transferFunc evmtypes.TransferFunc
var postApplyMessageFunc evmtypes.PostApplyMessageFunc
- if engine != nil {
+ if engine != nil && !config.IsArbitrum() {
transferFunc = engine.GetTransferFunc()
postApplyMessageFunc = engine.GetPostApplyMessageFunc()
} else {
transferFunc = rules.Transfer
postApplyMessageFunc = nil
}
+ // On Arbitrum chains (ArbOS version > 0), expose the header difficulty as prevRandao.
+ arbOsVersion := types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
+ if arbOsVersion > osver.ArbosVersion_0 {
+ difficultyHash := common.BigToHash(header.Difficulty)
+ prevRandDao = &difficultyHash
+ }
blockContext := evmtypes.BlockContext{
CanTransfer: CanTransfer,
Transfer: transferFunc,
@@ -100,8 +106,11 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) (comm
Time: header.Time,
BaseFee: baseFee,
GasLimit: header.GasLimit,
+ BlockGasUsed: header.GasUsed,
PrevRanDao: prevRandDao,
BlobBaseFee: blobBaseFee,
+ BaseFeeInBlock: baseFee.Clone(),
+ ArbOSVersion: arbOsVersion,
}
if header.Difficulty != nil {
blockContext.Difficulty = new(big.Int).Set(header.Difficulty)
@@ -111,11 +120,16 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) (comm
// NewEVMTxContext creates a new transaction context for a single transaction.
func NewEVMTxContext(msg Message) evmtypes.TxContext {
- return evmtypes.TxContext{
+ etx := evmtypes.TxContext{
Origin: msg.From(),
GasPrice: *msg.GasPrice(),
BlobHashes: msg.BlobHashes(),
}
+ // TODO: Arbitrum only? Seems unused / not needed.
+ if mf := msg.MaxFeePerBlobGas(); mf != nil {
+ etx.BlobFee = mf.Clone()
+ }
+ return etx
}
// GetHashFn returns a GetHashFunc which retrieves header hashes by number
diff --git a/execution/protocol/misc/eip4844.go b/execution/protocol/misc/eip4844.go
index eff71d528df..5bc0046d4c8 100644
--- a/execution/protocol/misc/eip4844.go
+++ b/execution/protocol/misc/eip4844.go
@@ -58,15 +58,16 @@ func CalcExcessBlobGas(config *chain.Config, parent *types.Header, currentHeader
if parent.BlobGasUsed != nil {
parentBlobGasUsed = *parent.BlobGasUsed
}
- target := config.GetTargetBlobsPerBlock(currentHeaderTime)
+ arbOsVersion := types.GetArbOSVersion(parent, config)
+ target := config.GetTargetBlobsPerBlock(currentHeaderTime, arbOsVersion)
targetBlobGas := target * params.GasPerBlob
if parentExcessBlobGas+parentBlobGasUsed < targetBlobGas {
return 0
}
- if config.IsOsaka(currentHeaderTime) {
+ if config.IsOsaka(currentHeaderTime, arbOsVersion) {
// EIP-7918: Blob base fee bounded by execution cost
- max := config.GetMaxBlobsPerBlock(currentHeaderTime)
+ max := config.GetMaxBlobsPerBlock(currentHeaderTime, arbOsVersion)
refBlobBaseFee, err := GetBlobGasPrice(config, parentExcessBlobGas, currentHeaderTime)
if err != nil {
panic(err) // should never happen assuming the parent is valid
@@ -135,7 +136,7 @@ func VerifyAbsenceOfCancunHeaderFields(header *types.Header) error {
}
func GetBlobGasPrice(config *chain.Config, excessBlobGas uint64, headerTime uint64) (uint256.Int, error) {
- return FakeExponential(uint256.NewInt(config.GetMinBlobGasPrice()), uint256.NewInt(config.GetBlobGasPriceUpdateFraction(headerTime)), excessBlobGas)
+ return FakeExponential(uint256.NewInt(config.GetMinBlobGasPrice()), uint256.NewInt(config.GetBlobGasPriceUpdateFraction(headerTime, 0)), excessBlobGas)
}
func GetBlobGasUsed(numBlobs int) uint64 {
diff --git a/execution/protocol/params/protocol.go b/execution/protocol/params/protocol.go
index 6fb82d74439..b1374feaa7f 100644
--- a/execution/protocol/params/protocol.go
+++ b/execution/protocol/params/protocol.go
@@ -63,6 +63,10 @@ const (
NetSstoreResetRefund uint64 = 4800 // Once per SSTORE operation for resetting to the original non-zero value
NetSstoreResetClearRefund uint64 = 19800 // Once per SSTORE operation for resetting to the original zero value
+ LogTopicBytes uint64 = 32 // 32 bytes per topic represents the hash size that gets stored in history.
+ LogTopicHistoryGas uint64 = LogDataGas * LogTopicBytes // History growth gas per topic
+ LogTopicComputationGas uint64 = LogTopicGas - LogTopicHistoryGas // Computation gas per topic
+
SstoreSentryGasEIP2200 uint64 = 2300 // Minimum gas required to be present for an SSTORE call, not consumed
SstoreSetGasEIP2200 uint64 = 20000 // Once per SSTORE operation from clean zero to non-zero
SstoreResetGasEIP2200 uint64 = 5000 // Once per SSTORE operation from clean non-zero to something else
@@ -210,9 +214,11 @@ const (
MaxRlpBlockSize = MaxBlockSize - MaxBlockSizeSafetyMargin
)
-// EIP-7702: Set EOA account code
-var DelegatedDesignationPrefix = []byte{0xef, 0x01, 0x00}
-var DelegatedCodeHash = common.HexToHash("0xeadcdba66a79ab5dce91622d1d75c8cff5cff0b96944c3bf1072cd08ce018329")
+var (
+ // EIP-7702: Set EOA account code
+ DelegatedDesignationPrefix = []byte{0xef, 0x01, 0x00}
+ DelegatedCodeHash = common.HexToHash("0xeadcdba66a79ab5dce91622d1d75c8cff5cff0b96944c3bf1072cd08ce018329")
+)
// EIP-4788: Beacon block root in the EVM
var BeaconRootsAddress = accounts.InternAddress(common.HexToAddress("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02"))
@@ -226,10 +232,12 @@ var WithdrawalRequestAddress = accounts.InternAddress(common.HexToAddress("0x000
// EIP-7251
var ConsolidationRequestAddress = accounts.InternAddress(common.HexToAddress("0x0000BBdDc7CE488642fb579F8B00f3a590007251"))
-// Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
-var Bls12381MSMDiscountTableG1 = [128]uint64{1000, 949, 848, 797, 764, 750, 738, 728, 719, 712, 705, 698, 692, 687, 682, 677, 673, 669, 665, 661, 658, 654, 651, 648, 645, 642, 640, 637, 635, 632, 630, 627, 625, 623, 621, 619, 617, 615, 613, 611, 609, 608, 606, 604, 603, 601, 599, 598, 596, 595, 593, 592, 591, 589, 588, 586, 585, 584, 582, 581, 580, 579, 577, 576, 575, 574, 573, 572, 570, 569, 568, 567, 566, 565, 564, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 551, 550, 549, 548, 547, 547, 546, 545, 544, 543, 542, 541, 540, 540, 539, 538, 537, 536, 536, 535, 534, 533, 532, 532, 531, 530, 529, 528, 528, 527, 526, 525, 525, 524, 523, 522, 522, 521, 520, 520, 519}
+var (
+ // Gas discount table for BLS12-381 G1 and G2 multi exponentiation operations
+ Bls12381MSMDiscountTableG1 = [128]uint64{1000, 949, 848, 797, 764, 750, 738, 728, 719, 712, 705, 698, 692, 687, 682, 677, 673, 669, 665, 661, 658, 654, 651, 648, 645, 642, 640, 637, 635, 632, 630, 627, 625, 623, 621, 619, 617, 615, 613, 611, 609, 608, 606, 604, 603, 601, 599, 598, 596, 595, 593, 592, 591, 589, 588, 586, 585, 584, 582, 581, 580, 579, 577, 576, 575, 574, 573, 572, 570, 569, 568, 567, 566, 565, 564, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 551, 550, 549, 548, 547, 547, 546, 545, 544, 543, 542, 541, 540, 540, 539, 538, 537, 536, 536, 535, 534, 533, 532, 532, 531, 530, 529, 528, 528, 527, 526, 525, 525, 524, 523, 522, 522, 521, 520, 520, 519}
-var Bls12381MSMDiscountTableG2 = [128]uint64{1000, 1000, 923, 884, 855, 832, 812, 796, 782, 770, 759, 749, 740, 732, 724, 717, 711, 704, 699, 693, 688, 683, 679, 674, 670, 666, 663, 659, 655, 652, 649, 646, 643, 640, 637, 634, 632, 629, 627, 624, 622, 620, 618, 615, 613, 611, 609, 607, 606, 604, 602, 600, 598, 597, 595, 593, 592, 590, 589, 587, 586, 584, 583, 582, 580, 579, 578, 576, 575, 574, 573, 571, 570, 569, 568, 567, 566, 565, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 552, 551, 550, 549, 548, 547, 546, 545, 545, 544, 543, 542, 541, 541, 540, 539, 538, 537, 537, 536, 535, 535, 534, 533, 532, 532, 531, 530, 530, 529, 528, 528, 527, 526, 526, 525, 524, 524}
+ Bls12381MSMDiscountTableG2 = [128]uint64{1000, 1000, 923, 884, 855, 832, 812, 796, 782, 770, 759, 749, 740, 732, 724, 717, 711, 704, 699, 693, 688, 683, 679, 674, 670, 666, 663, 659, 655, 652, 649, 646, 643, 640, 637, 634, 632, 629, 627, 624, 622, 620, 618, 615, 613, 611, 609, 607, 606, 604, 602, 600, 598, 597, 595, 593, 592, 590, 589, 587, 586, 584, 583, 582, 580, 579, 578, 576, 575, 574, 573, 571, 570, 569, 568, 567, 566, 565, 563, 562, 561, 560, 559, 558, 557, 556, 555, 554, 553, 552, 552, 551, 550, 549, 548, 547, 546, 545, 545, 544, 543, 542, 541, 541, 540, 539, 538, 537, 537, 536, 535, 535, 534, 533, 532, 532, 531, 530, 530, 529, 528, 528, 527, 526, 526, 525, 524, 524}
+)
var (
DifficultyBoundDivisor = big.NewInt(2048) // The bound divisor of the difficulty, used in the update calculations.
@@ -256,3 +264,9 @@ var DefaultPragueBlobConfig = BlobConfig{
Max: 9,
BaseFeeUpdateFraction: 5007716,
}
+
+var DefaultOsakaBlobConfig = BlobConfig{
+ Target: 6,
+ Max: 9,
+ BaseFeeUpdateFraction: 5007716,
+}
diff --git a/execution/protocol/rules/ethash/sealer.go b/execution/protocol/rules/ethash/sealer.go
index b5b50b1c99d..486194cd46d 100644
--- a/execution/protocol/rules/ethash/sealer.go
+++ b/execution/protocol/rules/ethash/sealer.go
@@ -21,6 +21,7 @@ package ethash
import (
crand "crypto/rand"
+ "encoding/json"
"errors"
"math"
"math/big"
diff --git a/execution/protocol/rules/merge/merge.go b/execution/protocol/rules/merge/merge.go
index 5eee4bd742c..d3fd1aeb9ee 100644
--- a/execution/protocol/rules/merge/merge.go
+++ b/execution/protocol/rules/merge/merge.go
@@ -187,7 +187,11 @@ func (s *Merge) Finalize(config *chain.Config, header *types.Header, state *stat
}
var rs types.FlatRequests
- if config.IsPrague(header.Time) && !skipReceiptsEval {
+ var arbosVersion uint64
+ if config.IsArbitrum() {
+ arbosVersion = types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
+ }
+ if config.IsPrague(header.Time, arbosVersion) && !skipReceiptsEval {
rs = make(types.FlatRequests, 0)
allLogs := make(types.Logs, 0)
for i, rec := range receipts {
@@ -239,7 +243,11 @@ func (s *Merge) FinalizeAndAssemble(config *chain.Config, header *types.Header,
if err != nil {
return nil, nil, err
}
- if config.IsPrague(header.Time) {
+ var arbosVersion uint64
+ if config.IsArbitrum() {
+ arbosVersion = types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
+ }
+ if config.IsPrague(header.Time, arbosVersion) {
header.RequestsHash = outRequests.Hash()
}
return types.NewBlockForAsembling(header, txs, uncles, receipts, withdrawals), outRequests, nil
@@ -306,8 +314,13 @@ func (s *Merge) verifyHeader(chain rules.ChainHeaderReader, header, parent *type
return err
}
+ var arbosVersion uint64
+ if chain.Config().IsArbitrum() {
+ arbosVersion = types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
+ }
+
// Verify existence / non-existence of withdrawalsHash
- shanghai := chain.Config().IsShanghai(header.Time)
+ shanghai := chain.Config().IsShanghai(header.Time, arbosVersion)
if shanghai && header.WithdrawalsHash == nil {
return errors.New("missing withdrawalsHash")
}
@@ -315,7 +328,7 @@ func (s *Merge) verifyHeader(chain rules.ChainHeaderReader, header, parent *type
return rules.ErrUnexpectedWithdrawals
}
- if !chain.Config().IsCancun(header.Time) {
+ if !chain.Config().IsCancun(header.Time, arbosVersion) {
return misc.VerifyAbsenceOfCancunHeaderFields(header)
}
if err := misc.VerifyPresenceOfCancunHeaderFields(header); err != nil {
@@ -327,7 +340,7 @@ func (s *Merge) verifyHeader(chain rules.ChainHeaderReader, header, parent *type
}
// Verify existence / non-existence of requestsHash
- prague := chain.Config().IsPrague(header.Time)
+ prague := chain.Config().IsPrague(header.Time, arbosVersion)
if prague && header.RequestsHash == nil {
return errors.New("missing requestsHash")
}
@@ -384,12 +397,17 @@ func (s *Merge) Initialize(config *chain.Config, chain rules.ChainHeaderReader,
}
}
- if cfg.IsCancun(header.Time) && header.ParentBeaconBlockRoot != nil {
+ var arbosVersion uint64
+ if cfg.IsArbitrum() {
+ arbosVersion = types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
+ }
+
+ if cfg.IsCancun(header.Time, arbosVersion) && header.ParentBeaconBlockRoot != nil {
misc.ApplyBeaconRootEip4788(header.ParentBeaconBlockRoot, func(addr accounts.Address, data []byte) ([]byte, error) {
return syscall(addr, data, state, header, false /* constCall */)
}, tracer)
}
- if cfg.IsPrague(header.Time) {
+ if cfg.IsPrague(header.Time, arbosVersion) {
if err := misc.StoreBlockHashesEip2935(header, state); err != nil {
return err
}
diff --git a/execution/protocol/state_processor.go b/execution/protocol/state_processor.go
index 72f5e682b26..a450679fdba 100644
--- a/execution/protocol/state_processor.go
+++ b/execution/protocol/state_processor.go
@@ -82,6 +82,7 @@ func applyTransaction(config *chain.Config, engine rules.EngineReader, gp *GasPo
if usedBlobGas != nil {
*usedBlobGas += txn.GetBlobGas()
}
+ // TODO add resultFilter from Arbitrum?
// Set the receipt logs and create the bloom filter.
// based on the eip phase, we're passing whether the root touch-delete accounts.
@@ -158,3 +159,177 @@ func MakeReceipt(
receipt.TransactionIndex = uint(ibs.TxnIndex())
return receipt
}
+
+
+/// TODO move to separate file/package
+
+// Arbitrum modifications.
+// applyArbTransaction is the same as applyTransaction, but it returns the whole EVM execution result and may take the execution mode as a parameter.
+
+// applyArbTransaction attempts to apply a transaction to the given state database
+// and uses the input parameters for its environment. It returns the receipt
+// for the transaction, gas used and an error if the transaction failed,
+// indicating the block was invalid.
+func applyArbTransaction(config *chain.Config, engine consensus.EngineReader, gp *GasPool, ibs state.IntraBlockStateArbitrum,
+ stateWriter state.StateWriter, header *types.Header, txn types.Transaction, usedGas, usedBlobGas *uint64,
+ evm *vm.EVM, cfg vm.Config) (*types.Receipt, *evmtypes.ExecutionResult, error) {
+
+ var (
+ receipt *types.Receipt
+ err error
+ )
+
+ rules := evm.ChainRules()
+ blockNum := header.Number.Uint64()
+ msg, err := txn.AsMessage(*types.MakeSigner(config, blockNum, header.Time), header.BaseFee, rules)
+ if err != nil {
+ return nil, nil, err
+ }
+ msg.SetCheckNonce(!cfg.StatelessExec)
+
+ if cfg.Tracer != nil {
+ if cfg.Tracer.OnTxStart != nil {
+ cfg.Tracer.OnTxStart(evm.GetVMContext(), txn, msg.From())
+ }
+ if cfg.Tracer.OnTxEnd != nil {
+ defer func() {
+ cfg.Tracer.OnTxEnd(receipt, err)
+ }()
+ }
+ }
+
+ txContext := NewEVMTxContext(msg)
+ if cfg.TraceJumpDest {
+ txContext.TxHash = txn.Hash()
+ }
+
+ // Update the evm with the new transaction context.
+ evm.Reset(txContext, ibs.(*state.IntraBlockState))
+ result, err := ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ // Update the state with pending changes
+ if err = ibs.FinalizeTx(rules, stateWriter); err != nil {
+ return nil, nil, err
+ }
+ *usedGas += result.GasUsed
+ if usedBlobGas != nil {
+ *usedBlobGas += txn.GetBlobGas()
+ }
+ // TODO add resultFilter from Arbitrum?
+
+ // Set the receipt logs and create the bloom filter.
+ // based on the eip phase, we're passing whether the root touch-delete accounts.
+ if !cfg.NoReceipts {
+ // by the txn
+ receipt = &types.Receipt{Type: txn.Type(), CumulativeGasUsed: *usedGas}
+ if result.Failed() {
+ receipt.Status = types.ReceiptStatusFailed
+ } else {
+ receipt.Status = types.ReceiptStatusSuccessful
+ }
+ receipt.TxHash = txn.Hash()
+ receipt.GasUsed = result.GasUsed
+ // if the transaction created a contract, store the creation address in the receipt.
+ if msg.To() == nil {
+ receipt.ContractAddress = types.CreateAddress(evm.Origin, txn.GetNonce())
+ }
+ // Set the receipt logs and create a bloom for filtering
+ receipt.Logs = ibs.GetLogs(ibs.TxnIndex(), txn.Hash(), blockNum, header.Hash())
+ receipt.Bloom = types.CreateBloom(types.Receipts{receipt})
+ receipt.BlockNumber = header.Number
+ receipt.TransactionIndex = uint(ibs.TxnIndex())
+
+ // If the transaction created a contract, store the creation address in the receipt.
+ if result.TopLevelDeployed != nil {
+ receipt.ContractAddress = *result.TopLevelDeployed
+ }
+ evm.ProcessingHook.FillReceiptInfo(receipt)
+ }
+
+ return receipt, result, err
+}
+
+// ApplyArbTransaction attempts to apply a transaction to the given state database
+// and uses the input parameters for its environment. It returns the receipt
+// for the transaction, gas used and an error if the transaction failed,
+// indicating the block was invalid.
+func ApplyArbTransaction(config *chain.Config, blockHashFunc func(n uint64) (common.Hash, error), engine consensus.EngineReader,
+ author *common.Address, gp *GasPool, ibs state.IntraBlockStateArbitrum, stateWriter state.StateWriter,
+ header *types.Header, txn types.Transaction, usedGas, usedBlobGas *uint64, cfg vm.Config,
+) (*types.Receipt, *evmtypes.ExecutionResult, error) {
+ // Create a new context to be used in the EVM environment
+
+ // Add addresses to access list if applicable
+ // about the transaction and calling mechanisms.
+ // cfg.SkipAnalysis = SkipAnalysis(config, header.Number.Uint64())
+
+ blockContext := NewEVMBlockContext(header, blockHashFunc, engine, author, config)
+ vmenv := vm.NewEVM(blockContext, evmtypes.TxContext{}, ibs.(*state.IntraBlockState), config, cfg)
+
+ // ibss := ibs.(*state.IntraBlockState)
+
+ return applyArbTransaction(config, engine, gp, ibs, stateWriter, header, txn, usedGas, usedBlobGas, vmenv, cfg)
+}
+
+// ApplyArbTransactionVmenv attempts to apply a transaction to the given
+// state database using given environment. It returns the receipt
+// for the transaction, gas used and an error if the transaction failed,
+// indicating the block was invalid.
+func ApplyArbTransactionVmenv(config *chain.Config, engine consensus.EngineReader, gp *GasPool, ibs state.IntraBlockStateArbitrum, stateWriter state.StateWriter,
+ header *types.Header, txn types.Transaction, usedGas, usedBlobGas *uint64, cfg vm.Config, vmenv *vm.EVM,
+) (*types.Receipt, *evmtypes.ExecutionResult, error) {
+ return applyArbTransaction(config, engine, gp, ibs, stateWriter, header, txn, usedGas, usedBlobGas, vmenv, cfg)
+}
+
+// ProcessParentBlockHash stores the parent block hash in the history storage contract
+// as per EIP-2935/7709.
+func ProcessParentBlockHash(prevHash common.Hash, evm *vm.EVM) {
+ //if tracer := evm.Config.Tracer; tracer != nil {
+ // onSystemCallStart(tracer, evm.GetVMContext())
+ // if tracer.OnSystemCallEnd != nil {
+ // defer tracer.OnSystemCallEnd()
+ // }
+ //}
+ //tx
+ //msg := &Message{
+ // From: params.SystemAddress,
+ // GasLimit: 30_000_000,
+ // GasPrice: common.Big0,
+ // GasFeeCap: common.Big0,
+ // GasTipCap: common.Big0,
+ // To: &params.HistoryStorageAddress,
+ // Data: prevHash.Bytes(),
+ //}
+ msg := types.NewMessage(
+ state.SystemAddress,
+ &params.HistoryStorageAddress,
+ 0,
+ common.Num0,
+ 30_000_000,
+ common.Num0,
+ common.Num0,
+ common.Num0,
+ prevHash[:],
+ types.AccessList{},
+ false,
+ false,
+ false,
+ common.Num0,
+ )
+
+ //msg, err := args.ToMessage(30_000_000, evm.Context.BaseFee)
+ //evm
+ //evm.SetTxContext(NewEVMTxContext(msg))
+ //evm.StateDB.AddAddressToAccessList(params.HistoryStorageAddress)
+
+ _, _, _, err := evm.Call(vm.AccountRef(msg.From()), *msg.To(), msg.Data(), msg.Gas(), common.Num0, false)
+ if err != nil {
+ panic(err)
+ }
+ //if evm.StateDB.AccessEvents() != nil {
+ // evm.StateDB.AccessEvents().Merge(evm.AccessEvents)
+ //}
+ //evm.StateDB.Finalise(true)
+}
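
ProcessParentBlockHash performs the EIP-2935 system call: the parent hash lands in a ring buffer of BlockHashHistoryServeWindow (8191) slots under HistoryStorageAddress. A small standalone sketch of the slot arithmetic the contract applies (illustrative only; the real path is the system call above):

	package main

	import "fmt"

	const historyServeWindow = 8191 // params.BlockHashHistoryServeWindow

	// historySlot returns the storage slot the EIP-2935 contract uses for the
	// parent hash of the block currently being processed.
	func historySlot(blockNumber uint64) uint64 {
		return (blockNumber - 1) % historyServeWindow
	}

	func main() {
		fmt.Println(historySlot(1))     // 0: genesis hash goes to slot 0
		fmt.Println(historySlot(8192))  // 0: the window wraps around
		fmt.Println(historySlot(10000)) // 1808
	}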
diff --git a/execution/protocol/state_transition.go b/execution/protocol/state_transition.go
index c7d78b3a8d4..b6fb7d2555f 100644
--- a/execution/protocol/state_transition.go
+++ b/execution/protocol/state_transition.go
@@ -25,6 +25,8 @@ import (
"fmt"
"slices"
+ "github.com/erigontech/erigon/arb/multigas"
+ "github.com/erigontech/erigon/execution/chain"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -42,6 +44,12 @@ import (
"github.com/erigontech/erigon/execution/vm/evmtypes"
)
+var arbTrace bool
+
+func init() {
+ arbTrace = dbg.EnvBool("ARB_TRACE", false)
+}
+
/*
The State Transitioning Model
@@ -199,7 +207,7 @@ func (st *StateTransition) buyGas(gasBailout bool) error {
// compute blob fee for eip-4844 data blobs if any
blobGasVal := uint256.Int{}
- if st.evm.ChainRules().IsCancun {
+ if st.evm.ChainRules().IsCancun && !st.evm.ChainRules().IsArbitrum {
blobGasVal, overflow = u256.MulOverflow(st.evm.Context.BlobBaseFee, u256.U64(st.msg.BlobGas()))
if overflow {
return fmt.Errorf("%w: overflow converting blob gas: %v", ErrInsufficientFunds, &blobGasVal)
@@ -251,6 +259,17 @@ func (st *StateTransition) buyGas(gasBailout bool) error {
st.evm.Config().Tracer.OnGasChange(0, st.msg.Gas(), tracing.GasChangeTxInitialBalance)
}
+ if tracer := st.evm.Config().Tracer; tracer != nil && tracer.CaptureArbitrumTransfer != nil {
+ var from = st.msg.From()
+ tracer.CaptureArbitrumTransfer(&from, nil, gasVal, true, "feePayment")
+ }
+
+ // Check for overflow before adding gas
+ if st.gasRemaining > math.MaxUint64-st.msg.Gas() {
+ panic(fmt.Sprintf("gasRemaining overflow in buyGas: gasRemaining=%d, msg.Gas()=%d", st.gasRemaining, st.msg.Gas()))
+ }
+
+ //fmt.Printf("buyGas: adding gas %d from %x\n", st.msg.Gas(), st.msg.From())
st.gasRemaining += st.msg.Gas()
st.initialGas = st.msg.Gas()
st.evm.BlobFee = blobGasVal
@@ -326,7 +345,11 @@ func (st *StateTransition) preCheck(gasBailout bool) error {
}
}
}
- if st.msg.BlobGas() > 0 && rules.IsCancun {
+ isCancun := rules.IsCancun
+ if st.evm.ChainConfig().IsArbitrum() {
+ isCancun = false
+ }
+ if st.msg.BlobGas() > 0 && isCancun {
blobGasPrice := st.evm.Context.BlobBaseFee
maxFeePerBlobGas := st.msg.MaxFeePerBlobGas()
if !st.evm.Config().NoBaseFee && blobGasPrice.Cmp(maxFeePerBlobGas) > 0 {
@@ -335,8 +358,25 @@ func (st *StateTransition) preCheck(gasBailout bool) error {
}
}
+ // TODO arbitrum
+ // Check that the user is paying at least the current blob fee
+ // if st.evm.ChainConfig().IsCancun(st.evm.Context.BlockNumber, st.evm.Context.Time, st.evm.Context.ArbOSVersion) {
+ // if st.blobGasUsed() > 0 {
+ // // Skip the checks if gas fields are zero and blobBaseFee was explicitly disabled (eth_call)
+ // skipCheck := st.evm.Config.NoBaseFee && msg.BlobGasFeeCap.BitLen() == 0
+ // if !skipCheck {
+ // // This will panic if blobBaseFee is nil, but blobBaseFee presence
+ // // is verified as part of header validation.
+ // if msg.BlobGasFeeCap.Cmp(st.evm.Context.BlobBaseFee) < 0 {
+ // return fmt.Errorf("%w: address %v blobGasFeeCap: %v, blobBaseFee: %v", ErrBlobFeeCapTooLow,
+ // msg.From.Hex(), msg.BlobGasFeeCap, st.evm.Context.BlobBaseFee)
+ // }
+ // }
+ // }
+ // }
// EIP-7825: Transaction Gas Limit Cap
- if st.msg.CheckGas() && rules.IsOsaka && st.msg.Gas() > params.MaxTxnGasLimit {
+ // TODO should skip for arbitrum?
+ if !rules.IsArbitrum && st.msg.CheckGas() && rules.IsOsaka && st.msg.Gas() > params.MaxTxnGasLimit {
return fmt.Errorf("%w: address %v, gas limit %d", ErrGasLimitTooHigh, from, st.msg.Gas())
}
@@ -356,6 +396,10 @@ func (st *StateTransition) ApplyFrame() (*evmtypes.ExecutionResult, error) {
}
msg := st.msg
+ // Check for overflow before adding gas
+ if st.gasRemaining > math.MaxUint64-st.msg.Gas() {
+ panic(fmt.Sprintf("gasRemaining overflow in ApplyFrame: gasRemaining=%d, msg.Gas()=%d", st.gasRemaining, st.msg.Gas()))
+ }
st.gasRemaining += st.msg.Gas()
st.initialGas = st.msg.Gas()
sender := msg.From()
@@ -387,7 +431,7 @@ func (st *StateTransition) ApplyFrame() (*evmtypes.ExecutionResult, error) {
vmerr error // vm errors do not effect consensus and are therefore not assigned to err
)
- ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), st.data, st.gasRemaining, st.value, false)
+ ret, st.gasRemaining, _, vmerr = st.evm.Call(sender, st.to(), st.data, st.gasRemaining, st.value, false)
result := &evmtypes.ExecutionResult{
GasUsed: st.gasUsed(),
@@ -419,6 +463,18 @@ func (st *StateTransition) ApplyFrame() (*evmtypes.ExecutionResult, error) {
// However if any consensus issue encountered, return the error directly with
// nil evm execution result.
func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *evmtypes.ExecutionResult, err error) {
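+ // Arbitrum: let the ArbOS processing hook run first; if it fully handles the tx, return its gas usage, return data and scheduled txs without running the normal state transition.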
+ endTxNow, startHookUsedMultiGas, err, returnData := st.evm.ProcessingHook.StartTxHook()
+ startHookUsedSingleGas := startHookUsedMultiGas.SingleGas()
+ if endTxNow {
+ return &evmtypes.ExecutionResult{
+ GasUsed: startHookUsedSingleGas,
+ Err: err,
+ ReturnData: returnData,
+ ScheduledTxes: st.evm.ProcessingHook.ScheduledTxes(),
+ UsedMultiGas: startHookUsedMultiGas,
+ }, nil
+ }
+
if st.evm.IntraBlockState().IsVersioned() {
defer func() {
if r := recover(); r != nil {
@@ -461,6 +517,17 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
// 5. there is no overflow when calculating intrinsic gas
// 6. caller has enough balance to cover asset transfer for **topmost** call
+ // Arbitrum: drop tip for delayed (and old) messages
+ if st.evm.ProcessingHook.DropTip() && st.msg.GasPrice().Cmp(st.evm.Context.BaseFee) > 0 {
+ mmsg := st.msg.(*types.Message)
+ mmsg.SetGasPrice(st.evm.Context.BaseFee)
+ mmsg.SetTip(common.Num0)
+ mmsg.TxRunContext = types.NewMessageCommitContext(nil)
+
+ st.gasPrice = st.evm.Context.BaseFee
+ st.tipCap = common.Num0
+ st.msg = mmsg
+ }
// Check clauses 1-3 and 6, buy gas if everything is correct
if err := st.preCheck(gasBailout); err != nil {
return nil, err
@@ -486,12 +553,32 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
// set code tx
auths := msg.Authorizations()
+ var gas uint64
+ var floorGas7623 uint64
+ var overflow bool
+ var usedMultiGas = multigas.ZeroGas()
+ var multiGas multigas.MultiGas
+
+ // TODO only for arbos50?
+ //if st.evm.ProcessingHook.IsArbitrum() {
+ multiGas, floorGas7623, overflow = multigas.IntrinsicMultiGas(st.data, uint64(len(accessTuples)), uint64(accessTuples.StorageKeys()), contractCreation, rules.IsHomestead, rules.IsIstanbul, isEIP3860, rules.IsPrague, false, uint64(len(auths)))
+ //usedMultiGas = usedMultiGas.SaturatingAdd(multiGas)
+ gas = multiGas.SingleGas()
+ //} else {
// Check clauses 4-5, subtract intrinsic gas if everything is correct
- gas, floorGas7623, overflow := fixedgas.IntrinsicGas(st.data, uint64(len(accessTuples)), uint64(accessTuples.StorageKeys()), contractCreation, rules.IsHomestead, rules.IsIstanbul, isEIP3860, rules.IsPrague, false, uint64(len(auths)))
+ gas2, floorGas76232, overflow2 := fixedgas.IntrinsicGas(st.data, uint64(len(accessTuples)), uint64(accessTuples.StorageKeys()), contractCreation, rules.IsHomestead, rules.IsIstanbul, isEIP3860, rules.IsPrague, false, uint64(len(auths)))
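+ // Cross-check the multigas-based intrinsic gas against the legacy fixedgas computation; any divergence is treated as a bug and panics.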
+ if multiGas.SingleGas() != gas2 || floorGas7623 != floorGas76232 || overflow != overflow2 {
+ fmt.Printf("Mg %d, fg7623 %d, ovf %v\n", multiGas.SingleGas(), floorGas7623, overflow)
+ fmt.Printf("g %d, fg7623 %d, ovf %v\n", gas2, floorGas76232, overflow2)
+ panic("intrinsic gas mismatch between multigas and fixedgas")
+ }
+ //}
+
if overflow {
return nil, ErrGasUintOverflow
}
- if st.gasRemaining < gas || st.gasRemaining < floorGas7623 {
+ if !rules.IsArbitrum && (st.gasRemaining < gas || st.gasRemaining < floorGas7623) {
+ fmt.Printf("st.gasRemaining %d, gas %d, floorGas7623 %d\n", st.gasRemaining, gas, floorGas7623)
return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gasRemaining, max(gas, floorGas7623))
}
@@ -500,10 +587,37 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
return nil, err
}
+ // Gas limit suffices for the floor data cost (EIP-7623)
+ // TODO enable only at arbos50? skip at all??
+ if rules.IsPrague && st.evm.ProcessingHook.IsCalldataPricingIncreaseEnabled() {
+ floorDataGas, err := FloorDataGas(msg.Data())
+ if err != nil {
+ return nil, err
+ }
+ fmt.Printf("Checking floor data gas at tx with msg gas limit %d and floorDataGas %d\n", msg.Gas(), floorDataGas)
+ if msg.Gas() < floorDataGas {
+ return nil, fmt.Errorf("%w: have %d, want %d", errors.New("floor data gas bigger than gasLimit"), msg.Gas(), floorDataGas)
+ }
+ if floorDataGas != floorGas7623 {
+ fmt.Errorf("fdg %d - intrinsic gas %d", floorDataGas, floorGas7623)
+ }
+ }
if t := st.evm.Config().Tracer; t != nil && t.OnGasChange != nil {
t.OnGasChange(st.gasRemaining, st.gasRemaining-gas, tracing.GasChangeTxIntrinsicGas)
}
+ // Check for underflow before subtracting intrinsic gas (should be caught by earlier check, but be safe)
+ if st.gasRemaining < gas {
+ panic(fmt.Sprintf("gasRemaining underflow in TransitionDb (intrinsic gas): gasRemaining=%d, gas=%d", st.gasRemaining, gas))
+ }
st.gasRemaining -= gas
+ usedMultiGas = usedMultiGas.SaturatingAdd(multiGas)
+
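+ // Arbitrum: the gas-charging hook may deduct additional gas from gasRemaining (passed by pointer) and returns the tip recipient plus the multi-dimensional gas it charged.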
+ tipReceipient, multiGas, err := st.evm.ProcessingHook.GasChargingHook(&st.gasRemaining, gas)
+ if err != nil {
+ return nil, err
+ }
+
+ usedMultiGas = usedMultiGas.SaturatingAdd(multiGas)
var bailout bool
// Gas bailout (for trace_call) should only be applied if there is not sufficient balance to perform value transfer
@@ -531,32 +645,80 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
var (
ret []byte
vmerr error // vm errors do not effect consensus and are therefore not assigned to err
- )
+ deployedContract = new(common.Address)
+ )
if contractCreation {
// The reason why we don't increment nonce here is that we need the original
// nonce to calculate the address of the contract that is being created
// It does get incremented inside the `Create` call, after the computation
// of the contract's address, but before the execution of the code.
- ret, _, st.gasRemaining, vmerr = st.evm.Create(sender, st.data, st.gasRemaining, st.value, bailout)
+ ret, *deployedContract, st.gasRemaining, multiGas, vmerr = st.evm.Create(sender, st.data, st.gasRemaining, st.value, bailout)
+ usedMultiGas = usedMultiGas.SaturatingAdd(multiGas)
} else {
- ret, st.gasRemaining, vmerr = st.evm.Call(sender, st.to(), st.data, st.gasRemaining, st.value, bailout)
+ ret, st.gasRemaining, multiGas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gasRemaining, st.value, bailout)
+ // TODO multiGas was not updated since last addition, why add again?
+ usedMultiGas = usedMultiGas.SaturatingAdd(multiGas)
}
if refunds && !gasBailout {
+ //refund := st.calcGasRefund(rules)
+ //usedMultiGas = st.reimburseGas(rules, refund, floorGas7623, usedMultiGas)
+
refundQuotient := params.RefundQuotient
if rules.IsLondon {
refundQuotient = params.RefundQuotientEIP3529
}
- gasUsed := st.gasUsed()
- refund := min(gasUsed/refundQuotient, st.state.GetRefund())
- gasUsed = gasUsed - refund
- if rules.IsPrague {
- gasUsed = max(floorGas7623, gasUsed)
+
+ if st.evm.ProcessingHook.IsArbitrum() {
+ // Refund the gas that was held to limit the amount of computation done.
+ //st.gasRemaining += st.calcHeldGasRefund() // affects .gasUsed()
+ frg := st.evm.ProcessingHook.ForceRefundGas()
+ //fmt.Printf("[%d] gas used %d force refund gas: %d, remains %d\n",
+ // st.evm.Context.BlockNumber, st.gasUsed(), frg, st.gasRemaining)
+ st.gasRemaining += frg
+
+ nonrefundable := st.evm.ProcessingHook.NonrefundableGas()
+ if nonrefundable < st.gasUsed() {
+ // Apply refund counter, capped to a refund quotient
+ refund := (st.gasUsed() - nonrefundable) / refundQuotient // Before EIP-3529
+ if refund > st.state.GetRefund() {
+ refund = st.state.GetRefund()
+ }
+ st.gasRemaining += refund
+ // Arbitrum: set the multigas refunds
+ usedMultiGas = usedMultiGas.WithRefund(refund)
+ }
+
+ if rules.IsPrague && st.evm.ProcessingHook.IsCalldataPricingIncreaseEnabled() {
+ // After EIP-7623: Data-heavy transactions pay the floor gas.
+ if st.gasUsed() < floorGas7623 {
+ usedMultiGas = usedMultiGas.SaturatingIncrement(multigas.ResourceKindL2Calldata, floorGas7623-usedMultiGas.SingleGas())
+ prev := st.gasRemaining
+ st.gasRemaining = st.initialGas - floorGas7623
+ if t := st.evm.Config().Tracer; t != nil && t.OnGasChange != nil {
+ t.OnGasChange(prev, st.gasRemaining, tracing.GasChangeTxDataFloor)
+ }
+ }
+ //if peakGasUsed < floorGas7623 {
+ // peakGasUsed = floorGas7623
+ //}// todo
+ }
+
+ } else { // Other networks
+ gasUsed := st.gasUsed()
+ refund := min(gasUsed/refundQuotient, st.state.GetRefund())
+ gasUsed = gasUsed - refund
+
+ if rules.IsPrague {
+ gasUsed = max(floorGas7623, gasUsed)
+ }
+ st.gasRemaining = st.initialGas - gasUsed
}
- st.gasRemaining = st.initialGas - gasUsed
+
st.refundGas()
} else if rules.IsPrague {
+ fmt.Println("i was not supposed to be in non-arbitrum prague")
st.gasRemaining = st.initialGas - max(floorGas7623, st.gasUsed())
}
@@ -572,19 +734,41 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
tipAmount := u256.Mul(u256.U64(st.gasUsed()), effectiveTip) // gasUsed * effectiveTip = how much goes to the block producer (miner, validator)
if !st.noFeeBurnAndTip {
+ // MERGE_ARBITRUM: the following was in the arbitrum branch:
+ /*
+ if rules.IsArbitrum {
+ if err := st.state.AddBalance(coinbase, *tipAmount, tracing.BalanceIncreaseRewardTransactionFee); err != nil {
+ return nil, fmt.Errorf("%w: %w", ErrStateTransitionFailed, err)
+ }
+ */
if err := st.state.AddBalance(coinbase, tipAmount, tracing.BalanceIncreaseRewardTransactionFee); err != nil {
return nil, fmt.Errorf("%w: %w", ErrStateTransitionFailed, err)
}
}
+ if st.evm.Config().NoBaseFee && msg.FeeCap().Sign() == 0 && msg.TipCap().Sign() == 0 {
+ // Skip fee payment when NoBaseFee is set and the fee fields
+ // are 0. This avoids a negative effectiveTip being applied to
+ // the coinbase when simulating calls.
+ } else {
+ if err := st.state.AddBalance(tipReceipient, *tipAmount, tracing.BalanceIncreaseRewardTransactionFee); err != nil {
+ return nil, fmt.Errorf("%w: %w", ErrStateTransitionFailed, err)
+ }
+ }
var burnAmount uint256.Int
var burntContractAddress accounts.Address
+ var tracingTipAmount *uint256.Int
if !msg.IsFree() && rules.IsLondon {
burntContractAddress = st.evm.ChainConfig().GetBurntContract(st.evm.Context.BlockNumber)
if !burntContractAddress.IsNil() {
burnAmount = u256.Mul(u256.U64(st.gasUsed()), st.evm.Context.BaseFee)
+ if arbTrace {
+ fmt.Printf("burnAddr %x tipAddr %x\n", burntContractAddress, tipReceipient)
+ }
+ tracingTipAmount = burnAmount.Clone()
+
if rules.IsAura && rules.IsPrague {
// https://github.com/gnosischain/specs/blob/master/network-upgrades/pectra.md#eip-4844-pectra
burnAmount = u256.Add(burnAmount, st.evm.BlobFee)
@@ -599,6 +783,15 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
if dbg.TraceGas || st.state.Trace() || dbg.TraceAccount(st.msg.From().Handle()) {
fmt.Printf("%d (%d.%d) Fees %x: tipped: %d, burnt: %d, price: %d, gas: %d\n", st.state.BlockNumber(), st.state.TxIndex(), st.state.Incarnation(), st.msg.From(), &tipAmount, &burnAmount, st.gasPrice, st.gasUsed())
}
+ // Arbitrum: record the tip
+ if tracer := st.evm.Config().Tracer; tracer != nil && tracer.CaptureArbitrumTransfer != nil && !st.evm.ProcessingHook.DropTip() {
+ if tracingTipAmount != nil && !tracingTipAmount.IsZero() {
+ tracer.CaptureArbitrumTransfer(nil, &tipReceipient, tracingTipAmount, false, "tip")
+ }
+ }
+ //fmt.Printf("tx from %x used gas: %d, initGas %d remain %d %s\n", st.msg.From(), st.gasUsed(), st.initialGas, st.gasRemaining, usedMultiGas)
+
+ st.evm.ProcessingHook.EndTxHook(st.gasRemaining, vmerr == nil)
result = &evmtypes.ExecutionResult{
GasUsed: st.gasUsed(),
@@ -610,6 +803,11 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
FeeTipped: tipAmount,
FeeBurnt: burnAmount,
EvmRefund: st.state.GetRefund(),
+
+ // Arbitrum
+ ScheduledTxes: st.evm.ProcessingHook.ScheduledTxes(),
+ TopLevelDeployed: deployedContract,
+ UsedMultiGas: usedMultiGas,
}
result.BurntContractAddress = burntContractAddress
@@ -621,6 +819,74 @@ func (st *StateTransition) TransitionDb(refunds bool, gasBailout bool) (result *
return result, nil
}
+// FloorDataGas computes the minimum gas required for a transaction based on its data tokens (EIP-7623).
+func FloorDataGas(data []byte) (uint64, error) {
+
+ var (
+ z = uint64(bytes.Count(data, []byte{0}))
+ nz = uint64(len(data)) - z
+ TxTokenPerNonZeroByte uint64 = 4 // Token cost per non-zero byte as specified by EIP-7623.
+ TxCostFloorPerToken uint64 = 10 // Cost floor per byte of data as specified by EIP-7623.
+ tokens = nz*TxTokenPerNonZeroByte + z
+ )
+ // Check for overflow
+ if (math.MaxUint64-params.TxGas)/TxCostFloorPerToken < tokens {
+ return 0, ErrGasUintOverflow
+ }
+ // Minimum gas required for a transaction based on its data tokens (EIP-7623).
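+ // e.g. 100 zero and 100 non-zero bytes: tokens = 100*4 + 100 = 500, floor = 21000 (params.TxGas) + 500*10 = 26000 gas.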
+ return params.TxGas + tokens*TxCostFloorPerToken, nil
+}
+
+func (st *StateTransition) calcHeldGasRefund() uint64 {
+ return st.evm.ProcessingHook.ForceRefundGas()
+}
+
+// Arbitrum // TODO move
+// RevertedTxGasUsed maps transaction hashes that are known to have reverted to the amount
+// of gas used by that specific transaction alone.
+var RevertedTxGasUsed = map[common.Hash]uint64{
+ // Arbitrum Sepolia (chain_id=421614). Tx timestamp: Oct-13-2025 03:30:36 AM +UTC
+ common.HexToHash("0x58df300a7f04fe31d41d24672786cbe1c58b4f3d8329d0d74392d814dd9f7e40"): 45174,
+}
+
+// handleRevertedTx attempts to process a reverted transaction. It returns
+// ErrExecutionReverted with the updated multiGas if a matching reverted
+// tx is found; otherwise, it returns a nil error with the unchanged multiGas.
+func (st *StateTransition) handleRevertedTx(msg *types.Message, usedMultiGas multigas.MultiGas) (multigas.MultiGas, error) {
+ if msg.Tx == nil {
+ return usedMultiGas, nil
+ }
+
+ txHash := msg.Tx.Hash()
+ if l2GasUsed, ok := RevertedTxGasUsed[txHash]; ok {
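+ // Skip re-execution: advance the sender nonce and charge the recorded gas so state matches the canonical chain, then report the tx as reverted.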
+ pn, err := st.state.GetNonce(msg.From())
+ if err != nil {
+ return usedMultiGas, fmt.Errorf("handle revert: %w", err)
+ }
+ err = st.state.SetNonce(msg.From(), uint64(pn)+1)
+ if err != nil {
+ return usedMultiGas, fmt.Errorf("handle revert: %w", err)
+ }
+
+ // Calculate adjusted gas since l2GasUsed contains params.TxGas
+ if l2GasUsed < params.TxGas {
+ panic(fmt.Sprintf("adjustedGas underflow in handleRevertedTx: l2GasUsed=%d, params.TxGas=%d", l2GasUsed, params.TxGas))
+ }
+ adjustedGas := l2GasUsed - params.TxGas
+ if st.gasRemaining < adjustedGas {
+ panic(fmt.Sprintf("gasRemaining underflow in handleRevertedTx: gasRemaining=%d, adjustedGas=%d", st.gasRemaining, adjustedGas))
+ }
+ st.gasRemaining -= adjustedGas
+
+ // Update multigas and return ErrExecutionReverted error
+ usedMultiGas = usedMultiGas.SaturatingAdd(multigas.ComputationGas(adjustedGas))
+ return usedMultiGas, vm.ErrExecutionReverted
+ }
+
+ return usedMultiGas, nil
+}
+
func (st *StateTransition) verifyAuthorities(auths []types.Authorization, contractCreation bool, chainID string) ([]accounts.Address, error) {
verifiedAuthorities := make([]accounts.Address, 0)
if len(auths) > 0 {
@@ -711,15 +977,148 @@ func (st *StateTransition) verifyAuthorities(auths []types.Authorization, contra
return verifiedAuthorities, nil
}
+func (st *StateTransition) calcGasRefund(rules *chain.Rules) uint64 {
+ //
+ //refundQuotient := params.RefundQuotient
+ //if rules.IsLondon {
+ // refundQuotient = params.RefundQuotientEIP3529
+ //}
+ //
+ //// Refund the gas that was held to limit the amount of computation done.
+ //st.gasRemaining += st.calcHeldGasRefund()
+ //
+ //if st.evm.ProcessingHook.IsArbitrum() {
+ // st.gasRemaining += st.evm.ProcessingHook.ForceRefundGas()
+ // nonrefundable := st.evm.ProcessingHook.NonrefundableGas()
+ // var refund uint64
+ // if nonrefundable < st.gasUsed() {
+ // // Apply refund counter, capped to a refund quotient
+ // refund = (st.gasUsed() - nonrefundable) / refundQuotient // Before EIP-3529
+ // if refund > st.state.GetRefund() {
+ // refund = st.state.GetRefund()
+ // }
+ // st.gasRemaining += refund
+ // }
+ //
+ // // Arbitrum: set the multigas refunds
+ // usedMultiGas = usedMultiGas.WithRefund(refund)
+ // if rules.IsPrague && st.evm.ProcessingHook.IsCalldataPricingIncreaseEnabled() {
+ // // After EIP-7623: Data-heavy transactions pay the floor gas.
+ // if st.gasUsed() < floorGas7623 {
+ // usedMultiGas = usedMultiGas.SaturatingIncrement(multigas.ResourceKindL2Calldata, floorGas7623-usedMultiGas.SingleGas())
+ // prev := st.gasRemaining
+ // st.gasRemaining = st.initialGas - floorGas7623
+ // if t := st.evm.Config().Tracer; t != nil && t.OnGasChange != nil {
+ // t.OnGasChange(prev, st.gasRemaining, tracing.GasChangeTxDataFloor)
+ // }
+ // }
+ // }
+ //
+ //} else { // Other networks
+ // gasUsed := st.gasUsed()
+ // refund := min(gasUsed/refundQuotient, st.state.GetRefund())
+ // gasUsed = gasUsed - refund
+ //
+ // if rules.IsPrague {
+ // gasUsed = max(floorGas7623, gasUsed)
+ // }
+ // st.gasRemaining = st.initialGas - gasUsed
+ //}
+ //
+ //st.refundGas()
+ refundQuotient := params.RefundQuotient
+ if rules.IsLondon {
+ refundQuotient = params.RefundQuotientEIP3529
+ }
+
+ var refund uint64
+ if !st.evm.ProcessingHook.IsArbitrum() {
+ refund = min(st.gasUsed()/refundQuotient, st.state.GetRefund())
+ } else { // Arbitrum
+ nonrefundable := st.evm.ProcessingHook.NonrefundableGas()
+ if nonrefundable < st.gasUsed() {
+ // Apply refund counter, capped to a refund quotient
+ refund = (st.gasUsed() - nonrefundable) / refundQuotient // Before EIP-3529
+ if refund > st.state.GetRefund() {
+ refund = st.state.GetRefund()
+ }
+ }
+ }
+
+ // Refund the gas that was held to limit the amount of computation done.
+ heldRefund := st.calcHeldGasRefund()
+ totalRefund := refund + heldRefund
+ if totalRefund < refund || totalRefund < heldRefund {
+ panic(fmt.Sprintf("calcGasRefund overflow: refund=%d, heldRefund=%d", refund, heldRefund))
+ }
+ return totalRefund
+}
+
+func (st *StateTransition) reimburseGas(rules *chain.Rules, refund, floorGas7623 uint64, usedMultiGas multigas.MultiGas) multigas.MultiGas {
+ if !st.evm.ProcessingHook.IsArbitrum() {
+ if st.gasUsed() < refund {
+ panic(fmt.Sprintf("gasUsed underflow in reimburseGas: gasUsed=%d, refund=%d", st.gasUsed(), refund))
+ }
+ gasUsed := st.gasUsed() - refund
+ if rules.IsPrague {
+ gasUsed = max(floorGas7623, gasUsed)
+ }
+ if st.initialGas < gasUsed {
+ panic(fmt.Sprintf("gasRemaining underflow in reimburseGas (non-Arbitrum): initialGas=%d, gasUsed=%d", st.initialGas, gasUsed))
+ }
+ st.gasRemaining = st.initialGas - gasUsed
+ } else { // Arbitrum: set the multigas refunds
+ forceRefund := st.evm.ProcessingHook.ForceRefundGas()
+ totalRefund := forceRefund + refund
+ // Check for overflow in refund addition
+ if totalRefund < forceRefund || totalRefund < refund {
+ panic(fmt.Sprintf("refund overflow in reimburseGas: forceRefund=%d, refund=%d", forceRefund, refund))
+ }
+ // Check for overflow when adding to gasRemaining
+ if st.gasRemaining > math.MaxUint64-totalRefund {
+ panic(fmt.Sprintf("gasRemaining overflow in reimburseGas (Arbitrum): gasRemaining=%d, totalRefund=%d", st.gasRemaining, totalRefund))
+ }
+ st.gasRemaining += totalRefund
+
+ usedMultiGas = usedMultiGas.WithRefund(refund)
+ if rules.IsPrague && st.evm.ProcessingHook.IsCalldataPricingIncreaseEnabled() {
+ // After EIP-7623: Data-heavy transactions pay the floor gas.
+ if st.gasUsed() < floorGas7623 {
+ usedMultiGas = usedMultiGas.SaturatingIncrement(multigas.ResourceKindL2Calldata, floorGas7623-usedMultiGas.SingleGas())
+ prev := st.gasRemaining
+ if st.initialGas < floorGas7623 {
+ panic(fmt.Sprintf("gasRemaining underflow in reimburseGas (Arbitrum Prague floor): initialGas=%d, floorGas7623=%d", st.initialGas, floorGas7623))
+ }
+ st.gasRemaining = st.initialGas - floorGas7623
+
+ if t := st.evm.Config().Tracer; t != nil && t.OnGasChange != nil {
+ t.OnGasChange(prev, st.gasRemaining, tracing.GasChangeTxDataFloor)
+ }
+ }
+ }
+ }
+ st.refundGas()
+ return usedMultiGas
+}
+
func (st *StateTransition) refundGas() {
// Return ETH for remaining gas, exchanged at the original rate.
remaining := u256.Mul(u256.U64(st.gasRemaining), *st.gasPrice)
if dbg.TraceGas || st.state.Trace() || dbg.TraceAccount(st.msg.From().Handle()) {
fmt.Printf("%d (%d.%d) Refund %x: remaining: %d, price: %d val: %d\n", st.state.BlockNumber(), st.state.TxIndex(), st.state.Incarnation(), st.msg.From(), st.gasRemaining, st.gasPrice, &remaining)
}
+ if arbTrace {
+ fmt.Printf("[ST] refund remaining gas %d to %x\n", remaining, st.msg.From())
+ }
st.state.AddBalance(st.msg.From(), remaining, tracing.BalanceIncreaseGasReturn)
+ // Arbitrum: record the gas refund
+ if tracer := st.evm.Config().Tracer; tracer != nil && tracer.CaptureArbitrumTransfer != nil {
+ from := st.msg.From()
+ tracer.CaptureArbitrumTransfer(nil, &from, remaining, false, "gasRefund")
+ }
+
// Also return remaining gas to the block gas counter so it is
// available for the next transaction.
st.gp.AddGas(st.gasRemaining)
@@ -727,5 +1126,28 @@ func (st *StateTransition) refundGas() {
// gasUsed returns the amount of gas used up by the state transition.
func (st *StateTransition) gasUsed() uint64 {
+ if st.initialGas < st.gasRemaining {
+ panic(fmt.Sprintf("gasUsed underflow: initialGas=%d, gasRemaining=%d", st.initialGas, st.gasRemaining))
+ }
return st.initialGas - st.gasRemaining
}
+
+// IntrinsicGas computes the 'intrinsic gas' for a message with the given data.
+// TODO: convert the input to a struct
+func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028, isEIP3860, isPrague bool, authorizationsLen uint64) (uint64, uint64, error) {
+ // Zero and non-zero bytes are priced differently
+ dataLen := uint64(len(data))
+ dataNonZeroLen := uint64(0)
+ for _, byt := range data {
+ if byt != 0 {
+ dataNonZeroLen++
+ }
+ }
+
+ // TODO arbitrum - do we need a separate intrinsic gas estimator?
+ gas, floorGas7623, overflow := fixedgas.CalcIntrinsicGas(dataLen, dataNonZeroLen, authorizationsLen, uint64(len(accessList)), uint64(accessList.StorageKeys()), isContractCreation, isHomestead, isEIP2028, isEIP3860, isPrague, false /*isAAtxn*/)
+ if overflow {
+ return 0, 0, ErrGasUintOverflow
+ }
+ return gas, floorGas7623, nil
+}
diff --git a/execution/rlp/arb.go b/execution/rlp/arb.go
new file mode 100644
index 00000000000..0f0d151aebb
--- /dev/null
+++ b/execution/rlp/arb.go
@@ -0,0 +1,30 @@
+package rlp
+
+//
+//func DialTransport(ctx context.Context, rawUrl string, transport *http.Transport) (*Client, error) {
+// u, err := url.Parse(rawUrl)
+// if err != nil {
+// return nil, err
+// }
+//
+// var rpcClient *Client
+// switch u.Scheme {
+// case "http", "https":
+// client := &http.Client{
+// Transport: transport,
+// }
+// rpcClient, err = DialHTTPWithClient(rawUrl, client)
+// case "ws", "wss":
+// rpcClient, err = DialWebsocket(ctx, rawUrl, "")
+// case "stdio":
+// return DialStdIO(ctx)
+// case "":
+// return DialIPC(ctx, rawUrl)
+// default:
+// return nil, fmt.Errorf("no known transport for scheme %q in URL %s", u.Scheme, rawUrl)
+// }
+// if err != nil {
+// return nil, err
+// }
+// return rpcClient, nil
+//}
diff --git a/execution/rlp/encode.go b/execution/rlp/encode.go
index 34e36b04421..a672323bdf4 100644
--- a/execution/rlp/encode.go
+++ b/execution/rlp/encode.go
@@ -588,6 +588,19 @@ func Uint256Len(i uint256.Int) int {
return 1 + common.BitLenToByteLen(bitLen)
}
+func BoolLen() int {
+ return 1
+}
+
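+// EncodeBool writes the RLP encoding of val as an integer: 1 for true, 0 for false.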
+func EncodeBool(val bool, w io.Writer, buffer []byte) error {
+ // zero for false, one for true
+ intVal := uint64(0)
+ if val {
+ intVal = 1
+ }
+ return EncodeInt(intVal, w, buffer)
+}
+
// precondition: len(buffer) >= 9
// TODO(yperbasis): replace with EncodeU64?
func EncodeInt(i uint64, w io.Writer, buffer []byte) error {
diff --git a/execution/rlp/hash.go b/execution/rlp/hash.go
new file mode 100644
index 00000000000..967a6062787
--- /dev/null
+++ b/execution/rlp/hash.go
@@ -0,0 +1,13 @@
+package rlp
+
+import (
+ "github.com/erigontech/erigon/common"
+ "golang.org/x/crypto/sha3"
+)
+
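+// RlpHash returns the Keccak-256 hash of the RLP encoding of x.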
+func RlpHash(x interface{}) (h common.Hash) {
+ hw := sha3.NewLegacyKeccak256()
+ Encode(hw, x) //nolint:errcheck
+ hw.Sum(h[:0])
+ return h
+}
diff --git a/execution/rlp/raw.go b/execution/rlp/raw.go
index 0ef83528573..501a777fec1 100644
--- a/execution/rlp/raw.go
+++ b/execution/rlp/raw.go
@@ -262,3 +262,19 @@ func AppendUint64(b []byte, i uint64) []byte {
)
}
}
+
+// BytesSize returns the encoded size of a byte slice.
+func BytesSize(b []byte) uint64 {
+ switch {
+ case len(b) == 0:
+ return 1
+ case len(b) == 1:
+ if b[0] <= 0x7f {
+ return 1
+ } else {
+ return 2
+ }
+ default:
+ return uint64(headsize(uint64(len(b))) + len(b))
+ }
+}
diff --git a/execution/stagedsync/bodydownload/block_propagator.go b/execution/stagedsync/bodydownload/block_propagator.go
index d25f6a1d095..7cb34bc7be7 100644
--- a/execution/stagedsync/bodydownload/block_propagator.go
+++ b/execution/stagedsync/bodydownload/block_propagator.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Erigon Authors
+// Copyright 2025 The Erigon Authors
// This file is part of Erigon.
//
// Erigon is free software: you can redistribute it and/or modify
diff --git a/execution/stagedsync/exec3.go b/execution/stagedsync/exec3.go
index 584955440fe..1b6264ff4e7 100644
--- a/execution/stagedsync/exec3.go
+++ b/execution/stagedsync/exec3.go
@@ -23,11 +23,13 @@ import (
"errors"
"fmt"
"math/big"
+ "runtime"
"slices"
"sync"
"sync/atomic"
"time"
+ "github.com/erigontech/erigon/db/config3"
"golang.org/x/sync/errgroup"
"github.com/erigontech/erigon/common"
@@ -54,6 +56,75 @@ import (
"github.com/erigontech/erigon/node/shards"
)
+const (
+ maxUnwindJumpAllowance = 1000 // Maximum number of blocks we are allowed to unwind
+)
+
+func NewProgress(prevOutputBlockNum, commitThreshold uint64, workersCount int, logPrefix string, logger log.Logger) *Progress {
+ return &Progress{prevTime: time.Now(), prevOutputBlockNum: prevOutputBlockNum, commitThreshold: commitThreshold, workersCount: workersCount, logPrefix: logPrefix, logger: logger}
+}
+
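+// Progress keeps the counters from the previous Log call so that per-interval block, tx and gas rates can be derived.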
+type Progress struct {
+ prevTime time.Time
+ prevTxCount uint64
+ prevGasUsed uint64
+ prevOutputBlockNum uint64
+ prevRepeatCount uint64
+ commitThreshold uint64
+
+ workersCount int
+ logPrefix string
+ logger log.Logger
+}
+
+func (p *Progress) Log(suffix string, rs *state.ParallelExecutionState, in *state.QueueWithRetry, rws *state.ResultsQueue, txCount uint64, gas uint64, inputBlockNum uint64, outputBlockNum uint64, outTxNum uint64, repeatCount uint64, idxStepsAmountInDB float64, commitEveryBlock bool, inMemExec bool) {
+ mxExecStepsInDB.Set(idxStepsAmountInDB * 100)
+ var m runtime.MemStats
+ dbg.ReadMemStats(&m)
+ sizeEstimate := rs.SizeEstimate()
+ currentTime := time.Now()
+ interval := currentTime.Sub(p.prevTime)
+ //var repeatRatio float64
+ //if doneCount > p.prevCount {
+ // repeatRatio = 100.0 * float64(repeatCount-p.prevRepeatCount) / float64(doneCount-p.prevCount)
+ //}
+
+ if len(suffix) > 0 {
+ suffix = " " + suffix
+ }
+
+ if commitEveryBlock {
+ suffix += " Commit every block"
+ }
+
+ gasSec := uint64(float64(gas-p.prevGasUsed) / interval.Seconds())
+ txSec := uint64(float64(txCount-p.prevTxCount) / interval.Seconds())
+ diffBlocks := max(int(outputBlockNum)-int(p.prevOutputBlockNum)+1, 0)
+
+ p.logger.Info(fmt.Sprintf("[%s]"+suffix, p.logPrefix),
+ "blk", outputBlockNum,
+ "blks", diffBlocks,
+ "blk/s", fmt.Sprintf("%.1f", float64(diffBlocks)/interval.Seconds()),
+ "txs", txCount-p.prevTxCount,
+ "tx/s", common.PrettyCounter(txSec),
+ "gas/s", common.PrettyCounter(gasSec),
+ //"pipe", fmt.Sprintf("(%d+%d)->%d/%d->%d/%d", in.NewTasksLen(), in.RetriesLen(), rws.ResultChLen(), rws.ResultChCap(), rws.Len(), rws.Limit()),
+ //"repeatRatio", fmt.Sprintf("%.2f%%", repeatRatio),
+ //"workers", p.workersCount,
+ "buf", fmt.Sprintf("%s/%s", common.ByteCount(sizeEstimate), common.ByteCount(p.commitThreshold)),
+ "stepsInDB", fmt.Sprintf("%.2f", idxStepsAmountInDB),
+ "step", fmt.Sprintf("%.1f", float64(outTxNum)/float64(config3.DefaultStepSize)),
+ "inMem", inMemExec,
+ "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys),
+ )
+
+ p.prevTime = currentTime
+ p.prevTxCount = txCount
+ p.prevGasUsed = gas
+ p.prevOutputBlockNum = outputBlockNum
+ p.prevRepeatCount = repeatCount
+}
+
// Cases:
// 1. Snapshots > ExecutionStage: snapshots can have half-block data `10.4`. Get right txNum from SharedDomains (after SeekCommitment)
// 2. ExecutionStage > Snapshots: no half-block data possible. Rely on DB.
@@ -178,6 +249,11 @@ func ExecV3(ctx context.Context,
return nil
}
+ if maxBlockNum > blockNum+16 {
+ log.Info(fmt.Sprintf("[%s] starting", execStage.LogPrefix()),
+ "from", blockNum, "to", maxBlockNum, "fromTxNum", doms.TxNum(), "offsetFromBlockBeginning", offsetFromBlockBeginning, "initialCycle", initialCycle, "useExternalTx", useExternalTx, "inMem", inMemExec)
+ }
+
if execStage.SyncMode() == stages.ModeApplyingBlocks {
agg.BuildFilesInBackground(doms.TxNum())
}
@@ -684,11 +760,45 @@ func (te *txExecutor) executeBlocks(ctx context.Context, tx kv.TemporalTx, start
te.execRequests <- &execRequest{
b.Number().Uint64(), b.Hash(),
- protocol.NewGasPool(b.GasLimit(), te.cfg.chainConfig.GetMaxBlobGasPerBlock(b.Time())),
+ protocol.NewGasPool(b.GasLimit(), te.cfg.chainConfig.GetMaxBlobGasPerBlock(b.Time(), 0)),
b.BlockAccessList(),
txTasks, applyResults, false, exhausted,
}
+ // ARBITRUM_MERGE
+ /*
+ if ERIGON_COMMIT_EACH_BLOCK || shouldGenerateChangesets || cfg.syncCfg.KeepExecutionProofs {
+ start := time.Now()
+ if blockNum == 0 {
+ executor.domains().GetCommitmentContext().Trie().SetTrace(true)
+ } else {
+ executor.domains().GetCommitmentContext().Trie().SetTrace(false)
+ }
+ rh, err := executor.domains().ComputeCommitment(ctx, true, blockNum, inputTxNum, execStage.LogPrefix())
+ if err != nil {
+ return err
+ }
+
+ if ERIGON_COMMIT_EACH_BLOCK {
+ if !bytes.Equal(rh, header.Root.Bytes()) {
+ logger.Error(fmt.Sprintf("[%s] Wrong trie root of block %d: %x, expected (from header): %x. Block hash: %x", execStage.LogPrefix(), header.Number.Uint64(), rh, header.Root.Bytes(), header.Hash()))
+ return errors.New("wrong trie root")
+ }
+ }
+
+ computeCommitmentDuration += time.Since(start)
+ if shouldGenerateChangesets {
+ executor.domains().SavePastChangesetAccumulator(b.Hash(), blockNum, changeSet)
+ if !inMemExec {
+ if err := changeset2.WriteDiffSet(executor.tx(), blockNum, b.Hash(), changeSet); err != nil {
+ return err
+ }
+ }
+ }
+ executor.domains().SetChangesetAccumulator(nil)
+ }
+ */
+
mxExecBlocks.Add(1)
if offsetFromBlockBeginning > 0 {
@@ -755,6 +865,8 @@ func (te *txExecutor) commit(ctx context.Context, execStage *StageState, tx kv.T
return tx, t2, nil
}
+var ERIGON_COMMIT_EACH_BLOCK = dbg.EnvBool("ERIGON_COMMIT_EACH_BLOCK", false)
+
// nolint
func dumpPlainStateDebug(tx kv.TemporalRwTx, doms *execctx.SharedDomains) {
if doms != nil {
diff --git a/execution/stagedsync/exec3_serial.go b/execution/stagedsync/exec3_serial.go
index 0a9c7a7ccdd..55c27a207d8 100644
--- a/execution/stagedsync/exec3_serial.go
+++ b/execution/stagedsync/exec3_serial.go
@@ -385,6 +385,7 @@ func (se *serialExecutor) executeBlock(ctx context.Context, tasks []exec.Task, i
txTask.Config = se.cfg.chainConfig
txTask.Engine = se.cfg.engine
+ se.worker.SetArbitrumWasmDB(se.cfg.arbitrumWasmDB)
result := se.worker.RunTxTask(txTask)
if err := func() error {
@@ -435,7 +436,11 @@ func (se *serialExecutor) executeBlock(ctx context.Context, tasks []exec.Task, i
if !se.isMining && startTxIndex == 0 && !isInitialCycle {
se.cfg.notifications.RecentReceipts.Add(blockReceipts, txTask.Txs, txTask.Header)
}
- checkReceipts := !se.cfg.vmConfig.StatelessExec && se.cfg.chainConfig.IsByzantium(txTask.BlockNumber()) && !se.cfg.vmConfig.NoReceipts && !se.isMining
+ checkReceipts := !se.cfg.vmConfig.StatelessExec && se.cfg.chainConfig.IsByzantium(txTask.BlockNumber()) && !se.cfg.vmConfig.NoReceipts && !se.isMining
+ // TODO arbitrum enable receipt checking
+ if se.cfg.chainConfig.IsArbitrum() {
+ checkReceipts = false
+ }
if txTask.BlockNumber() > 0 && startTxIndex == 0 {
//Disable check for genesis. Maybe need somehow improve it in future - to satisfy TestExecutionSpec
@@ -499,6 +504,7 @@ func (se *serialExecutor) executeBlock(ctx context.Context, tasks []exec.Task, i
if se.cfg.hd != nil && se.cfg.hd.POSSync() && errors.Is(err, rules.ErrInvalidBlock) {
se.cfg.hd.ReportBadHeaderPoS(txTask.Header.Hash(), txTask.Header.ParentHash)
}
+ os.Exit(1)
if se.cfg.badBlockHalt {
return false, err
}
@@ -574,7 +580,7 @@ func (se *serialExecutor) executeBlock(ctx context.Context, tasks []exec.Task, i
if rawtemporaldb.ReceiptStoresFirstLogIdx(se.applyTx) {
logIndexAfterTx -= uint32(len(result.Logs))
}
- if err := rawtemporaldb.AppendReceipt(se.doms.AsPutDel(se.applyTx), logIndexAfterTx, cumGasUsed, se.blobGasUsed, txTask.TxNum); err != nil {
+ if err := rawtemporaldb.AppendReceipt(se.doms.AsPutDel(se.applyTx.(kv.TemporalTx)), logIndexAfterTx, cumGasUsed, se.blobGasUsed, txTask.TxNum); err != nil {
return false, err
}
}
diff --git a/execution/stagedsync/headerdownload/header_algos.go b/execution/stagedsync/headerdownload/header_algos.go
index 335061a577b..a7ae4a3cdd7 100644
--- a/execution/stagedsync/headerdownload/header_algos.go
+++ b/execution/stagedsync/headerdownload/header_algos.go
@@ -679,6 +679,10 @@ func (hd *HeaderDownload) SetHeaderToDownloadPoS(hash common.Hash, height uint64
}
}
+func (hd *HeaderDownload) SetSynced() {
+ hd.posStatus = Synced
+}
+
func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx kv.Getter, peerId [64]byte) ([]PenaltyItem, error) {
if len(csHeaders) == 0 {
return nil, nil
diff --git a/execution/stagedsync/stage_execute.go b/execution/stagedsync/stage_execute.go
index 88ef0dc9a3b..b14c2a44a2e 100644
--- a/execution/stagedsync/stage_execute.go
+++ b/execution/stagedsync/stage_execute.go
@@ -89,6 +89,8 @@ type ExecuteBlockCfg struct {
silkworm *silkworm.Silkworm
blockProduction bool
experimentalBAL bool
+
+ arbitrumWasmDB wasmdb.WasmIface
}
func StageExecuteBlocksCfg(
@@ -109,6 +111,8 @@ func StageExecuteBlocksCfg(
syncCfg ethconfig.Sync,
silkworm *silkworm.Silkworm,
experimentalBAL bool,
+
+ arbitrumWasmDB wasmdb.WasmIface,
) ExecuteBlockCfg {
if dirs.SnapDomain == "" {
panic("empty `dirs` variable")
@@ -132,6 +136,7 @@ func StageExecuteBlocksCfg(
syncCfg: syncCfg,
silkworm: silkworm,
experimentalBAL: experimentalBAL,
+ arbitrumWasmDB: arbitrumWasmDB,
}
}
diff --git a/execution/stagedsync/stage_headers.go b/execution/stagedsync/stage_headers.go
index 1a8baa3ca53..6ff426e0f24 100644
--- a/execution/stagedsync/stage_headers.go
+++ b/execution/stagedsync/stage_headers.go
@@ -67,6 +67,9 @@ type HeadersCfg struct {
notifications *shards.Notifications
syncConfig ethconfig.Sync
+
+ L2RPCAddr string // L2 RPC address for Arbitrum
+ ReceiptRPCAddr string // L2 RPC address for fetching receipts (if different from L2RPCAddr)
}
func StageHeadersCfg(
@@ -84,6 +87,8 @@ func StageHeadersCfg(
blockWriter *blockio.BlockWriter,
tmpdir string,
notifications *shards.Notifications,
+ L2RPCAddr string, // L2 RPC address for Arbitrum
+ ReceiptRPCAddr string,
) HeadersCfg {
return HeadersCfg{
db: db,
@@ -100,6 +105,8 @@ func StageHeadersCfg(
blockReader: blockReader,
blockWriter: blockWriter,
notifications: notifications,
+ L2RPCAddr: L2RPCAddr,
+ ReceiptRPCAddr: ReceiptRPCAddr,
}
}
@@ -118,9 +125,161 @@ func SpawnStageHeaders(s *StageState, u Unwinder, ctx context.Context, tx kv.RwT
return err
}
}
- cfg.hd.Progress()
- return HeadersPOW(s, u, ctx, tx, cfg, test, useExternalTx, logger)
+ if !cfg.chainConfig.IsArbitrum() {
+ return HeadersPOW(s, u, ctx, tx, cfg, test, useExternalTx, logger)
+ }
+
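+ // Arbitrum: skip the P2P header download path and instead fetch blocks (and receipts, if a separate endpoint is configured) from the L2 RPC, committing them directly.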
+ jsonRpcAddr := cfg.L2RPCAddr
+ client, err := rpc.Dial(jsonRpcAddr, log.Root())
+ if err != nil {
+ log.Warn("Error connecting to RPC", "err", err)
+ return err
+ }
+
+ var receiptClient *rpc.Client
+ if cfg.ReceiptRPCAddr != "" {
+ receiptClient, err = rpc.Dial(cfg.ReceiptRPCAddr, log.Root())
+ if err != nil {
+ log.Warn("Error connecting to receipt RPC", "err", err, "url", cfg.ReceiptRPCAddr)
+ return err
+ }
+ }
+
+ var curBlock uint64
+ curBlock, err = stages.GetStageProgress(tx, stages.Headers)
+ if err != nil {
+ log.Warn("can't check current block", "err", err)
+ }
+ // check the next block we're going to execute, not the one already executed
+ nextBlock := curBlock + 1
+ if err := checkL2RPCEndpointsHealth(ctx, client, receiptClient, nextBlock, cfg.L2RPCAddr, cfg.ReceiptRPCAddr); err != nil {
+ return err
+ }
+
+ // Query latest block number.
+ var latestBlockHex string
+ if err := client.CallContext(context.Background(), &latestBlockHex, "eth_blockNumber"); err != nil {
+ log.Warn("Error fetching latest block number", "err", err)
+ return err
+ }
+
+ latestBlock := new(big.Int)
+ latestBlock.SetString(latestBlockHex[2:], 16)
+ if curBlock > 0 {
+ curBlock++
+ }
+ firstBlock := curBlock
+
+ if firstBlock >= latestBlock.Uint64() {
+ return nil
+ }
+ latestBlock.SetUint64(min(latestBlock.Uint64(), firstBlock+uint64(cfg.syncConfig.LoopBlockLimit)))
+
+ if firstBlock+1 > latestBlock.Uint64() { // print only if 1+ blocks available
+ log.Info("[Arbitrum] Headers stage started", "from", firstBlock, "lastAvailableBlock", latestBlock.Uint64(), "extTx", useExternalTx)
+ }
+
+ finaliseState := func(tx kv.RwTx, lastCommittedBlockNum uint64) error {
+ err = cfg.hd.ReadProgressFromDb(tx)
+ if err != nil {
+ return fmt.Errorf("error reading header progress from db: %w", err)
+ }
+ //
+ //if err = cfg.blockWriter.FillHeaderNumberIndex(s.LogPrefix(), tx, os.TempDir(), firstBlock, lastCommittedBlockNum+1, ctx, logger); err != nil {
+ // return err
+ //}
+ //
+ //if err := rawdbv3.TxNums.Truncate(tx, firstBlock); err != nil {
+ // return err
+ //}
+ //if err := cfg.blockWriter.MakeBodiesCanonical(tx, firstBlock); err != nil {
+ // return fmt.Errorf("failed to make bodies canonical %d: %w", firstBlock, err)
+ //}
+ // This will update bd.maxProgress
+ if err = cfg.bodyDownload.UpdateFromDb(tx); err != nil {
+ return err
+ }
+ //defer cfg.bodyDownload.ClearBodyCache()
+ cfg.hd.SetSynced()
+ return nil
+ }
+
+ lastCommittedBlockNum, err := snapshots.GetAndCommitBlocks(ctx, cfg.db, tx, client, receiptClient, firstBlock, latestBlock.Uint64(), false, true, false, finaliseState)
+ if err != nil {
+ return fmt.Errorf("error fetching and committing blocks from rpc: %w", err)
+ }
+
+ if !useExternalTx {
+ if err = tx.Commit(); err != nil {
+ return fmt.Errorf("commit failed: %w", err)
+ }
+ tx = nil
+ }
+
+ ethdb.InitialiazeLocalWasmTarget()
+
+ if lastCommittedBlockNum-firstBlock > 1 {
+ log.Info("[Arbitrum] Headers stage completed", "latestProcessedBlock", lastCommittedBlockNum,
+ "from", firstBlock, "to", latestBlock.Uint64(), "wasTxCommitted", !useExternalTx)
+ }
+ return nil
+}
+
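+// checkL2RPCEndpointsHealth verifies that the block RPC can serve eth_getBlockByNumber for the given block and,
+// when a receipt client is configured and the block has transactions, that the receipt RPC returns a matching
+// receipt for the block's first transaction.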
+func checkL2RPCEndpointsHealth(ctx context.Context, blockClient, receiptClient *rpc.Client, blockNum uint64, blockRPCAddr, receiptRPCAddr string) error {
+ if blockClient == nil {
+ return nil
+ }
+
+ checkBlockNum := fmt.Sprintf("0x%x", blockNum)
+
+ var blockResult map[string]interface{}
+ if err := blockClient.CallContext(ctx, &blockResult, "eth_getBlockByNumber", checkBlockNum, true); err != nil {
+ return fmt.Errorf("--l2rpc %q cannot respond to eth_getBlockByNumber for block %d: %w", blockRPCAddr, blockNum, err)
+ }
+ if blockResult == nil {
+ return fmt.Errorf("--l2rpc %q returned nil for block %d", blockRPCAddr, blockNum)
+ }
+
+ txs, ok := blockResult["transactions"].([]interface{})
+ if !ok || len(txs) == 0 {
+ log.Info("[Arbitrum] L2 RPC health check: block has no transactions, skipping receipt check", "block", blockNum)
+ return nil
+ }
+
+ var txHash string
+ if txMap, ok := txs[0].(map[string]interface{}); ok {
+ if h, ok := txMap["hash"].(string); ok {
+ txHash = h
+ }
+ }
+ if txHash == "" {
+ log.Warn("[Arbitrum] L2 RPC health check: could not extract tx hash from block, skipping receipt check", "block", blockNum)
+ return nil
+ }
+
+ if receiptClient == nil {
+ log.Info("[Arbitrum] L2 RPC health check: receipt client not configured, skipping receipt check", "block", blockNum)
+ return nil
+ }
+
+ var receiptResult map[string]interface{}
+ if err := receiptClient.CallContext(ctx, &receiptResult, "eth_getTransactionReceipt", txHash); err != nil {
+ return fmt.Errorf("--l2rpc.receipt %q cannot respond to eth_getTransactionReceipt for tx %s: %w", receiptRPCAddr, txHash, err)
+ }
+ if receiptResult == nil {
+ return fmt.Errorf("--l2rpc.receipt %q returned nil for tx %s", receiptRPCAddr, txHash)
+ }
+ receiptTxHash, ok := receiptResult["transactionHash"].(string)
+ if !ok || receiptTxHash == "" {
+ return fmt.Errorf("--l2rpc.receipt %q receipt missing transactionHash field or field is not a string for tx %s", receiptRPCAddr, txHash)
+ }
+ if receiptTxHash != txHash {
+ return fmt.Errorf("--l2rpc.receipt %q returned mismatched receipt: requested tx %s but got %s", receiptRPCAddr, txHash, receiptTxHash)
+ }
+
+ log.Info("[Arbitrum] L2 RPC endpoints health check passed", "blockEndpoint", blockRPCAddr, "receiptEndpoint", receiptRPCAddr, "checkedBlock", blockNum)
+ return nil
}
// HeadersPOW progresses Headers stage for Proof-of-Work headers
diff --git a/execution/stagedsync/stage_headers_health_test.go b/execution/stagedsync/stage_headers_health_test.go
new file mode 100644
index 00000000000..30967927b8d
--- /dev/null
+++ b/execution/stagedsync/stage_headers_health_test.go
@@ -0,0 +1,253 @@
+package stagedsync
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/erigontech/erigon/log/v3"
+ "github.com/erigontech/erigon/rpc"
+)
+
+func TestCheckL2RPCEndpointsHealth_NilClients(t *testing.T) {
+ ctx := context.Background()
+
+ err := checkL2RPCEndpointsHealth(ctx, nil, nil, 100, "", "")
+ require.NoError(t, err)
+}
+
+func TestCheckL2RPCEndpointsHealth_NilReceiptClient(t *testing.T) {
+ responses := map[string]interface{}{
+ "eth_getBlockByNumber": map[string]interface{}{
+ "number": "0x64",
+ "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "transactions": []interface{}{
+ map[string]interface{}{
+ "hash": "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890",
+ },
+ },
+ },
+ }
+ srv := newMockRPCServer(t, responses)
+ defer srv.Close()
+
+ client, err := rpc.Dial(srv.URL, log.New())
+ require.NoError(t, err)
+ defer client.Close()
+
+ err = checkL2RPCEndpointsHealth(context.Background(), client, nil, 100, srv.URL, "")
+ require.NoError(t, err)
+}
+
+func TestCheckL2RPCEndpointsHealth_Success(t *testing.T) {
+ txHash := "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
+ responses := map[string]interface{}{
+ "eth_getBlockByNumber": map[string]interface{}{
+ "number": "0x64",
+ "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "transactions": []interface{}{
+ map[string]interface{}{
+ "hash": txHash,
+ },
+ },
+ },
+ "eth_getTransactionReceipt": map[string]interface{}{
+ "transactionHash": txHash,
+ "status": "0x1",
+ },
+ }
+ srv := newMockRPCServer(t, responses)
+ defer srv.Close()
+
+ client, err := rpc.Dial(srv.URL, log.New())
+ require.NoError(t, err)
+ defer client.Close()
+
+ err = checkL2RPCEndpointsHealth(context.Background(), client, client, 100, srv.URL, srv.URL)
+ require.NoError(t, err)
+}
+
+func TestCheckL2RPCEndpointsHealth_BlockError(t *testing.T) {
+ responses := map[string]interface{}{
+ "eth_getBlockByNumber": nil,
+ }
+ srv := newMockRPCServer(t, responses)
+ defer srv.Close()
+
+ client, err := rpc.Dial(srv.URL, log.New())
+ require.NoError(t, err)
+ defer client.Close()
+
+ err = checkL2RPCEndpointsHealth(context.Background(), client, client, 100, srv.URL, srv.URL)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "returned nil for block")
+}
+
+func TestCheckL2RPCEndpointsHealth_ReceiptMismatch(t *testing.T) {
+ txHash := "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
+ responses := map[string]interface{}{
+ "eth_getBlockByNumber": map[string]interface{}{
+ "number": "0x64",
+ "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "transactions": []interface{}{
+ map[string]interface{}{
+ "hash": txHash,
+ },
+ },
+ },
+ "eth_getTransactionReceipt": map[string]interface{}{
+ "transactionHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+ "status": "0x0",
+ },
+ }
+ srv := newMockRPCServer(t, responses)
+ defer srv.Close()
+
+ client, err := rpc.Dial(srv.URL, log.New())
+ require.NoError(t, err)
+ defer client.Close()
+
+ err = checkL2RPCEndpointsHealth(context.Background(), client, client, 100, srv.URL, srv.URL)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "returned mismatched receipt")
+}
+
+func TestCheckL2RPCEndpointsHealth_EmptyTransactions(t *testing.T) {
+ responses := map[string]interface{}{
+ "eth_getBlockByNumber": map[string]interface{}{
+ "number": "0x64",
+ "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "transactions": []interface{}{},
+ },
+ }
+ srv := newMockRPCServer(t, responses)
+ defer srv.Close()
+
+ client, err := rpc.Dial(srv.URL, log.New())
+ require.NoError(t, err)
+ defer client.Close()
+
+ err = checkL2RPCEndpointsHealth(context.Background(), client, client, 100, srv.URL, srv.URL)
+ require.NoError(t, err)
+}
+
+func TestCheckL2RPCEndpointsHealth_NilReceipt(t *testing.T) {
+ txHash := "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"
+ responses := map[string]interface{}{
+ "eth_getBlockByNumber": map[string]interface{}{
+ "number": "0x64",
+ "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef",
+ "transactions": []interface{}{
+ map[string]interface{}{
+ "hash": txHash,
+ },
+ },
+ },
+ "eth_getTransactionReceipt": nil,
+ }
+ srv := newMockRPCServer(t, responses)
+ defer srv.Close()
+
+ client, err := rpc.Dial(srv.URL, log.New())
+ require.NoError(t, err)
+ defer client.Close()
+
+ err = checkL2RPCEndpointsHealth(context.Background(), client, client, 100, srv.URL, srv.URL)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "returned nil for tx")
+}
+
+func newMockRPCServer(t *testing.T, responses map[string]interface{}) *httptest.Server {
+ t.Helper()
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ var req struct {
+ Method string `json:"method"`
+ Params []interface{} `json:"params"`
+ ID interface{} `json:"id"`
+ }
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ result, ok := responses[req.Method]
+ if !ok {
+ resp := map[string]interface{}{
+ "jsonrpc": "2.0",
+ "id": req.ID,
+ "error": map[string]interface{}{
+ "code": -32601,
+ "message": "method not found",
+ },
+ }
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(resp)
+ return
+ }
+
+ resp := map[string]interface{}{
+ "jsonrpc": "2.0",
+ "id": req.ID,
+ "result": result,
+ }
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(resp)
+ }))
+}
+
+// TestCheckL2RPCEndpointsHealth_Integration tests against real RPC endpoints.
+// Set environment variables to run:
+// - L2RPC: block RPC endpoint URL (required)
+// - L2RPC_RECEIPT: receipt RPC endpoint URL (optional, defaults to L2RPC)
+// - L2RPC_BLOCK: block number to check (optional, defaults to 1)
+//
+// Example:
+//
+// L2RPC=http://localhost:8547 go test -v -run TestCheckL2RPCEndpointsHealth_Integration
+// L2RPC=http://localhost:8547 L2RPC_RECEIPT=http://localhost:8548 L2RPC_BLOCK=1000 go test -v -run TestCheckL2RPCEndpointsHealth_Integration
+func TestCheckL2RPCEndpointsHealth_Integration(t *testing.T) {
+ l2rpc := os.Getenv("L2RPC")
+ if l2rpc == "" {
+ t.Skip("L2RPC environment variable not set, skipping integration test")
+ }
+
+ l2rpcReceipt := os.Getenv("L2RPC_RECEIPT")
+ if l2rpcReceipt == "" {
+ l2rpcReceipt = l2rpc
+ }
+
+ blockNum := uint64(1)
+ if blockStr := os.Getenv("L2RPC_BLOCK"); blockStr != "" {
+ var err error
+ blockNum, err = strconv.ParseUint(blockStr, 10, 64)
+ require.NoError(t, err, "invalid L2RPC_BLOCK value")
+ }
+
+ logger := log.New()
+
+ blockClient, err := rpc.Dial(l2rpc, logger)
+ require.NoError(t, err, "failed to connect to L2RPC endpoint")
+ defer blockClient.Close()
+
+ var receiptClient *rpc.Client
+ if l2rpcReceipt == l2rpc {
+ receiptClient = blockClient
+ } else {
+ receiptClient, err = rpc.Dial(l2rpcReceipt, logger)
+ require.NoError(t, err, "failed to connect to L2RPC_RECEIPT endpoint")
+ defer receiptClient.Close()
+ }
+
+ err = checkL2RPCEndpointsHealth(context.Background(), blockClient, receiptClient, blockNum, l2rpc, l2rpcReceipt)
+ require.NoError(t, err)
+
+ t.Logf("Health check passed for block %d", blockNum)
+ t.Logf(" Block endpoint: %s", l2rpc)
+ t.Logf(" Receipt endpoint: %s", l2rpcReceipt)
+}
diff --git a/execution/stagedsync/stage_mining_exec.go b/execution/stagedsync/stage_mining_exec.go
index 54295903f43..712fda58596 100644
--- a/execution/stagedsync/stage_mining_exec.go
+++ b/execution/stagedsync/stage_mining_exec.go
@@ -259,7 +259,8 @@ func getNextTransactions(
remainingGas := header.GasLimit - header.GasUsed
remainingBlobGas := uint64(0)
if header.BlobGasUsed != nil {
- maxBlobs := cfg.chainConfig.GetMaxBlobsPerBlock(header.Time)
+ arbOsVersion := types.GetArbOSVersion(header, cfg.chainConfig)
+ maxBlobs := cfg.chainConfig.GetMaxBlobsPerBlock(header.Time, arbOsVersion)
if cfg.miningState.MiningConfig.MaxBlobsPerBlock != nil {
maxBlobs = min(maxBlobs, *cfg.miningState.MiningConfig.MaxBlobsPerBlock)
}
@@ -343,7 +344,8 @@ func filterBadTransactions(transactions []types.Transaction, chainID *uint256.In
// Make sure the sender is an EOA (EIP-3607)
if !account.IsEmptyCodeHash() && transaction.Type() != types.AccountAbstractionTxType {
isEoaCodeAllowed := false
- if config.IsPrague(header.Time) || config.IsBhilai(header.Number.Uint64()) {
+ arbOsVersion := types.GetArbOSVersion(header, config)
+ if config.IsPrague(header.Time, arbOsVersion) || config.IsBhilai(header.Number.Uint64()) {
code, err := simStateReader.ReadAccountCode(senderAddress)
if err != nil {
return nil, err
@@ -435,7 +437,8 @@ func addTransactionsToMiningBlock(
txnIdx := ibs.TxnIndex() + 1
gasPool := new(protocol.GasPool).AddGas(header.GasLimit - header.GasUsed)
if header.BlobGasUsed != nil {
- gasPool.AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(header.Time) - *header.BlobGasUsed)
+ arbOsVersion := types.GetArbOSVersion(header, chainConfig)
+ gasPool.AddBlobGas(chainConfig.GetMaxBlobGasPerBlock(header.Time, arbOsVersion) - *header.BlobGasUsed)
}
signer := types.MakeSigner(chainConfig, header.Number.Uint64(), header.Time)
diff --git a/execution/stagedsync/stage_snapshots.go b/execution/stagedsync/stage_snapshots.go
index ac2de420363..89d6ec4e219 100644
--- a/execution/stagedsync/stage_snapshots.go
+++ b/execution/stagedsync/stage_snapshots.go
@@ -233,6 +233,25 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
return err
}
+ { // Now can open all files
+ if err := cfg.blockReader.Snapshots().OpenFolder(); err != nil {
+ return err
+ }
+
+ if cfg.chainConfig.Bor != nil {
+ if err := cfg.blockReader.BorSnapshots().OpenFolder(); err != nil {
+ return err
+ }
+ }
+ if err := agg.OpenFolder(); err != nil {
+ return err
+ }
+
+ if err := firstNonGenesisCheck(tx, cfg.blockReader.Snapshots(), s.LogPrefix(), cfg.dirs); err != nil {
+ return err
+ }
+ }
+
// want to add remaining snapshots here?
{ // Now can open all files
@@ -328,6 +347,21 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R
return nil
}
+func firstNonGenesisCheck(tx kv.RwTx, snapshots snapshotsync.BlockSnapshots, logPrefix string, dirs datadir.Dirs) error {
+ firstNonGenesis, err := rawdbv3.SecondKey(tx, kv.Headers)
+ if err != nil {
+ return err
+ }
+ if firstNonGenesis != nil {
+ firstNonGenesisBlockNumber := binary.BigEndian.Uint64(firstNonGenesis)
+ if snapshots.SegmentsMax()+1 < firstNonGenesisBlockNumber {
+ log.Warn(fmt.Sprintf("[%s] Some blocks are not in snapshots and not in db. This could have happened because the node was stopped at the wrong time; you can fix this with 'rm -rf %s' (this is not equivalent to a full resync)", logPrefix, dirs.Chaindata), "max_in_snapshots", snapshots.SegmentsMax(), "min_in_db", firstNonGenesisBlockNumber)
+ return fmt.Errorf("some blocks are not in snapshots and not in db. This could have happened because the node was stopped at the wrong time; you can fix this with 'rm -rf %s' (this is not equivalent to a full resync)", dirs.Chaindata)
+ }
+ }
+ return nil
+}
+
func firstNonGenesisCheck(tx kv.RwTx, snapshots services.BlockSnapshots, logPrefix string, dirs datadir.Dirs) error {
firstNonGenesis, err := rawdbv3.SecondKey(tx, kv.Headers)
if err != nil {
@@ -403,6 +437,7 @@ func SnapshotsPrune(s *PruneState, cfg SnapshotsCfg, ctx context.Context, tx kv.
cfg.blockRetire.SetWorkers(1)
}
+ noDl := cfg.snapshotDownloader == nil || reflect.ValueOf(cfg.snapshotDownloader).IsNil()
started := cfg.blockRetire.RetireBlocksInBackground(
ctx,
minBlockNumber,
diff --git a/execution/stagedsync/stageloop/stageloop.go b/execution/stagedsync/stageloop/stageloop.go
index fb33e79f36d..c8b5e08c552 100644
--- a/execution/stagedsync/stageloop/stageloop.go
+++ b/execution/stagedsync/stageloop/stageloop.go
@@ -296,6 +296,7 @@ func stageLoopIteration(ctx context.Context, db kv.TemporalRwDB, sd *execctx.Sha
return false, err
}
logCtx := sync.PrintTimings()
+
//var tableSizes []interface{}
var commitTime time.Duration
if canRunCycleInOneTransaction && !externalTx {
@@ -338,8 +339,11 @@ func stageLoopIteration(ctx context.Context, db kv.TemporalRwDB, sd *execctx.Sha
logCtx = append(logCtx, "mgas/s", mgasPerSec)
}
}
- logCtx = append(logCtx, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
- logger.Info("Timings", logCtx...)
+ withTimings := len(logCtx) > 0
+ if withTimings {
+ logCtx = append(logCtx, "alloc", common.ByteCount(m.Alloc), "sys", common.ByteCount(m.Sys))
+ logger.Info("Timings", logCtx...)
+ }
//if len(tableSizes) > 0 {
// logger.Info("Tables", tableSizes...)
//}
@@ -772,11 +776,11 @@ func NewDefaultStages(ctx context.Context,
return stagedsync.DefaultStages(ctx,
stagedsync.StageSnapshotsCfg(db, controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.InternalCL && cfg.CaplinConfig.ArchiveBlocks, cfg.CaplinConfig.ArchiveBlobs, cfg.CaplinConfig.ArchiveStates, silkworm, cfg.Prune),
- stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications),
+ stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, blockReader, blockWriter, dirs.Tmp, notifications, cfg.L2RPCAddr, cfg.L2RPCReceiptAddr),
stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, controlServer.ChainConfig, blockReader, blockWriter),
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
- stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{Tracer: tracingHooks}, notifications, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg), cfg.ExperimentalBAL),
+ stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{Tracer: tracingHooks}, notifications, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg), cfg.ExperimentalBAL, wasmdb.OpenArbitrumWasmDB(ctx, dirs.ArbitrumWasm)),
stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator), runInTestMode)
}
@@ -810,7 +814,7 @@ func NewPipelineStages(ctx context.Context,
stagedsync.StageSnapshotsCfg(db, controlServer.ChainConfig, cfg.Sync, dirs, blockRetire, snapDownloader, blockReader, notifications, cfg.InternalCL && cfg.CaplinConfig.ArchiveBlocks, cfg.CaplinConfig.ArchiveBlobs, cfg.CaplinConfig.ArchiveStates, silkworm, cfg.Prune),
stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
- stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{Tracer: tracingHooks}, notifications, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg), cfg.ExperimentalBAL),
+ stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{Tracer: tracingHooks}, notifications, cfg.StateStream, false, dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg), cfg.ExperimentalBAL, wasmdb.OpenArbitrumWasmDB(ctx, dirs.ArbitrumWasm)),
stagedsync.StageTxLookupCfg(db, cfg.Prune, dirs.Tmp, controlServer.ChainConfig.Bor, blockReader),
stagedsync.StageFinishCfg(db, dirs.Tmp, forkValidator),
stagedsync.StageWitnessProcessingCfg(db, controlServer.ChainConfig, controlServer.WitnessBuffer))
@@ -821,9 +825,9 @@ func NewInMemoryExecution(ctx context.Context, db kv.TemporalRwDB, cfg *ethconfi
silkworm *silkworm.Silkworm, logger log.Logger) *stagedsync.Sync {
return stagedsync.New(
cfg.Sync,
- stagedsync.StateStages(ctx, stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil),
+ stagedsync.StateStages(ctx, stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, controlServer.ChainConfig, cfg.Sync, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, controlServer.Penalize, cfg.BatchSize, false, blockReader, blockWriter, dirs.Tmp, nil, cfg.L2RPCAddr, cfg.L2RPCReceiptAddr),
stagedsync.StageBodiesCfg(db, controlServer.Bd, controlServer.SendBodyRequest, controlServer.Penalize, controlServer.BroadcastNewBlock, cfg.Sync.BodyDownloadTimeoutSeconds, controlServer.ChainConfig, blockReader, blockWriter), stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, cfg.Sync, true, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd),
- stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications, cfg.StateStream, true, cfg.Dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg), cfg.ExperimentalBAL)),
+ stagedsync.StageExecuteBlocksCfg(db, cfg.Prune, cfg.BatchSize, controlServer.ChainConfig, controlServer.Engine, &vm.Config{}, notifications, cfg.StateStream, true, cfg.Dirs, blockReader, controlServer.Hd, cfg.Genesis, cfg.Sync, SilkwormForExecutionStage(silkworm, cfg), cfg.ExperimentalBAL, wasmdb.OpenArbitrumWasmDB(ctx, dirs.ArbitrumWasm))),
stagedsync.StateUnwindOrder,
nil, /* pruneOrder */
logger,
diff --git a/execution/state/arb.go b/execution/state/arb.go
new file mode 100644
index 00000000000..2b2d5ed0229
--- /dev/null
+++ b/execution/state/arb.go
@@ -0,0 +1,489 @@
+package state
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "runtime"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/log/v3"
+ "github.com/erigontech/erigon/execution/tracing"
+ "github.com/erigontech/erigon/execution/types/accounts"
+ "github.com/erigontech/erigon/execution/vm/evmtypes"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+ "github.com/erigontech/erigon/arb/lru"
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/types"
+
+ "github.com/erigontech/nitro-erigon/util/arbmath"
+)
+
+var (
+ // Defines prefix bytes for Stylus WASM program bytecode
+ // when deployed on-chain via a user-initiated transaction.
+ // These byte prefixes are meant to conflict with the L1 contract EOF
+ // validation rules so they can be sufficiently differentiated from EVM bytecode.
+ // This allows us to store WASM programs as code in the stateDB side-by-side
+ // with EVM contracts, but match against these prefix bytes when loading code
+ // to execute the WASMs through Stylus rather than the EVM.
+ stylusEOFMagic = byte(0xEF)
+ stylusEOFMagicSuffix = byte(0xF0)
+ stylusEOFVersion = byte(0x00)
+ // 4th byte specifies the Stylus dictionary used during compression
+
+ StylusDiscriminant = []byte{stylusEOFMagic, stylusEOFMagicSuffix, stylusEOFVersion}
+)
+
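+// NewArbitrum attaches fresh Arbitrum bookkeeping (Stylus wasm caches and the unexpected balance
+// delta) to the given IntraBlockState and returns it behind the Arbitrum-specific interface.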
+func NewArbitrum(ibs *IntraBlockState) IntraBlockStateArbitrum {
+ ibs.arbExtraData = &ArbitrumExtraData{
+ unexpectedBalanceDelta: new(uint256.Int),
+ userWasms: map[common.Hash]ActivatedWasm{},
+ activatedWasms: map[common.Hash]ActivatedWasm{},
+ recentWasms: NewRecentWasms(),
+ }
+ return ibs // TODO
+}
+
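+// IntraBlockStateArbitrum extends the generic EVM state interface with the Arbitrum/Nitro-specific
+// operations: Stylus wasm activation and lookup, Stylus memory-page accounting, and the extra
+// commit/receipt/log helpers used by the Nitro execution path.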
+type IntraBlockStateArbitrum interface {
+ evmtypes.IntraBlockState
+
+ // Arbitrum: manage Stylus wasms
+ ActivateWasm(moduleHash common.Hash, asmMap map[wasmdb.WasmTarget][]byte)
+ TryGetActivatedAsm(target wasmdb.WasmTarget, moduleHash common.Hash) (asm []byte, err error)
+ TryGetActivatedAsmMap(targets []wasmdb.WasmTarget, moduleHash common.Hash) (asmMap map[wasmdb.WasmTarget][]byte, err error)
+ RecordCacheWasm(wasm CacheWasm)
+ RecordEvictWasm(wasm EvictWasm)
+ GetRecentWasms() RecentWasms
+ UserWasms() UserWasms
+ ActivatedAsm(target wasmdb.WasmTarget, moduleHash common.Hash) (asm []byte, err error)
+ WasmStore() kv.RwDB
+ WasmCacheTag() uint32
+ WasmTargets() []wasmdb.WasmTarget
+
+ // Arbitrum: track stylus's memory footprint
+ GetStylusPages() (uint16, uint16)
+ GetStylusPagesOpen() uint16
+ SetStylusPagesOpen(open uint16)
+ AddStylusPages(new uint16) (uint16, uint16)
+ AddStylusPagesEver(new uint16)
+
+ HasSelfDestructed(addr common.Address) bool
+
+ StartRecording()
+ RecordProgram(targets []wasmdb.WasmTarget, moduleHash common.Hash)
+
+ GetStorageRoot(address common.Address) common.Hash
+ GetUnexpectedBalanceDelta() *uint256.Int
+
+ // SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription
+ SetArbFinalizer(f func(*ArbitrumExtraData))
+
+ SetTxContext(bn uint64, ti int)
+ IntermediateRoot(_ bool) common.Hash
+ GetReceiptsByHash(hash common.Hash) types.Receipts
+ SetBalance(addr common.Address, amount uint256.Int, reason tracing.BalanceChangeReason) error
+ Commit(bn uint64, _ bool) (common.Hash, error)
+ FinalizeTx(chainRules *chain.Rules, stateWriter StateWriter) error
+ GetLogs(txIndex int, txnHash common.Hash, blockNumber uint64, blockHash common.Hash) types.Logs
+ // TxIndex returns the current transaction index set by Prepare.
+ TxnIndex() int
+ IsTxFiltered() bool
+}
+
+func (s *IntraBlockState) IsTxFiltered() bool {
+ return s.arbExtraData.arbTxFilter
+}
+
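+// ActivateWasm caches the compiled artifacts of a newly activated Stylus module for this block and
+// journals the activation so it can be rolled back on revert.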
+func (s *IntraBlockState) ActivateWasm(moduleHash common.Hash, asmMap map[wasmdb.WasmTarget][]byte) {
+ _, exists := s.arbExtraData.activatedWasms[moduleHash]
+ if exists {
+ return
+ }
+ s.arbExtraData.activatedWasms[moduleHash] = asmMap
+ s.journal.append(wasmActivation{
+ moduleHash: moduleHash,
+ })
+}
+
+func (s *IntraBlockState) TryGetActivatedAsm(target wasmdb.WasmTarget, moduleHash common.Hash) ([]byte, error) {
+ asmMap, exists := s.arbExtraData.activatedWasms[moduleHash]
+ if exists {
+ if asm, exists := asmMap[target]; exists {
+ return asm, nil
+ }
+ }
+ return s.ActivatedAsm(target, moduleHash)
+}
+
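+// TryGetActivatedAsmMap returns the asm for every requested target, preferring modules activated in
+// the current block and falling back to the wasm database; partial results are returned together
+// with a joined error for the targets that could not be loaded.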
+func (s *IntraBlockState) TryGetActivatedAsmMap(targets []wasmdb.WasmTarget, moduleHash common.Hash) (map[wasmdb.WasmTarget][]byte, error) {
+ asmMap := s.arbExtraData.activatedWasms[moduleHash]
+ if asmMap != nil {
+ for _, target := range targets {
+ if _, exists := asmMap[target]; !exists {
+ return nil, fmt.Errorf("newly activated wasms for module %v exist, but they don't contain asm for target %v", moduleHash, target)
+ }
+ }
+ return asmMap, nil
+ }
+ var err error
+ asmMap = make(map[wasmdb.WasmTarget][]byte, len(targets))
+ for _, target := range targets {
+ asm, dbErr := s.ActivatedAsm(target, moduleHash)
+ if dbErr == nil {
+ asmMap[target] = asm
+ } else {
+ err = errors.Join(fmt.Errorf("failed to read activated asm from database for target %v and module %v: %w", target, moduleHash, dbErr), err)
+ }
+ }
+ return asmMap, err
+}
+
+func (s *IntraBlockState) GetStylusPages() (uint16, uint16) {
+ return s.arbExtraData.openWasmPages, s.arbExtraData.everWasmPages
+}
+
+func (s *IntraBlockState) GetStylusPagesOpen() uint16 {
+ return s.arbExtraData.openWasmPages
+}
+
+func (s *IntraBlockState) SetStylusPagesOpen(open uint16) {
+ s.arbExtraData.openWasmPages = open
+}
+
+// Tracks that `new` additional pages have been opened, returning the previous counts
+func (s *IntraBlockState) AddStylusPages(new uint16) (uint16, uint16) {
+ open, ever := s.GetStylusPages()
+ s.arbExtraData.openWasmPages = arbmath.SaturatingUAdd(open, new)
+ s.arbExtraData.everWasmPages = max(ever, s.arbExtraData.openWasmPages)
+ return open, ever
+}
+
+// TODO(arbitrum): not used in upstream Nitro either
+func (s *IntraBlockState) AddStylusPagesEver(new uint16) {
+ s.arbExtraData.everWasmPages = arbmath.SaturatingUAdd(s.arbExtraData.everWasmPages, new)
+}
+
+var ErrArbTxFilter error = errors.New("internal error")
+
+type ArbitrumExtraData struct {
+ unexpectedBalanceDelta *uint256.Int // total balance change across all accounts
+ userWasms UserWasms // user wasms encountered during execution
+ openWasmPages uint16 // number of pages currently open
+ everWasmPages uint16 // largest number of pages ever allocated during this tx's execution
+ activatedWasms map[common.Hash]ActivatedWasm // newly activated WASMs
+ recentWasms RecentWasms
+ arbTxFilter bool
+}
+
+func (s *IntraBlockState) SetArbFinalizer(f func(*ArbitrumExtraData)) {
+ runtime.SetFinalizer(s.arbExtraData, f)
+}
+
+func (s *IntraBlockState) GetCurrentTxLogs() []types.Logs {
+ return s.logs
+ //return s.logs[s.thash]
+}
+
+// GetUnexpectedBalanceDelta returns the total unexpected change in balances since the last commit to the database.
+func (s *IntraBlockState) GetUnexpectedBalanceDelta() *uint256.Int {
+ return s.arbExtraData.unexpectedBalanceDelta
+}
+
+func (s *IntraBlockState) GetSelfDestructs() []accounts.Address {
+ selfDestructs := []accounts.Address{}
+ for addr := range s.journal.dirties {
+ obj, exist := s.stateObjects[addr]
+ if !exist {
+ continue
+ }
+ if obj.selfdestructed {
+ selfDestructs = append(selfDestructs, addr)
+ }
+ }
+ return selfDestructs
+}
+
+func (sdb *IntraBlockState) ActivatedAsm(target wasmdb.WasmTarget, moduleHash common.Hash) (asm []byte, err error) {
+ if sdb.wasmDB == nil {
+ panic("IBS: wasmDB not set")
+ }
+ return sdb.wasmDB.ActivatedAsm(target, moduleHash)
+}
+
+func (sdb *IntraBlockState) WasmStore() kv.RwDB {
+ if sdb.wasmDB == nil {
+ panic("IBS: wasmDB not set")
+ }
+ //TODO implement me
+ return sdb.wasmDB.WasmStore()
+}
+
+func (sdb *IntraBlockState) WasmCacheTag() uint32 {
+ if sdb.wasmDB == nil {
+ panic("IBS: wasmDB not set")
+ }
+ return sdb.wasmDB.WasmCacheTag()
+}
+
+func (sdb *IntraBlockState) WasmTargets() []wasmdb.WasmTarget {
+ if sdb.wasmDB == nil {
+ panic("IBS: wasmDB not set")
+ }
+ return sdb.wasmDB.WasmTargets()
+}
+
+func (sdb *IntraBlockState) GetReceiptsByHash(hash common.Hash) types.Receipts {
+ return nil
+ //TODO implement me
+ panic("implement me")
+}
+
+func (sdb *IntraBlockState) Commit(bn uint64, _ bool) (common.Hash, error) {
+ return common.Hash{}, nil
+ //TODO implement me
+ panic("implement me")
+}
+
+// making the function public to be used by external tests
+// func ForEachStorage(s *IntraBlockState, addr common.Address, cb func(key, value common.Hash) bool) error {
+// return forEachStorage(s, addr, cb)
+// }
+
+// moved here from statedb_test.go
+// func forEachStorage(s *IntraBlockState, addr common.Address, cb func(key, value common.Hash) bool) error {
+// s.domains.IterateStoragePrefix(addr[:], cb)
+// so := s.getStateObject(addr)
+// if so == nil {
+// return nil
+// }
+// tr, err := so.getTrie()
+// if err != nil {
+// return err
+// }
+// trieIt, err := tr.NodeIterator(nil)
+// if err != nil {
+// return err
+// }
+// it := trie.NewIterator(trieIt)
+
+// for it.Next() {
+// key := common.BytesToHash(s.trie.GetKey(it.Key))
+// if value, dirty := so.dirtyStorage[key]; dirty {
+// if !cb(key, value) {
+// return nil
+// }
+// continue
+// }
+
+// if len(it.Value) > 0 {
+// _, content, _, err := rlp.Split(it.Value)
+// if err != nil {
+// return err
+// }
+// if !cb(key, common.BytesToHash(content)) {
+// return nil
+// }
+// }
+// }
+// return nil
+// }
+
+// maps moduleHash to activation info
+type UserWasms map[common.Hash]ActivatedWasm
+
+func (s *IntraBlockState) StartRecording() {
+ s.arbExtraData.userWasms = make(UserWasms)
+}
+
+func (s *IntraBlockState) RecordProgram(targets []wasmdb.WasmTarget, moduleHash common.Hash) {
+ if len(targets) == 0 {
+ // nothing to record
+ return
+ }
+ asmMap, err := s.TryGetActivatedAsmMap(targets, moduleHash)
+ if err != nil {
+ // This is not a fatal error - we may be recording a program that failed to activate. Unless state root mismatches, execution is still valid.
+ log.Debug("can't find activated wasm while recording", "modulehash", moduleHash, "err", err)
+ }
+ if s.arbExtraData.userWasms != nil {
+ s.arbExtraData.userWasms[moduleHash] = asmMap
+ }
+}
+
+func (s *IntraBlockState) UserWasms() UserWasms {
+ return s.arbExtraData.userWasms
+}
+
+func (s *IntraBlockState) RecordCacheWasm(wasm CacheWasm) {
+ s.journal.entries = append(s.journal.entries, wasm)
+}
+
+func (s *IntraBlockState) RecordEvictWasm(wasm EvictWasm) {
+ s.journal.entries = append(s.journal.entries, wasm)
+}
+
+func (s *IntraBlockState) GetRecentWasms() RecentWasms {
+ return s.arbExtraData.recentWasms
+}
+
+func (s *IntraBlockState) HasSelfDestructed(addr accounts.Address) bool {
+ stateObject, err := s.getStateObject(addr)
+ if err != nil {
+ panic(err)
+ }
+ if stateObject != nil {
+ return stateObject.selfdestructed
+ }
+ return false
+}
+
+func (s *IntraBlockState) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
+ _, fn, ln, _ := runtime.Caller(1)
+ log.Warn("need shared domains and writer to calculate intermediate root", "caller", fmt.Sprintf("%s:%d", fn, ln))
+ return common.Hash{}
+}
+
+// GetStorageRoot retrieves the storage root from the given address or empty
+// if object not found.
+func (s *IntraBlockState) GetStorageRoot(addr accounts.Address) common.Hash {
+ stateObject, err := s.getStateObject(addr)
+ if err == nil && stateObject != nil {
+ return stateObject.data.Root
+ }
+ return common.Hash{}
+}
+
+func (sdb *IntraBlockState) SetWasmDB(wasmDB wasmdb.WasmIface) {
+ sdb.wasmDB = wasmDB
+}
+
+func (s *IntraBlockState) ExpectBalanceMint(amount *uint256.Int) {
+ if amount.Sign() < 0 {
+ panic(fmt.Sprintf("ExpectBalanceMint called with negative amount %v", amount))
+ }
+ s.arbExtraData.unexpectedBalanceDelta.Sub(s.arbExtraData.unexpectedBalanceDelta, amount)
+}
+
+func (sdb *IntraBlockState) ExpectBalanceBurn(amount *uint256.Int) {
+ if amount.Sign() < 0 {
+ panic(fmt.Sprintf("ExpectBalanceBurn called with negative amount %v", amount))
+ }
+ sdb.arbExtraData.unexpectedBalanceDelta.Add(sdb.arbExtraData.unexpectedBalanceDelta, amount)
+}
+
+type ActivatedWasm map[wasmdb.WasmTarget][]byte
+
+// checks if a valid Stylus prefix is present
+func IsStylusProgram(b []byte) bool {
+ if len(b) < len(StylusDiscriminant)+1 {
+ return false
+ }
+ return bytes.Equal(b[:3], StylusDiscriminant)
+}
+
+// strips the Stylus header from a contract, returning the dictionary used
+func StripStylusPrefix(b []byte) ([]byte, byte, error) {
+ if !IsStylusProgram(b) {
+ return nil, 0, errors.New("specified bytecode is not a Stylus program")
+ }
+ return b[4:], b[3], nil
+}
+
+// creates a new Stylus prefix from the given dictionary byte
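+// For example, NewStylusPrefix(0x01) yields the 4-byte header 0xEF 0xF0 0x00 0x01, and
+// StripStylusPrefix of such a program returns the compressed wasm body plus the dictionary byte 0x01.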
+func NewStylusPrefix(dictionary byte) []byte {
+ prefix := bytes.Clone(StylusDiscriminant)
+ return append(prefix, dictionary)
+}
+
+type wasmActivation struct {
+ moduleHash common.Hash
+}
+
+func (ch wasmActivation) revert(s *IntraBlockState) error {
+ delete(s.arbExtraData.activatedWasms, ch.moduleHash)
+ return nil
+}
+
+func (ch wasmActivation) dirtied() (accounts.Address, bool) {
+ return accounts.NilAddress, false
+}
+
+// Updates the Rust-side recent program cache
+var CacheWasmRust func(asm []byte, moduleHash common.Hash, version uint16, tag uint32, debug bool) = func([]byte, common.Hash, uint16, uint32, bool) {}
+var EvictWasmRust func(moduleHash common.Hash, version uint16, tag uint32, debug bool) = func(common.Hash, uint16, uint32, bool) {}
+
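+// CacheWasm journals the caching of a Stylus program in the Rust-side LRU; reverting evicts it again.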
+type CacheWasm struct {
+ ModuleHash common.Hash
+ Version uint16
+ Tag uint32
+ Debug bool
+}
+
+func (ch CacheWasm) revert(*IntraBlockState) error {
+ EvictWasmRust(ch.ModuleHash, ch.Version, ch.Tag, ch.Debug)
+ return nil
+}
+
+func (ch CacheWasm) dirtied() (accounts.Address, bool) {
+ return accounts.NilAddress, false
+}
+
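+// EvictWasm journals an eviction from the Rust-side cache; reverting re-caches the program if its
+// native asm can still be loaded locally.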
+type EvictWasm struct {
+ ModuleHash common.Hash
+ Version uint16
+ Tag uint32
+ Debug bool
+}
+
+func (ch EvictWasm) revert(s *IntraBlockState) error {
+ asm, err := s.TryGetActivatedAsm(wasmdb.LocalTarget(), ch.ModuleHash) // only happens in native mode
+ if err == nil && len(asm) != 0 {
+ //if we failed to get it - it's not in the current rust cache
+ CacheWasmRust(asm, ch.ModuleHash, ch.Version, ch.Tag, ch.Debug)
+ }
+ return err
+}
+
+func (ch EvictWasm) dirtied() (accounts.Address, bool) {
+ return accounts.NilAddress, false
+}
+
+// Type for managing recent program access.
+// The cache contained is discarded at the end of each block.
+type RecentWasms struct {
+ cache *lru.BasicLRU[common.Hash, struct{}]
+}
+
+// Creates an uninitialized cache.
+func NewRecentWasms() RecentWasms {
+ return RecentWasms{cache: nil}
+}
+
+// Inserts a new item, returning true if already present.
+func (p RecentWasms) Insert(item common.Hash, retain uint16) bool {
+ if p.cache == nil {
+ cache := lru.NewBasicLRU[common.Hash, struct{}](int(retain))
+ p.cache = &cache
+ }
+ if _, hit := p.cache.Get(item); hit {
+ return hit
+ }
+ p.cache.Add(item, struct{}{})
+ return false
+}
+
+// Copies all entries into a new LRU.
+func (p RecentWasms) Copy() RecentWasms {
+ if p.cache == nil {
+ return NewRecentWasms()
+ }
+ cache := lru.NewBasicLRU[common.Hash, struct{}](p.cache.Capacity())
+ for _, item := range p.cache.Keys() {
+ cache.Add(item, struct{}{})
+ }
+ return RecentWasms{cache: &cache}
+}
diff --git a/execution/state/genesiswrite/genesis_write.go b/execution/state/genesiswrite/genesis_write.go
index 170ad1c008f..4703dbf7749 100644
--- a/execution/state/genesiswrite/genesis_write.go
+++ b/execution/state/genesiswrite/genesis_write.go
@@ -24,6 +24,7 @@ import (
"context"
"errors"
"fmt"
+ "log"
"math/big"
"slices"
"sort"
@@ -268,6 +269,49 @@ func write(tx kv.RwTx, g *types.Genesis, dirs datadir.Dirs, logger log.Logger) (
return block, statedb, err
}
+// Writes custom genesis block to db. Allows to write genesis with block number > 0.
+func WriteCustomGenesisBlock(tx kv.RwTx, gen *types.Genesis, block *types.Block, difficulty *big.Int, genesisBlockNum uint64, cfg *chain.Config) error {
+ // This part already happens in InitializeArbosInDatabase
+ //var stateWriter state.StateWriter
+ //if block.Number().Sign() != 0 {
+ // return nil, statedb, errors.New("can't commit genesis block with number > 0")
+ //}
+ //if err := statedb.CommitBlock(rules, stateWriter); err != nil {
+ // return nil, statedb, fmt.Errorf("cannot write state: %w", err)
+ //}
+ //newCfg := gen.ConfigOrDefault(block.Root())
+ //if err := newCfg.CheckConfigForkOrder(); err != nil {
+ // return err
+ //}
+
+ if err := rawdb.WriteGenesisIfNotExist(tx, gen); err != nil {
+ return err
+ }
+ if err := rawdb.WriteBlock(tx, block); err != nil {
+ return err
+ }
+ if err := rawdb.WriteTd(tx, block.Hash(), block.NumberU64(), difficulty); err != nil {
+ return err
+ }
+ if genesisBlockNum != block.NumberU64() {
+ return fmt.Errorf("genesis block number and given block number mismatches (gen %d != block %d)", genesisBlockNum, block.NumberU64())
+ }
+ if err := rawdbv3.TxNums.Append(tx, genesisBlockNum, uint64(block.Transactions().Len()+1)); err != nil {
+ return err
+ }
+ if err := rawdb.WriteCanonicalHash(tx, block.Hash(), block.NumberU64()); err != nil {
+ return err
+ }
+ rawdb.WriteHeadBlockHash(tx, block.Hash())
+ if err := rawdb.WriteHeadHeaderHash(tx, block.Hash()); err != nil {
+ return err
+ }
+ if err := rawdb.WriteChainConfig(tx, block.Hash(), cfg); err != nil {
+ return err
+ }
+ return nil
+}
+
// Write writes the block a genesis specification to the database.
// The block is committed as the canonical head block.
func WriteGenesisBesideState(block *types.Block, tx kv.RwTx, g *types.Genesis) error {
@@ -306,7 +350,9 @@ func GenesisToBlock(tb testing.TB, g *types.Genesis, dirs datadir.Dirs, logger l
if dirs.SnapDomain == "" {
panic("empty `dirs` variable")
}
+ //head, withdrawals := rawdb.GenesisWithoutStateToBlock(g)
_ = g.Alloc //nil-check
+<<<<<<<< HEAD:execution/state/genesiswrite/genesis_write.go
head, withdrawals := GenesisWithoutStateToBlock(g)
@@ -420,7 +466,6 @@ func GenesisWithoutStateToBlock(g *types.Genesis) (head *types.Header, withdrawa
BlobGasUsed: g.BlobGasUsed,
ExcessBlobGas: g.ExcessBlobGas,
RequestsHash: g.RequestsHash,
- Root: empty.RootHash,
}
if g.AuRaSeal != nil && len(g.AuRaSeal.AuthorityRound.Signature) > 0 {
head.AuRaSeal = g.AuRaSeal.AuthorityRound.Signature
@@ -440,12 +485,13 @@ func GenesisWithoutStateToBlock(g *types.Genesis) (head *types.Header, withdrawa
}
}
- withdrawals = nil
- if g.Config != nil && g.Config.IsShanghai(g.Timestamp) {
+ arbosVersion := types.GetArbOSVersion(head, g.Config)
+
+ if g.Config != nil && g.Config.IsShanghai(g.Timestamp, arbosVersion) {
withdrawals = []*types.Withdrawal{}
}
- if g.Config != nil && g.Config.IsCancun(g.Timestamp) {
+ if g.Config != nil && g.Config.IsCancun(g.Timestamp, arbosVersion) {
if g.BlobGasUsed != nil {
head.BlobGasUsed = g.BlobGasUsed
} else {
@@ -463,11 +509,11 @@ func GenesisWithoutStateToBlock(g *types.Genesis) (head *types.Header, withdrawa
}
}
- if g.Config != nil && g.Config.IsPrague(g.Timestamp) {
+ if g.Config != nil && g.Config.IsPrague(g.Timestamp, arbosVersion) {
if g.RequestsHash != nil {
head.RequestsHash = g.RequestsHash
} else {
- head.RequestsHash = &empty.RequestsHash
+ head.RequestsHash = &types.EmptyRequestsHash
}
}
@@ -488,8 +534,88 @@ func GenesisWithoutStateToBlock(g *types.Genesis) (head *types.Header, withdrawa
head.ParentBeaconBlockRoot = &emptyHash
}
return
+
}
+// GenesisWithoutStateToBlock creates the genesis block, assuming an empty state.
+// func GenesisWithoutStateToBlock(g *types.Genesis) (head *types.Header, withdrawals []*types.Withdrawal) {
+// head = &types.Header{
+// Number: new(big.Int).SetUint64(g.Number),
+// Nonce: types.EncodeNonce(g.Nonce),
+// Time: g.Timestamp,
+// ParentHash: g.ParentHash,
+// Extra: g.ExtraData,
+// GasLimit: g.GasLimit,
+// GasUsed: g.GasUsed,
+// Difficulty: g.Difficulty,
+// MixDigest: g.Mixhash,
+// Coinbase: g.Coinbase,
+// BaseFee: g.BaseFee,
+// BlobGasUsed: g.BlobGasUsed,
+// ExcessBlobGas: g.ExcessBlobGas,
+// RequestsHash: g.RequestsHash,
+// Root: empty.RootHash,
+// }
+// if g.AuRaSeal != nil && len(g.AuRaSeal.AuthorityRound.Signature) > 0 {
+// head.AuRaSeal = g.AuRaSeal.AuthorityRound.Signature
+// head.AuRaStep = uint64(g.AuRaSeal.AuthorityRound.Step)
+// }
+// if g.GasLimit == 0 {
+// head.GasLimit = params.GenesisGasLimit
+// }
+// if g.Difficulty == nil {
+// head.Difficulty = params.GenesisDifficulty
+// }
+// if g.Config != nil && g.Config.IsLondon(0) {
+// if g.BaseFee != nil {
+// head.BaseFee = g.BaseFee
+// } else {
+// head.BaseFee = new(big.Int).SetUint64(params.InitialBaseFee)
+// }
+// }
+
+// withdrawals = nil
+// if g.Config != nil && g.Config.IsShanghai(g.Timestamp) {
+// withdrawals = []*types.Withdrawal{}
+// }
+
+// if g.Config != nil && g.Config.IsCancun(g.Timestamp) {
+// if g.BlobGasUsed != nil {
+// head.BlobGasUsed = g.BlobGasUsed
+// } else {
+// head.BlobGasUsed = new(uint64)
+// }
+// if g.ExcessBlobGas != nil {
+// head.ExcessBlobGas = g.ExcessBlobGas
+// } else {
+// head.ExcessBlobGas = new(uint64)
+// }
+// if g.ParentBeaconBlockRoot != nil {
+// head.ParentBeaconBlockRoot = g.ParentBeaconBlockRoot
+// } else {
+// head.ParentBeaconBlockRoot = &common.Hash{}
+// }
+// }
+
+// if g.Config != nil && g.Config.IsPrague(g.Timestamp) {
+// if g.RequestsHash != nil {
+// head.RequestsHash = g.RequestsHash
+// } else {
+// head.RequestsHash = &empty.RequestsHash
+// }
+// }
+
+// // these fields need to be overriden for Bor running in a kurtosis devnet
+// if g.Config != nil && g.Config.Bor != nil && g.Config.ChainID.Uint64() == polygonchain.BorKurtosisDevnetChainId {
+// withdrawals = []*types.Withdrawal{}
+// head.BlobGasUsed = new(uint64)
+// head.ExcessBlobGas = new(uint64)
+// emptyHash := common.HexToHash("0x0")
+// head.ParentBeaconBlockRoot = &emptyHash
+// }
+// return
+// }
+
func sortedAllocAddresses(m types.GenesisAlloc) []common.Address {
addrs := make([]common.Address, 0, len(m))
for addr := range m {
diff --git a/execution/state/intra_block_state.go b/execution/state/intra_block_state.go
index 2ed9779e0e1..dc80a17ce94 100644
--- a/execution/state/intra_block_state.go
+++ b/execution/state/intra_block_state.go
@@ -21,6 +21,7 @@
package state
import (
+ "context"
"errors"
"fmt"
"slices"
@@ -29,6 +30,8 @@ import (
"sync"
"time"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+ "github.com/erigontech/erigon/db/kv"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -40,14 +43,16 @@ import (
"github.com/erigontech/erigon/execution/tracing"
"github.com/erigontech/erigon/execution/types"
"github.com/erigontech/erigon/execution/types/accounts"
- "github.com/erigontech/erigon/execution/vm/evmtypes"
)
-var _ evmtypes.IntraBlockState = new(IntraBlockState) // compile-time interface-check
+//var _ evmtypes.IntraBlockState = new(IntraBlockState) // compile-time interface-check
type revision struct {
id int
journalIndex int
+
+ // Arbitrum: track the total balance change across all accounts
+ unexpectedBalanceDelta *uint256.Int
}
type revisions struct {
@@ -58,7 +63,7 @@ type revisions struct {
func (r *revisions) snapshot(journal *journal) int {
id := r.nextId
r.nextId++
- r.valid = append(r.valid, revision{id, journal.length()})
+ r.valid = append(r.valid, revision{id, journal.length(), nil})
return id
}
@@ -126,6 +131,7 @@ type BalanceIncrease struct {
increase uint256.Int
transferred bool // Set to true when the corresponding stateObject is created and balance increase is transferred to the stateObject
count int // Number of increases - this needs tracking for proper reversion
+ isEscrow bool // Arbitrum: true if the increase relates to an escrow account
}
// IntraBlockState is responsible for caching and managing state changes
@@ -134,6 +140,8 @@ type BalanceIncrease struct {
type IntraBlockState struct {
stateReader StateReader
+ arbExtraData *ArbitrumExtraData // TODO make sure this field not used for other chains
+
// This map holds 'live' objects, which will get modified while processing a state transition.
stateObjects map[accounts.Address]*stateObject
stateObjectsDirty map[accounts.Address]struct{}
@@ -178,6 +186,14 @@ type IntraBlockState struct {
codeReadCount int64
version int
dep int
+
+ // Arbitrum stylus
+ wasmDB wasmdb.WasmIface
+}
+
+func (sdb *IntraBlockState) Snapshot() int {
+ //TODO implement me
+ panic("implement me")
}
// Create a new state from a given trie
@@ -197,6 +213,14 @@ func New(stateReader StateReader) *IntraBlockState {
txIndex: 0,
trace: false,
dep: UnknownDep,
+ arbExtraData: &ArbitrumExtraData{
+ unexpectedBalanceDelta: uint256.NewInt(0),
+ userWasms: UserWasms{},
+ openWasmPages: 0,
+ everWasmPages: 0,
+ activatedWasms: make(map[common.Hash]ActivatedWasm),
+ recentWasms: RecentWasms{},
+ },
}
}
@@ -755,6 +779,17 @@ func (sdb *IntraBlockState) ReadVersion(addr accounts.Address, path AccountPath,
return sdb.versionMap.Read(addr, path, key, txIdx)
}
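+
+// RemoveEscrowProtection clears the escrow flag on a pending balance increase for addr so the
+// account is treated like any other recipient again.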
+func (sdb *IntraBlockState) RemoveEscrowProtection(addr accounts.Address) {
+ bi, ok := sdb.balanceInc[addr]
+ if ok {
+ if sdb.trace {
+ fmt.Printf("RemoveEscrowProtection %x, isEscrow=%v\n", addr, bi.isEscrow)
+ }
+ bi.isEscrow = false
+ sdb.balanceInc[addr] = bi
+ }
+}
+
// AddBalance adds amount to the account associated with addr.
// DESCRIBED: docs/programmers_guide/guide.md#address---identifier-of-an-account
func (sdb *IntraBlockState) AddBalance(addr accounts.Address, amount uint256.Int, reason tracing.BalanceChangeReason) error {
@@ -769,8 +804,12 @@ func (sdb *IntraBlockState) AddBalance(addr accounts.Address, amount uint256.Int
bi, ok := sdb.balanceInc[addr]
if !ok {
bi = &BalanceIncrease{}
+ bi.isEscrow = reason == tracing.BalanceIncreaseEscrow // arbitrum specific protection
sdb.balanceInc[addr] = bi
}
+ if sdb.trace && bi.isEscrow {
+ fmt.Printf("protected escrow %x\n", addr)
+ }
if sdb.tracingHooks != nil && sdb.tracingHooks.OnBalanceChange != nil {
// TODO: discuss if we should ignore error
@@ -793,12 +832,26 @@ func (sdb *IntraBlockState) AddBalance(addr accounts.Address, amount uint256.Int
sdb.tracingHooks.OnBalanceChange(addr, *prev, *(new(uint256.Int).Add(prev, &amount)), reason)
}
+ sdb.arbExtraData.unexpectedBalanceDelta.Add(sdb.arbExtraData.unexpectedBalanceDelta, &amount)
+
bi.increase = u256.Add(bi.increase, amount)
bi.count++
return nil
}
}
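+ // Arbitrum: record the escrow flag for this address even when the balance increase is applied to
+ // the state object below, so Selfdestruct and BalanceIncreaseSet can recognise escrow accounts.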
+ if isEscrow := reason == tracing.BalanceIncreaseEscrow; isEscrow && sdb.balanceInc != nil {
+ bi, ok := sdb.balanceInc[addr]
+ if !ok {
+ bi = &BalanceIncrease{isEscrow: isEscrow}
+ }
+ bi.isEscrow = isEscrow
+ sdb.balanceInc[addr] = bi
+ if sdb.trace && bi.isEscrow {
+ fmt.Printf("protected escrow %x\n", addr)
+ }
+ }
+
// EIP161: We must check emptiness for the objects such that the account
// clearing (0,0,0 objects) can take effect.
if amount.IsZero() {
@@ -837,6 +890,8 @@ func (sdb *IntraBlockState) AddBalance(addr accounts.Address, amount uint256.Int
}()
}
+ sdb.arbExtraData.unexpectedBalanceDelta.Add(sdb.arbExtraData.unexpectedBalanceDelta, &amount)
+
update := u256.Add(prev, amount)
stateObject, err := sdb.GetOrNewStateObject(addr)
@@ -983,6 +1038,8 @@ func (sdb *IntraBlockState) SubBalance(addr accounts.Address, amount uint256.Int
if err != nil {
return err
}
+ sdb.arbExtraData.unexpectedBalanceDelta.Sub(sdb.arbExtraData.unexpectedBalanceDelta, &amount)
+
update := u256.Sub(prev, amount)
stateObject.SetBalance(update, wasCommited, reason)
if sdb.versionMap != nil {
@@ -1001,6 +1058,12 @@ func (sdb *IntraBlockState) SetBalance(addr accounts.Address, amount uint256.Int
if err != nil {
return err
}
+ if sdb.arbExtraData != nil {
+ prevBalance := stateObject.Balance()
+ sdb.arbExtraData.unexpectedBalanceDelta.Add(sdb.arbExtraData.unexpectedBalanceDelta, &amount)
+ sdb.arbExtraData.unexpectedBalanceDelta.Sub(sdb.arbExtraData.unexpectedBalanceDelta, &prevBalance)
+ }
+
stateObject.SetBalance(amount, !sdb.hasWrite(addr, BalancePath, accounts.NilKey), reason)
versionWritten(sdb, addr, BalancePath, accounts.NilKey, stateObject.Balance())
return nil
@@ -1170,11 +1233,24 @@ func (sdb *IntraBlockState) Selfdestruct(addr accounts.Address) (bool, error) {
wasCommited: !sdb.hasWrite(addr, SelfDestructPath, accounts.NilKey),
})
- if sdb.tracingHooks != nil && sdb.tracingHooks.OnBalanceChange != nil && !prevBalance.IsZero() {
- sdb.tracingHooks.OnBalanceChange(addr, prevBalance, zeroBalance, tracing.BalanceDecreaseSelfdestruct)
+ if sdb.tracingHooks != nil {
+ if sdb.tracingHooks.OnBalanceChange != nil && !prevBalance.IsZero() {
+ sdb.tracingHooks.OnBalanceChange(addr, prevBalance, zeroBalance, tracing.BalanceDecreaseSelfdestruct)
+ }
+ if sdb.tracingHooks.CaptureArbitrumTransfer != nil {
+ addrValue := addr.Value()
+ sdb.tracingHooks.CaptureArbitrumTransfer(&addrValue, nil, &prevBalance, false, "selfDestruct")
+ }
}
stateObject.markSelfdestructed()
+ sdb.arbExtraData.unexpectedBalanceDelta.Sub(sdb.arbExtraData.unexpectedBalanceDelta, &stateObject.data.Balance)
+ if bi, exist := sdb.balanceInc[addr]; exist && bi.isEscrow {
+ // TODO arbitrum remove log
+ fmt.Printf("ESCROW unprotected by selfdestruct %x\n", addr)
+ bi.isEscrow = false
+ }
+
stateObject.createdContract = false
stateObject.data.Balance.Clear()
@@ -1478,6 +1554,11 @@ func (sdb *IntraBlockState) PushSnapshot() int {
if sdb.revisions == nil {
sdb.revisions = revisionsPool.Get().(*revisions)
}
+ // MERGE_ARBITRUM
+ /*
+ sdb.validRevisions = append(sdb.validRevisions,
+ revision{id, sdb.journal.length(), sdb.arbExtraData.unexpectedBalanceDelta.Clone()})
+ */
return sdb.revisions.snapshot(sdb.journal)
}
@@ -1502,6 +1583,12 @@ func (sdb *IntraBlockState) RevertToSnapshot(revid int, err error) {
}
snapshot := sdb.revisions.revertToSnapshot(revid)
+ if sdb.arbExtraData != nil {
+ if sdb.arbExtraData.unexpectedBalanceDelta == nil {
+ sdb.arbExtraData.unexpectedBalanceDelta = uint256.NewInt(0)
+ }
+ // TODO(arbitrum): restore the per-revision unexpectedBalanceDelta once snapshots record it
+ // (see the MERGE_ARBITRUM note in PushSnapshot); revisions currently store a nil delta.
+ }
// Replay the journal to undo changes and remove invalidated snapshots
sdb.journal.revert(sdb, snapshot)
@@ -1639,14 +1726,45 @@ func (sdb *IntraBlockState) CommitBlock(chainRules *chain.Rules, stateWriter Sta
sdb.getStateObject(addr)
}
}
+
+ if sdb.wasmDB != nil && sdb.arbExtraData != nil {
+ if db := sdb.wasmDB.WasmStore(); db != nil && len(sdb.arbExtraData.activatedWasms) > 0 {
+ if err := db.Update(context.TODO(), func(tx kv.RwTx) error {
+ // Arbitrum: write Stylus programs to disk
+ for moduleHash, asmMap := range sdb.arbExtraData.activatedWasms {
+ wasmdb.WriteActivation(tx, moduleHash, asmMap)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ sdb.arbExtraData.activatedWasms = make(map[common.Hash]ActivatedWasm)
+ }
+ sdb.arbExtraData.unexpectedBalanceDelta.Clear()
+ }
return sdb.MakeWriteSet(chainRules, stateWriter)
}
-func (sdb *IntraBlockState) BalanceIncreaseSet() map[accounts.Address]uint256.Int {
- s := make(map[accounts.Address]uint256.Int, len(sdb.balanceInc))
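+// BalanceIncreaseEntry pairs a deferred balance increase with Arbitrum's escrow flag so that callers
+// of BalanceIncreaseSet can tell escrow-protected accounts apart from ordinary recipients.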
+type BalanceIncreaseEntry struct {
+ Amount uint256.Int
+ IsEscrow bool
+}
+
+func (sdb *IntraBlockState) BalanceIncreaseSet() map[accounts.Address]BalanceIncreaseEntry {
+ s := make(map[accounts.Address]BalanceIncreaseEntry, len(sdb.balanceInc))
for addr, bi := range sdb.balanceInc {
+ if bi.isEscrow {
+ s[addr] = BalanceIncreaseEntry{
+ Amount: uint256.Int{},
+ IsEscrow: bi.isEscrow,
+ }
+ }
+
if !bi.transferred {
- s[addr] = bi.increase
+ s[addr] = BalanceIncreaseEntry{
+ Amount: bi.increase,
+ IsEscrow: bi.isEscrow,
+ }
}
}
return s
@@ -1727,6 +1845,19 @@ func (sdb *IntraBlockState) SetTxContext(bn uint64, ti int) {
*/
sdb.txIndex = ti
sdb.blockNum = bn
+
+ // Arbitrum: clear memory charging state for new tx
+ if sdb.arbExtraData == nil {
+ sdb.arbExtraData = &ArbitrumExtraData{
+ unexpectedBalanceDelta: new(uint256.Int),
+ userWasms: map[common.Hash]ActivatedWasm{},
+ activatedWasms: map[common.Hash]ActivatedWasm{},
+ recentWasms: NewRecentWasms(),
+ }
+ } else {
+ sdb.arbExtraData.openWasmPages = 0
+ sdb.arbExtraData.everWasmPages = 0
+ }
}
// no not lock
diff --git a/execution/tests/arb-execution-spec-tests b/execution/tests/arb-execution-spec-tests
new file mode 160000
index 00000000000..2353126e0da
--- /dev/null
+++ b/execution/tests/arb-execution-spec-tests
@@ -0,0 +1 @@
+Subproject commit 2353126e0da440633dd3efa9d8b9f6d0b2f6ba31
diff --git a/execution/tests/block_test.go b/execution/tests/block_test.go
index 42a91724766..930ec4311ce 100644
--- a/execution/tests/block_test.go
+++ b/execution/tests/block_test.go
@@ -94,6 +94,28 @@ func TestExecutionSpecBlockchain(t *testing.T) {
})
}
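+
+// TestArbitrumExecutionSpecBlockchain runs the Arbitrum execution-spec-tests fixtures from the
+// arb-execution-spec-tests submodule through the standard blockchain test runner.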
+func TestArbitrumExecutionSpecBlockchain(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+ t.Parallel()
+
+ defer log.Root().SetHandler(log.Root().GetHandler())
+ log.Root().SetHandler(log.LvlFilterHandler(log.LvlError, log.StderrHandler))
+
+ bt := new(testMatcher)
+
+ dir := filepath.Join(".", "arb-execution-spec-tests", "blockchain_tests")
+ //checkStateRoot := true
+
+ bt.walk(t, dir, func(t *testing.T, name string, test *testutil.BlockTest) {
+ if err := bt.checkFailure(t, test.Run(t)); err != nil {
+ t.Error(err)
+ }
+ })
+
+}
+
// Only runs EEST tests for current devnet - can "skip" on off-seasons
func TestExecutionSpecBlockchainDevnet(t *testing.T) {
t.Skip("Osaka is already covered by TestExecutionSpecBlockchain")
diff --git a/execution/tests/blockgen/chain_makers.go b/execution/tests/blockgen/chain_makers.go
index 5e7e3876f16..7f5fefeaa05 100644
--- a/execution/tests/blockgen/chain_makers.go
+++ b/execution/tests/blockgen/chain_makers.go
@@ -411,6 +411,19 @@ func makeHeader(chain rules.ChainReader, parent *types.Block, state *state.Intra
time = parent.Time() + 10 // block time is fixed at 10 seconds
}
+ /*
+ TODO
+ var arbosVersion uint64
+ if chainConfig.IsArbitrum() {
+ arbosVersion = types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
+ }
+
+ if chainConfig.IsCancun(header.Time, arbosVersion) {
+ excessBlobGas := misc.CalcExcessBlobGas(chainConfig, parent, header.Time)
+ header.ExcessBlobGas = &excessBlobGas
+ header.BlobGasUsed = new(uint64)
+ }
+ */
header := builder.MakeEmptyHeader(parent.Header(), chain.Config(), time, nil)
header.Coinbase = parent.Coinbase()
header.Difficulty = engine.CalcDifficulty(chain, time,
diff --git a/execution/tests/legacy-tests b/execution/tests/legacy-tests
index c67e485ff8b..e2d83cf0946 160000
--- a/execution/tests/legacy-tests
+++ b/execution/tests/legacy-tests
@@ -1 +1 @@
-Subproject commit c67e485ff8b5be9abc8ad15345ec21aa22e290d9
+Subproject commit e2d83cf0946a3ecbf0a28381ab0939cbe0df4d3b
diff --git a/execution/tests/testutil/state_test_util.go b/execution/tests/testutil/state_test_util.go
index e28471edd50..5c8040cc3ae 100644
--- a/execution/tests/testutil/state_test_util.go
+++ b/execution/tests/testutil/state_test_util.go
@@ -254,8 +254,8 @@ func (t *StateTest) RunNoVerify(tb testing.TB, tx kv.TemporalRwTx, subtest State
context.PrevRanDao = &rnd
context.Difficulty = big.NewInt(0)
}
- if config.IsCancun(block.Time()) && t.Json.Env.ExcessBlobGas != nil {
- context.BlobBaseFee, err = misc.GetBlobGasPrice(config, *t.Json.Env.ExcessBlobGas, header.Time)
+ if config.IsCancun(block.Time(), 0) && t.json.Env.ExcessBlobGas != nil {
+ context.BlobBaseFee, err = misc.GetBlobGasPrice(config, *t.json.Env.ExcessBlobGas, header.Time)
if err != nil {
return nil, common.Hash{}, 0, err
}
@@ -268,7 +268,8 @@ func (t *StateTest) RunNoVerify(tb testing.TB, tx kv.TemporalRwTx, subtest State
// Execute the message.
snapshot := statedb.PushSnapshot()
gaspool := new(protocol.GasPool)
- gaspool.AddGas(block.GasLimit()).AddBlobGas(config.GetMaxBlobGasPerBlock(header.Time))
+ arbOsVersion := types.GetArbOSVersion(header, config)
+ gaspool.AddGas(block.GasLimit()).AddBlobGas(config.GetMaxBlobGasPerBlock(header.Time, arbOsVersion))
res, err := protocol.ApplyMessage(evm, msg, gaspool, true /* refunds */, false /* gasBailout */, nil /* engine */)
gasUsed := uint64(0)
if res != nil {
diff --git a/execution/tracing/hooks.go b/execution/tracing/hooks.go
index aca86048c91..8a6f61b85c6 100644
--- a/execution/tracing/hooks.go
+++ b/execution/tracing/hooks.go
@@ -65,6 +65,8 @@ type VMContext struct {
IntraBlockState IntraBlockState
TxHash common.Hash
+
+ ArbOSVersion uint64 // arbitrum
}
// BlockEvent is emitted upon tracing an incoming block.
@@ -160,6 +162,13 @@ type (
// LogHook is called when a log is emitted.
LogHook = func(log *types.Log)
+
+ // Arbitrum specific hooks
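+ // CaptureArbitrumTransferHook observes value transfers that happen outside of EVM execution
+ // (for example deposits, fee collection and refunds); `before` reports whether the transfer
+ // occurred before or after the EVM ran for the transaction.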
+ CaptureArbitrumTransferHook = func(from, to *common.Address, value *uint256.Int, before bool, reason string)
+ CaptureArbitrumStorageGetHook = func(key common.Hash, depth int, before bool)
+ CaptureArbitrumStorageSetHook = func(key, value common.Hash, depth int, before bool)
+
+ CaptureStylusHostioHook = func(name string, args, outs []byte, startInk, endInk uint64)
)
type Hooks struct {
@@ -185,6 +194,13 @@ type Hooks struct {
OnStorageChange StorageChangeHook
OnLog LogHook
Flush func(tx types.Transaction)
+
+ // Arbitrum specifics: transfer and storage access tracers
+ CaptureArbitrumTransfer CaptureArbitrumTransferHook
+ CaptureArbitrumStorageGet CaptureArbitrumStorageGetHook
+ CaptureArbitrumStorageSet CaptureArbitrumStorageSetHook
+ // Arbitrum Stylus specific
+ CaptureStylusHostio CaptureStylusHostioHook
}
// BalanceChangeReason is used to indicate the reason for a balance change, useful
@@ -238,6 +254,75 @@ const (
BalanceDecreaseSelfdestructBurn BalanceChangeReason = 14
)
+// Arbitrum specific
+const (
+ // It is like BalanceIncrease but marks the address as an escrow account and prohibits its deletion after SpuriousDragon.
+ BalanceIncreaseEscrow BalanceChangeReason = 15
+
+ BalanceChangeDuringEVMExecution BalanceChangeReason = 128 + iota
+ BalanceIncreaseDeposit
+ BalanceDecreaseWithdrawToL1
+ BalanceIncreaseL1PosterFee
+ BalanceIncreaseInfraFee
+ BalanceIncreaseNetworkFee
+ BalanceChangeTransferInfraRefund
+ BalanceChangeTransferNetworkRefund
+ BalanceIncreasePrepaid
+ BalanceDecreaseUndoRefund
+ BalanceChangeEscrowTransfer
+ BalanceChangeTransferBatchposterReward
+ BalanceChangeTransferBatchposterRefund
+ BalanceChangeTransferRetryableExcessRefund
+ // Stylus
+ BalanceChangeTransferActivationFee
+ BalanceChangeTransferActivationReimburse
+ // Native token minting and burning
+ BalanceIncreaseMintNativeToken
+ BalanceDecreaseBurnNativeToken
+)
+
+func (b BalanceChangeReason) Str() string {
+ switch b {
+ case BalanceIncreaseRewardTransactionFee:
+ return "tip"
+ case BalanceDecreaseGasBuy:
+ return "feePayment"
+ case BalanceIncreaseGasReturn:
+ return "gasRefund"
+ case BalanceChangeTransfer:
+ return "transfer via a call"
+ case BalanceDecreaseSelfdestruct:
+ return "selfDestruct"
+ case BalanceChangeDuringEVMExecution:
+ return "during evm execution"
+ case BalanceIncreaseDeposit:
+ return "deposit"
+ case BalanceDecreaseWithdrawToL1:
+ return "withdraw"
+ case BalanceIncreaseL1PosterFee, BalanceIncreaseInfraFee, BalanceIncreaseNetworkFee:
+ return "feeCollection"
+ case BalanceIncreasePrepaid:
+ return "prepaid"
+ case BalanceDecreaseUndoRefund:
+ return "undoRefund"
+ case BalanceChangeEscrowTransfer:
+ return "escrow"
+ case BalanceChangeTransferInfraRefund, BalanceChangeTransferNetworkRefund, BalanceChangeTransferRetryableExcessRefund:
+ return "refund"
+ case BalanceChangeTransferBatchposterReward:
+ return "batchPosterReward"
+ case BalanceChangeTransferBatchposterRefund:
+ return "batchPosterRefund"
+ // Stylus
+ case BalanceChangeTransferActivationFee:
+ return "activate"
+ case BalanceChangeTransferActivationReimburse:
+ return "reimburse"
+ default:
+ return "unspecified"
+ }
+}
+
// GasChangeReason is used to indicate the reason for a gas change, useful
// for tracing and reporting.
//
@@ -296,6 +381,15 @@ const (
GasChangeCallFailedExecution GasChangeReason = 14
// GasChangeDelegatedDesignation is the amount of gas that will be charged for resolution of delegated designation.
GasChangeDelegatedDesignation GasChangeReason = 15
+ // GasChangeWitnessContractCreation flags the event of adding to the witness during the contract creation finalization step.
+ GasChangeWitnessContractCreation GasChangeReason = 16
+ // GasChangeWitnessCodeChunk flags the event of adding one or more contract code chunks to the witness.
+ GasChangeWitnessCodeChunk GasChangeReason = 17
+ // GasChangeWitnessContractCollisionCheck flags the event of adding to the witness when checking for contract address collision.
+ GasChangeWitnessContractCollisionCheck GasChangeReason = 18
+ // GasChangeTxDataFloor is the amount of extra gas the transaction has to pay to reach the minimum gas requirement for the
+ // transaction data. This change will always be a negative change.
+ GasChangeTxDataFloor GasChangeReason = 19
// GasChangeIgnored is a special value that can be used to indicate that the gas change should be ignored as
// it will be "manually" tracked by a direct emit of the gas change event.
diff --git a/execution/tracing/tracers/js/goja.go b/execution/tracing/tracers/js/goja.go
index 10dfae092e4..eaa4151905d 100644
--- a/execution/tracing/tracers/js/goja.go
+++ b/execution/tracing/tracers/js/goja.go
@@ -237,6 +237,7 @@ func (t *jsTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from
blockContext := evmtypes.BlockContext{
BlockNumber: env.BlockNumber,
Time: env.Time,
+ ArbOSVersion: env.ArbOSVersion,
}
rules := blockContext.Rules(env.ChainConfig)
t.activePrecompiles = vm.ActivePrecompiles(rules)
diff --git a/execution/tracing/tracers/js/tracer_arbitrum.go b/execution/tracing/tracers/js/tracer_arbitrum.go
new file mode 100644
index 00000000000..46df48a14c9
--- /dev/null
+++ b/execution/tracing/tracers/js/tracer_arbitrum.go
@@ -0,0 +1,91 @@
+// Copyright 2022 The go-ethereum Authors
+// (original work)
+// Copyright 2025 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package js
+
+import (
+ "github.com/dop251/goja"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/tracing"
+ "github.com/holiman/uint256"
+)
+
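+// CaptureArbitrumTransfer forwards transfers made outside of EVM execution to the JS tracer's
+// optional captureArbitrumTransfer callback, passing the from/to addresses, value and purpose.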
+func (jst *jsTracer) CaptureArbitrumTransfer(
+ from, to *common.Address, value *uint256.Int, before bool, reason string) {
+ traceTransfer, ok := goja.AssertFunction(jst.obj.Get("captureArbitrumTransfer"))
+ if !ok {
+ return
+ }
+
+ transfer := jst.vm.NewObject()
+ if from != nil {
+ transfer.Set("from", from.String())
+ } else {
+ transfer.Set("from", nil)
+ }
+ if to != nil {
+ transfer.Set("to", to.String())
+ } else {
+ transfer.Set("to", nil)
+ }
+
+ transfer.Set("value", value)
+ transfer.Set("before", before)
+ transfer.Set("purpose", reason)
+
+ if _, err := traceTransfer(transfer); err != nil {
+ jst.err = wrapError("captureArbitrumTransfer", err)
+ }
+}
+
+func (jst *jsTracer) CaptureStylusHostio(name string, args, outs []byte, startInk, endInk uint64) {
+ hostio, ok := goja.AssertFunction(jst.obj.Get("hostio"))
+ if !ok {
+ return
+ }
+
+ info := jst.vm.NewObject()
+ info.Set("name", name)
+ info.Set("args", args)
+ info.Set("outs", outs)
+ info.Set("startInk", startInk)
+ info.Set("endInk", endInk)
+
+ if _, err := hostio(jst.obj, info); err != nil {
+ jst.err = wrapError("hostio", err)
+ }
+}
+
+func (jst *jsTracer) OnBalanceChange(addr common.Address, prev, new *uint256.Int, reason tracing.BalanceChangeReason) {
+ traceBalanceChange, ok := goja.AssertFunction(jst.obj.Get("onBalanceChange"))
+
+ if !ok {
+ return
+ }
+
+ balanceChange := jst.vm.NewObject()
+ balanceChange.Set("addr", addr.String())
+ balanceChange.Set("prev", prev)
+ balanceChange.Set("new", new)
+ balanceChange.Set("reason", reason.Str())
+
+ if _, err := traceBalanceChange(jst.obj, balanceChange); err != nil {
+ jst.err = wrapError("onBalanceChange", err)
+ }
+}
diff --git a/execution/tracing/tracers/live/printer.go b/execution/tracing/tracers/live/printer.go
index 69e532e592f..4026090569f 100644
--- a/execution/tracing/tracers/live/printer.go
+++ b/execution/tracing/tracers/live/printer.go
@@ -25,18 +25,22 @@ func newPrinter(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, err
t := &Printer{}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
- OnTxStart: t.OnTxStart,
- OnTxEnd: t.OnTxEnd,
- OnEnter: t.OnEnter,
- OnExit: t.OnExit,
- OnOpcode: t.OnOpcode,
- OnFault: t.OnFault,
- OnGasChange: t.OnGasChange,
- OnBalanceChange: t.OnBalanceChange,
- OnNonceChange: t.OnNonceChange,
- OnCodeChange: t.OnCodeChange,
- OnStorageChange: t.OnStorageChange,
- OnLog: t.OnLog,
+ OnTxStart: t.OnTxStart,
+ OnTxEnd: t.OnTxEnd,
+ OnEnter: t.OnEnter,
+ OnExit: t.OnExit,
+ OnOpcode: t.OnOpcode,
+ OnFault: t.OnFault,
+ OnGasChange: t.OnGasChange,
+ OnBalanceChange: t.OnBalanceChange,
+ OnNonceChange: t.OnNonceChange,
+ OnCodeChange: t.OnCodeChange,
+ OnStorageChange: t.OnStorageChange,
+ OnLog: t.OnLog,
+ CaptureArbitrumTransfer: t.CaptureArbitrumTransfer,
+ CaptureArbitrumStorageGet: t.CaptureArbitrumStorageGet,
+ CaptureArbitrumStorageSet: t.CaptureArbitrumStorageSet,
+ CaptureStylusHostio: t.CaptureStylusHostio,
},
GetResult: t.GetResult,
Stop: t.Stop,
@@ -130,6 +134,22 @@ func (p *Printer) OnGasChange(old, new uint64, reason tracing.GasChangeReason) {
fmt.Printf("OnGasChange: old=%v, new=%v, diff=%v\n", old, new, new-old)
}
+func (p *Printer) CaptureArbitrumTransfer(from *common.Address, to *common.Address, value *uint256.Int, before bool, reason string) {
+ fmt.Printf("CaptureArbitrumTransfer: from=%v, to=%v, value=%v\n", from, to, value)
+}
+
+func (p *Printer) CaptureArbitrumStorageGet(key common.Hash, depth int, before bool) {
+ fmt.Printf("CaptureArbitrumStorageGet: key=%v, depth=%v\n", key, depth)
+}
+
+func (p *Printer) CaptureArbitrumStorageSet(key common.Hash, value common.Hash, depth int, before bool) {
+ fmt.Printf("CaptureArbitrumStorageSet: key=%v, value=%v, depth=%d\n", key, value, depth)
+}
+
+func (p *Printer) CaptureStylusHostio(name string, args []byte, outs []byte, ink uint64, ink2 uint64) {
+ fmt.Printf("CaptureStylusHostio: name=%s, args=%s, outs=%s, ink=%d, ink2=%d\n", name, hexutil.Bytes(args), hexutil.Bytes(outs), ink, ink2)
+}
+
func (p *Printer) GetResult() (json.RawMessage, error) {
return json.RawMessage{}, nil
}
diff --git a/execution/tracing/tracers/native/4byte.go b/execution/tracing/tracers/native/4byte.go
index 8cfb0a9188e..3d96c20a2ca 100644
--- a/execution/tracing/tracers/native/4byte.go
+++ b/execution/tracing/tracers/native/4byte.go
@@ -90,8 +90,9 @@ func (t *fourByteTracer) store(id []byte, size int) {
func (t *fourByteTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction, from accounts.Address) {
blockContext := evmtypes.BlockContext{
- BlockNumber: env.BlockNumber,
- Time: env.Time,
+ BlockNumber: env.BlockNumber,
+ Time: env.Time,
+ ArbOSVersion: env.ArbOSVersion,
}
rules := blockContext.Rules(env.ChainConfig)
t.activePrecompiles = vm.ActivePrecompiles(rules)
diff --git a/execution/tracing/tracers/native/call.go b/execution/tracing/tracers/native/call.go
index 526efbf609b..0e79b38a608 100644
--- a/execution/tracing/tracers/native/call.go
+++ b/execution/tracing/tracers/native/call.go
@@ -52,6 +52,9 @@ type callLog struct {
}
type callFrame struct {
+ BeforeEVMTransfers *[]arbitrumTransfer `json:"beforeEVMTransfers,omitempty"`
+ AfterEVMTransfers *[]arbitrumTransfer `json:"afterEVMTransfers,omitempty"`
+
Type vm.OpCode `json:"-"`
From common.Address `json:"from"`
Gas uint64 `json:"gas"`
@@ -108,6 +111,10 @@ type callFrameMarshaling struct {
}
type callTracer struct {
+ // Arbitrum: capture transfers occurring outside of evm execution
+ beforeEVMTransfers []arbitrumTransfer
+ afterEVMTransfers []arbitrumTransfer
+
callstack []callFrame
config callTracerConfig
gasLimit uint64
@@ -142,14 +149,19 @@ func newCallTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer,
}
// First callframe contains txn context info
// and is populated on start and end.
- t := &callTracer{callstack: make([]callFrame, 0, 1), config: config}
+ t := &callTracer{
+ beforeEVMTransfers: []arbitrumTransfer{},
+ afterEVMTransfers: []arbitrumTransfer{},
+ callstack: make([]callFrame, 0, 1),
+ config: config}
return &tracers.Tracer{
Hooks: &tracing.Hooks{
- OnTxStart: t.OnTxStart,
- OnTxEnd: t.OnTxEnd,
- OnEnter: t.OnEnter,
- OnExit: t.OnExit,
- OnLog: t.OnLog,
+ OnTxStart: t.OnTxStart,
+ OnTxEnd: t.OnTxEnd,
+ OnEnter: t.OnEnter,
+ OnExit: t.OnExit,
+ OnLog: t.OnLog,
+ CaptureArbitrumTransfer: t.CaptureArbitrumTransfer,
},
GetResult: t.GetResult,
Stop: t.Stop,
@@ -329,7 +341,13 @@ func (t *callTracer) GetResult() (json.RawMessage, error) {
if len(t.callstack) != 1 {
return nil, errors.New("incorrect number of top-level calls")
}
- res, err := json.Marshal(t.callstack[0])
+
+ call := t.callstack[0]
+ call.BeforeEVMTransfers = &t.beforeEVMTransfers
+ call.AfterEVMTransfers = &t.afterEVMTransfers
+
+ res, err := json.Marshal(call)
+
if err != nil {
return nil, err
}
diff --git a/execution/tracing/tracers/native/gen_callframe_json.go b/execution/tracing/tracers/native/gen_callframe_json.go
index c5a0c2f9623..0f29f6a7e3a 100644
--- a/execution/tracing/tracers/native/gen_callframe_json.go
+++ b/execution/tracing/tracers/native/gen_callframe_json.go
@@ -16,21 +16,25 @@ var _ = (*callFrameMarshaling)(nil)
// MarshalJSON marshals as JSON.
func (c callFrame) MarshalJSON() ([]byte, error) {
type callFrame0 struct {
- Type vm.OpCode `json:"-"`
- From common.Address `json:"from"`
- Gas hexutil.Uint64 `json:"gas"`
- GasUsed hexutil.Uint64 `json:"gasUsed"`
- To common.Address `json:"to,omitempty" rlp:"optional"`
- Input hexutil.Bytes `json:"input" rlp:"optional"`
- Output hexutil.Bytes `json:"output,omitempty" rlp:"optional"`
- Error string `json:"error,omitempty" rlp:"optional"`
- Revertal string `json:"revertReason,omitempty"`
- Calls []callFrame `json:"calls,omitempty" rlp:"optional"`
- Logs []callLog `json:"logs,omitempty" rlp:"optional"`
- Value *hexutil.Big `json:"value,omitempty" rlp:"optional"`
- TypeString string `json:"type"`
+ BeforeEVMTransfers *[]arbitrumTransfer `json:"beforeEVMTransfers,omitempty"`
+ AfterEVMTransfers *[]arbitrumTransfer `json:"afterEVMTransfers,omitempty"`
+ Type vm.OpCode `json:"-"`
+ From common.Address `json:"from"`
+ Gas hexutil.Uint64 `json:"gas"`
+ GasUsed hexutil.Uint64 `json:"gasUsed"`
+ To common.Address `json:"to,omitempty" rlp:"optional"`
+ Input hexutil.Bytes `json:"input" rlp:"optional"`
+ Output hexutil.Bytes `json:"output,omitempty" rlp:"optional"`
+ Error string `json:"error,omitempty" rlp:"optional"`
+ Revertal string `json:"revertReason,omitempty"`
+ Calls []callFrame `json:"calls,omitempty" rlp:"optional"`
+ Logs []callLog `json:"logs,omitempty" rlp:"optional"`
+ Value *hexutil.Big `json:"value,omitempty" rlp:"optional"`
+ TypeString string `json:"type"`
}
var enc callFrame0
+ enc.BeforeEVMTransfers = c.BeforeEVMTransfers
+ enc.AfterEVMTransfers = c.AfterEVMTransfers
enc.Type = c.Type
enc.From = c.From
enc.Gas = hexutil.Uint64(c.Gas)
@@ -50,23 +54,31 @@ func (c callFrame) MarshalJSON() ([]byte, error) {
// UnmarshalJSON unmarshals from JSON.
func (c *callFrame) UnmarshalJSON(input []byte) error {
type callFrame0 struct {
- Type *vm.OpCode `json:"-"`
- From *common.Address `json:"from"`
- Gas *hexutil.Uint64 `json:"gas"`
- GasUsed *hexutil.Uint64 `json:"gasUsed"`
- To *common.Address `json:"to,omitempty" rlp:"optional"`
- Input *hexutil.Bytes `json:"input" rlp:"optional"`
- Output *hexutil.Bytes `json:"output,omitempty" rlp:"optional"`
- Error *string `json:"error,omitempty" rlp:"optional"`
- Revertal *string `json:"revertReason,omitempty"`
- Calls []callFrame `json:"calls,omitempty" rlp:"optional"`
- Logs []callLog `json:"logs,omitempty" rlp:"optional"`
- Value *hexutil.Big `json:"value,omitempty" rlp:"optional"`
+ BeforeEVMTransfers *[]arbitrumTransfer `json:"beforeEVMTransfers,omitempty"`
+ AfterEVMTransfers *[]arbitrumTransfer `json:"afterEVMTransfers,omitempty"`
+ Type *vm.OpCode `json:"-"`
+ From *common.Address `json:"from"`
+ Gas *hexutil.Uint64 `json:"gas"`
+ GasUsed *hexutil.Uint64 `json:"gasUsed"`
+ To *common.Address `json:"to,omitempty" rlp:"optional"`
+ Input *hexutil.Bytes `json:"input" rlp:"optional"`
+ Output *hexutil.Bytes `json:"output,omitempty" rlp:"optional"`
+ Error *string `json:"error,omitempty" rlp:"optional"`
+ Revertal *string `json:"revertReason,omitempty"`
+ Calls []callFrame `json:"calls,omitempty" rlp:"optional"`
+ Logs []callLog `json:"logs,omitempty" rlp:"optional"`
+ Value *hexutil.Big `json:"value,omitempty" rlp:"optional"`
}
var dec callFrame0
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
+ if dec.BeforeEVMTransfers != nil {
+ c.BeforeEVMTransfers = dec.BeforeEVMTransfers
+ }
+ if dec.AfterEVMTransfers != nil {
+ c.AfterEVMTransfers = dec.AfterEVMTransfers
+ }
if dec.Type != nil {
c.Type = *dec.Type
}
diff --git a/execution/tracing/tracers/native/mux.go b/execution/tracing/tracers/native/mux.go
index c73242315b8..0a87b0912a5 100644
--- a/execution/tracing/tracers/native/mux.go
+++ b/execution/tracing/tracers/native/mux.go
@@ -75,6 +75,8 @@ func newMuxTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Tracer, e
OnCodeChange: t.OnCodeChange,
OnStorageChange: t.OnStorageChange,
OnLog: t.OnLog,
+
+ CaptureArbitrumTransfer: t.CaptureArbitrumTransfer,
},
GetResult: t.GetResult,
Stop: t.Stop,
@@ -177,6 +179,38 @@ func (t *muxTracer) OnLog(log *types.Log) {
}
}
+func (t *muxTracer) CaptureArbitrumStorageGet(key common.Hash, depth int, before bool) {
+ for _, t := range t.tracers {
+ if t.CaptureArbitrumStorageGet != nil {
+ t.CaptureArbitrumStorageGet(key, depth, before)
+ }
+ }
+}
+
+func (t *muxTracer) CaptureArbitrumStorageSet(key, value common.Hash, depth int, before bool) {
+ for _, t := range t.tracers {
+ if t.CaptureArbitrumStorageSet != nil {
+ t.CaptureArbitrumStorageSet(key, value, depth, before)
+ }
+ }
+}
+
+func (t *muxTracer) CaptureArbitrumTransfer(from, to *common.Address, value *uint256.Int, before bool, reason string) {
+ for _, t := range t.tracers {
+ if t.CaptureArbitrumTransfer != nil {
+ t.CaptureArbitrumTransfer(from, to, value, before, reason)
+ }
+ }
+}
+
+func (t *muxTracer) CaptureStylusHostio(name string, args, outs []byte, startInk, endInk uint64) {
+ for _, t := range t.tracers {
+ if t.CaptureStylusHostio != nil {
+ t.CaptureStylusHostio(name, args, outs, startInk, endInk)
+ }
+ }
+}
+
// GetResult returns an empty json object.
func (t *muxTracer) GetResult() (json.RawMessage, error) {
resObject := make(map[string]json.RawMessage)
diff --git a/execution/tracing/tracers/native/prestate.go b/execution/tracing/tracers/native/prestate.go
index adce0a6355a..32091cb390c 100644
--- a/execution/tracing/tracers/native/prestate.go
+++ b/execution/tracing/tracers/native/prestate.go
@@ -26,6 +26,7 @@ import (
"math/big"
"sync/atomic"
+ "github.com/erigontech/erigon/execution/chain/params"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -111,6 +112,8 @@ func newPrestateTracer(ctx *tracers.Context, cfg json.RawMessage) (*tracers.Trac
OnTxEnd: t.OnTxEnd,
OnOpcode: t.OnOpcode,
OnExit: t.OnExit,
+ CaptureArbitrumStorageGet: t.CaptureArbitrumStorageGet,
+ CaptureArbitrumStorageSet: t.CaptureArbitrumStorageSet,
},
GetResult: t.GetResult,
Stop: t.Stop,
@@ -230,6 +233,22 @@ func (t *prestateTracer) OnTxStart(env *tracing.VMContext, tx types.Transaction,
t.lookupAccount(from)
t.lookupAccount(t.to)
t.lookupAccount(env.Coinbase)
+ t.lookupAccount(params.HistoryStorageAddress)
+ if env.ChainConfig.IsArbitrum() {
+ t.lookupAccount(types.ArbosStateAddress)
+ }
+
// Add accounts with authorizations to the prestate before they get applied.
var b [32]byte
@@ -388,6 +407,12 @@ func (t *prestateTracer) lookupAccount(addr accounts.Address) {
if !t.config.DisableCode {
t.pre[addr].Code = code
+ if len(code) > 0 {
+ codeHash := crypto.Keccak256Hash(code)
+ t.pre[addr].CodeHash = &codeHash
+ } else {
+ t.pre[addr].CodeHash = nil
+ }
}
if !t.config.DisableStorage {
t.pre[addr].Storage = make(map[common.Hash]common.Hash)
@@ -402,6 +427,10 @@ func (t *prestateTracer) lookupStorage(addr accounts.Address, key common.Hash) {
return
}
+ if t.pre[addr] == nil {
+ t.lookupAccount(addr)
+ }
+
if _, ok := t.pre[addr].Storage[key]; ok {
return
}
diff --git a/execution/tracing/tracers/native/stylus_tracer.go b/execution/tracing/tracers/native/stylus_tracer.go
new file mode 100644
index 00000000000..8a959a57f5e
--- /dev/null
+++ b/execution/tracing/tracers/native/stylus_tracer.go
@@ -0,0 +1,219 @@
+// Copyright 2024, Offchain Labs, Inc.
+// For license information, see https://github.com/erigontech/nitro-erigon/blob/master/LICENSE
+
+package native
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math/big"
+ "strings"
+ "sync/atomic"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/common/log/v3"
+ "github.com/erigontech/erigon/execution/tracing"
+ "github.com/erigontech/erigon/execution/tracing/tracers"
+ "github.com/erigontech/erigon/execution/types/accounts"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/execution/vm"
+ "github.com/erigontech/nitro-erigon/util/containers"
+)
+
+func init() {
+ register("stylusTracer", newStylusTracer)
+}
+
+// stylusTracer captures Stylus HostIOs and returns them in a structured format to be used in Cargo
+// Stylus Replay.
+type stylusTracer struct {
+ open *containers.Stack[HostioTraceInfo]
+ stack *containers.Stack[*containers.Stack[HostioTraceInfo]]
+ interrupt atomic.Bool
+ reason error
+}
+
+// HostioTraceInfo contains the captured HostIO log returned by stylusTracer.
+type HostioTraceInfo struct {
+ // Name of the HostIO.
+ Name string `json:"name"`
+
+ // Arguments of the HostIO encoded as binary.
+	// For details about the encoding, check the HostIO implementation in
+ // arbitrator/wasm-libraries/user-host-trait.
+ Args hexutil.Bytes `json:"args"`
+
+ // Outputs of the HostIO encoded as binary.
+	// For details about the encoding, check the HostIO implementation in
+ // arbitrator/wasm-libraries/user-host-trait.
+ Outs hexutil.Bytes `json:"outs"`
+
+ // Amount of Ink before executing the HostIO.
+ StartInk uint64 `json:"startInk"`
+
+ // Amount of Ink after executing the HostIO.
+ EndInk uint64 `json:"endInk"`
+
+ // For *call HostIOs, the address of the called contract.
+ Address *accounts.Address `json:"address,omitempty"`
+
+ // For *call HostIOs, the steps performed by the called contract.
+ Steps *containers.Stack[HostioTraceInfo] `json:"steps,omitempty"`
+}
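+// For reference, a single captured entry marshals roughly as (illustrative
+// values): {"name":"storage_load_bytes32","args":"0x..","outs":"0x..",
+// "startInk":123,"endInk":100}; the "address" and "steps" fields are only
+// present for the *call HostIOs.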
+
+// nestsHostios contains the hostios with nested calls.
+var nestsHostios = map[string]bool{
+ "call_contract": true,
+ "delegate_call_contract": true,
+ "static_call_contract": true,
+}
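+// When one of these is captured, CaptureStylusHostio pops the matching
+// "evm_"-prefixed frame pushed by OnEnter and carries its Address and Steps
+// over into the HostIO entry, so the nested EVM call ends up under the HostIO
+// that triggered it.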
+
+func newStylusTracer(ctx *tracers.Context, _ json.RawMessage) (*tracers.Tracer, error) {
+ t := &stylusTracer{
+ open: containers.NewStack[HostioTraceInfo](),
+ stack: containers.NewStack[*containers.Stack[HostioTraceInfo]](),
+ }
+
+ return &tracers.Tracer{
+ Hooks: &tracing.Hooks{
+ OnEnter: t.OnEnter,
+ OnExit: t.OnExit,
+ CaptureStylusHostio: t.CaptureStylusHostio,
+ },
+ GetResult: t.GetResult,
+ Stop: t.Stop,
+ }, nil
+}
+
+func (t *stylusTracer) CaptureStylusHostio(name string, args, outs []byte, startInk, endInk uint64) {
+ if t.interrupt.Load() {
+ return
+ }
+ info := HostioTraceInfo{
+ Name: name,
+ Args: args,
+ Outs: outs,
+ StartInk: startInk,
+ EndInk: endInk,
+ }
+ if nestsHostios[name] {
+ last, err := t.open.Pop()
+ if err != nil {
+ t.Stop(err)
+ return
+ }
+ if !strings.HasPrefix(last.Name, "evm_") || last.Name[4:] != info.Name {
+ t.Stop(fmt.Errorf("trace inconsistency for %v: last opcode is %v", info.Name, last.Name))
+ return
+ }
+ if last.Steps == nil {
+ t.Stop(fmt.Errorf("trace inconsistency for %v: nil steps", info.Name))
+ return
+ }
+ info.Address = last.Address
+ info.Steps = last.Steps
+ }
+ t.open.Push(info)
+}
+
+func (t *stylusTracer) OnEnter(depth int, typ byte, from accounts.Address, to accounts.Address, precompile bool, input []byte, gas uint64, value uint256.Int, code []byte) {
+ if t.interrupt.Load() {
+ return
+ }
+ if depth == 0 {
+ return
+ }
+
+ // This function adds the prefix evm_ because it assumes the opcode came from the EVM.
+ // If the opcode comes from WASM, the CaptureStylusHostio function will remove the evm prefix.
+ var name string
+ switch vm.OpCode(typ) {
+ case vm.CALL:
+ name = "evm_call_contract"
+ case vm.DELEGATECALL:
+ name = "evm_delegate_call_contract"
+ case vm.STATICCALL:
+ name = "evm_static_call_contract"
+ case vm.CREATE:
+ name = "evm_create1"
+ case vm.CREATE2:
+ name = "evm_create2"
+ case vm.SELFDESTRUCT:
+ name = "evm_self_destruct"
+ }
+
+ inner := containers.NewStack[HostioTraceInfo]()
+ info := HostioTraceInfo{
+ Name: name,
+ Address: &to,
+ Steps: inner,
+ }
+ t.open.Push(info)
+ t.stack.Push(t.open)
+ t.open = inner
+}
+
+func (t *stylusTracer) OnExit(depth int, output []byte, gasUsed uint64, _ error, reverted bool) {
+ if t.interrupt.Load() {
+ return
+ }
+ if depth == 0 {
+ return
+ }
+ var err error
+ t.open, err = t.stack.Pop()
+ if err != nil {
+ t.Stop(err)
+ }
+}
+
+func (t *stylusTracer) GetResult() (json.RawMessage, error) {
+ if t.reason != nil {
+ return nil, t.reason
+ }
+
+ var internalErr error
+ if t.open == nil {
+ internalErr = errors.Join(internalErr, fmt.Errorf("tracer.open is nil"))
+ }
+ if t.stack == nil {
+ internalErr = errors.Join(internalErr, fmt.Errorf("tracer.stack is nil"))
+ }
+ if !t.stack.Empty() {
+ internalErr = errors.Join(internalErr, fmt.Errorf("tracer.stack should be empty, but has %d values", t.stack.Len()))
+ }
+ if internalErr != nil {
+ log.Error("stylusTracer: internal error when generating a trace", "error", internalErr)
+ return nil, fmt.Errorf("internal error: %w", internalErr)
+ }
+
+ msg, err := json.Marshal(t.open)
+ if err != nil {
+ return nil, err
+ }
+ return msg, nil
+}
+
+func (t *stylusTracer) Stop(err error) {
+ t.reason = err
+ t.interrupt.Store(true)
+}
+
+// Unimplemented EVMLogger interface methods
+
+func (t *stylusTracer) CaptureArbitrumTransfer(env *vm.EVM, from, to *common.Address, value *big.Int, before bool, purpose string) {
+}
+func (t *stylusTracer) CaptureArbitrumStorageGet(key common.Hash, depth int, before bool) {}
+func (t *stylusTracer) CaptureArbitrumStorageSet(key, value common.Hash, depth int, before bool) {}
+func (t *stylusTracer) CaptureTxStart(gasLimit uint64) {}
+func (t *stylusTracer) CaptureTxEnd(restGas uint64) {}
+func (t *stylusTracer) CaptureEnd(output []byte, usedGas uint64, err error) {}
+func (t *stylusTracer) CaptureStart(env *vm.EVM, from, to common.Address, precompile, create bool, input []byte, gas uint64, value *uint256.Int, code []byte) {
+}
+func (t *stylusTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.CallContext, rData []byte, depth int, err error) {
+}
+func (t *stylusTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.CallContext, depth int, err error) {
+}
diff --git a/execution/tracing/tracers/native/tracer.go b/execution/tracing/tracers/native/tracer.go
index 1f3a4c8e945..d57416ca333 100644
--- a/execution/tracing/tracers/native/tracer.go
+++ b/execution/tracing/tracers/native/tracer.go
@@ -62,6 +62,10 @@ Hence, we cannot make the map in init, but must make it upon first use.
*/
var ctors map[string]ctorFn
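+// RegisterExternal exposes the tracer registry to out-of-tree packages.
+// Illustrative use (hypothetical names): native.RegisterExternal("myTracer",
+// newMyTracer) from an external package's init().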
+func RegisterExternal(name string, ctor ctorFn) {
+ register(name, ctor)
+}
+
// register is used by native tracers to register their presence.
func register(name string, ctor ctorFn) {
if ctors == nil {
diff --git a/execution/tracing/tracers/native/tracer_arbitrum.go b/execution/tracing/tracers/native/tracer_arbitrum.go
new file mode 100644
index 00000000000..376132f61b7
--- /dev/null
+++ b/execution/tracing/tracers/native/tracer_arbitrum.go
@@ -0,0 +1,64 @@
+// Copyright 2022 The go-ethereum Authors
+// (original work)
+// Copyright 2025 The Erigon Authors
+// (modifications)
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package native
+
+import (
+ "github.com/holiman/uint256"
+
+ libcommon "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/types"
+)
+
+type arbitrumTransfer struct {
+ Purpose string `json:"purpose"`
+ From *string `json:"from"`
+ To *string `json:"to"`
+ Value string `json:"value"`
+}
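+// An entry serializes roughly as (illustrative values):
+//   {"purpose":"fee","from":"0x..","to":"0x..","value":"0x1"}
+// From and To are pointers and may be null, e.g. for mint/burn-style transfers.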
+
+func (t *callTracer) CaptureArbitrumTransfer(from, to *libcommon.Address, value *uint256.Int, before bool, reason string) {
+ transfer := arbitrumTransfer{
+ Purpose: reason,
+ Value: value.Hex(),
+ }
+ if from != nil {
+ from := from.String()
+ transfer.From = &from
+ }
+ if to != nil {
+ to := to.String()
+ transfer.To = &to
+ }
+ if before {
+ t.beforeEVMTransfers = append(t.beforeEVMTransfers, transfer)
+ } else {
+ t.afterEVMTransfers = append(t.afterEVMTransfers, transfer)
+ }
+}
+
+func (t *prestateTracer) CaptureArbitrumStorageGet(key libcommon.Hash, depth int, before bool) {
+ t.lookupAccount(types.ArbosStateAddress)
+ t.lookupStorage(types.ArbosStateAddress, key)
+}
+
+func (t *prestateTracer) CaptureArbitrumStorageSet(key, value libcommon.Hash, depth int, before bool) {
+ t.lookupAccount(types.ArbosStateAddress)
+ t.lookupStorage(types.ArbosStateAddress, key)
+}
diff --git a/execution/tracing/tracers/tracers.go b/execution/tracing/tracers/tracers.go
index 1b3b21e75d3..ee14cc5374e 100644
--- a/execution/tracing/tracers/tracers.go
+++ b/execution/tracing/tracers/tracers.go
@@ -75,3 +75,7 @@ func New(code string, ctx *Context, cfg json.RawMessage) (*Tracer, error) {
}
return nil, errors.New("tracer not found")
}
+
+// StateReleaseFunc is used to deallocate resources held by constructing a
+// historical state for tracing purposes.
+type StateReleaseFunc func()
diff --git a/execution/types/aa_transaction.go b/execution/types/aa_transaction.go
index a6205fb98a0..14a8d682d57 100644
--- a/execution/types/aa_transaction.go
+++ b/execution/types/aa_transaction.go
@@ -7,6 +7,8 @@ import (
"io"
"math/big"
+ "github.com/erigontech/erigon/arb"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -32,6 +34,8 @@ var AA_ENTRY_POINT = accounts.InternAddress(common.HexToAddress("0x0000000000000
var AA_SENDER_CREATOR = accounts.InternAddress(common.HexToAddress("0x00000000000000000000000000000000ffff7560"))
type AccountAbstractionTransaction struct {
+ arb.NoTimeBoosted
+
TransactionMisc
Nonce uint64
ChainID *uint256.Int
@@ -77,7 +81,7 @@ func (tx *AccountAbstractionTransaction) Sender(signer Signer) (accounts.Address
return tx.SenderAddress, nil
}
-func (tx *AccountAbstractionTransaction) cachedSender() (accounts.Address, bool) {
+func (tx *AccountAbstractionTransaction) CachedSender() (accounts.Address, bool) {
return tx.SenderAddress, true
}
@@ -169,6 +173,8 @@ func (tx *AccountAbstractionTransaction) AsMessage(s Signer, baseFee *big.Int, r
to: accounts.NilAddress,
gasPrice: *tx.FeeCap,
blobHashes: []common.Hash{},
+
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
}, nil
}
@@ -273,8 +279,8 @@ func (tx *AccountAbstractionTransaction) EncodingSize() int {
func (tx *AccountAbstractionTransaction) EncodeRLP(w io.Writer) error {
payloadSize, accessListLen, authorizationsLen := tx.payloadSize()
envelopSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode envelope size
if err := rlp.EncodeStringSizePrefix(envelopSize, w, b[:]); err != nil {
return err
@@ -509,8 +515,8 @@ func (tx *AccountAbstractionTransaction) DecodeRLP(s *rlp.Stream) error {
func (tx *AccountAbstractionTransaction) MarshalBinary(w io.Writer) error {
payloadSize, accessListLen, authorizationsLen := tx.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode TxType
b[0] = AccountAbstractionTxType
if _, err := w.Write(b[:1]); err != nil {
diff --git a/execution/types/access_list_tx.go b/execution/types/access_list_tx.go
index cf6bab66da2..1a347830fc5 100644
--- a/execution/types/access_list_tx.go
+++ b/execution/types/access_list_tx.go
@@ -25,6 +25,8 @@ import (
"io"
"math/big"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+ "github.com/erigontech/erigon/common/length"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -54,8 +56,9 @@ func (al AccessList) StorageKeys() int {
// AccessListTx is the data of EIP-2930 access list transactions.
type AccessListTx struct {
LegacyTx
- ChainID *uint256.Int
- AccessList AccessList // EIP-2930 access list
+ ChainID *uint256.Int
+ AccessList AccessList // EIP-2930 access list
+ Timeboosted *bool
}
// copy creates a deep copy of the transaction data and initializes all fields.
@@ -77,6 +80,9 @@ func (tx *AccessListTx) copy() *AccessListTx {
AccessList: make(AccessList, len(tx.AccessList)),
}
copy(cpy.AccessList, tx.AccessList)
+ if tx.Timeboosted != nil {
+ cpy.Timeboosted = &(*tx.Timeboosted)
+ }
if tx.Value != nil {
cpy.Value.Set(tx.Value)
}
@@ -103,20 +109,22 @@ func (tx *AccessListTx) GetAuthorizations() []Authorization {
func (tx *AccessListTx) Protected() bool {
return true
}
-
func (tx *AccessListTx) Unwrap() Transaction {
return tx
}
+func (tx *AccessListTx) IsTimeBoosted() *bool { return tx.Timeboosted }
+func (tx *AccessListTx) SetTimeboosted(val *bool) { tx.Timeboosted = val }
+
// EncodingSize returns the RLP encoding size of the whole transaction envelope
func (tx *AccessListTx) EncodingSize() int {
- payloadSize, _ := tx.payloadSize()
+ payloadSize, _ := tx.payloadSize(false)
// Add envelope size and type size
return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
}
// payloadSize calculates the RLP encoding size of transaction, without TxType and envelope
-func (tx *AccessListTx) payloadSize() (payloadSize int, accessListLen int) {
+func (tx *AccessListTx) payloadSize(hashingOnly bool) (payloadSize int, accessListLen int) {
payloadSize += rlp.Uint256Len(*tx.ChainID)
payloadSize += rlp.U64Len(tx.Nonce)
payloadSize += rlp.Uint256Len(*tx.GasPrice)
@@ -138,6 +146,11 @@ func (tx *AccessListTx) payloadSize() (payloadSize int, accessListLen int) {
payloadSize += rlp.Uint256Len(tx.V)
payloadSize += rlp.Uint256Len(tx.R)
payloadSize += rlp.Uint256Len(tx.S)
+
+ if !hashingOnly && tx.Timeboosted != nil {
+ payloadSize += rlp.BoolLen()
+ }
+
return payloadSize, accessListLen
}
@@ -186,21 +199,36 @@ func encodeAccessList(al AccessList, w io.Writer, b []byte) error {
// For legacy transactions, it returns the RLP encoding. For EIP-2718 typed
// transactions, it returns the type and payload.
func (tx *AccessListTx) MarshalBinary(w io.Writer) error {
- payloadSize, accessListLen := tx.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ payloadSize, accessListLen := tx.payloadSize(false)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = AccessListTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, false); err != nil {
+ return err
+ }
+ return nil
+}
+
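+// MarshalBinaryForHashing encodes the transaction with hashingOnly=true, which
+// omits the Arbitrum-specific trailing Timeboosted flag; presumably this keeps
+// signing and transaction hashes identical to the canonical EIP-2930 encoding.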
+func (tx *AccessListTx) MarshalBinaryForHashing(w io.Writer) error {
+ payloadSize, accessListLen := tx.payloadSize(true)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode TxType
b[0] = AccessListTxType
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if err := tx.encodePayload(w, b[:], payloadSize, accessListLen); err != nil {
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, true); err != nil {
return err
}
return nil
}
-func (tx *AccessListTx) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen int) error {
+func (tx *AccessListTx) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen int, hashingOnly bool) error {
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
return err
@@ -225,7 +253,7 @@ func (tx *AccessListTx) encodePayload(w io.Writer, b []byte, payloadSize, access
if tx.To == nil {
b[0] = 128
} else {
- b[0] = 128 + 20
+ b[0] = 128 + length.Addr
}
if _, err := w.Write(b[:1]); err != nil {
return err
@@ -263,17 +291,24 @@ func (tx *AccessListTx) encodePayload(w io.Writer, b []byte, payloadSize, access
if err := rlp.EncodeUint256(tx.S, w, b); err != nil {
return err
}
+
+ if tx.Timeboosted != nil && !hashingOnly {
+ if err := rlp.EncodeBool(*tx.Timeboosted, w, b); err != nil {
+ return err
+ }
+ }
+
return nil
}
// EncodeRLP implements rlp.Encoder
func (tx *AccessListTx) EncodeRLP(w io.Writer) error {
- payloadSize, accessListLen := tx.payloadSize()
+ payloadSize, accessListLen := tx.payloadSize(false)
// size of struct prefix and TxType
envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// envelope
if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
return err
@@ -283,7 +318,7 @@ func (tx *AccessListTx) EncodeRLP(w io.Writer) error {
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if err := tx.encodePayload(w, b[:], payloadSize, accessListLen); err != nil {
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, false); err != nil {
return err
}
return nil
@@ -390,10 +425,15 @@ func (tx *AccessListTx) DecodeRLP(s *rlp.Stream) error {
return fmt.Errorf("read S: %w", err)
}
tx.S.SetBytes(b)
- if err := s.ListEnd(); err != nil {
- return fmt.Errorf("close AccessListTx: %w", err)
+
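+	// Arbitrum may append an optional trailing boolean (Timeboosted) after S;
+	// payloads without it still decode, since the field is only read when more
+	// list data remains.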
+ if s.MoreDataInList() {
+ boolVal, err := s.Bool()
+ if err != nil {
+ return err
+ }
+ tx.Timeboosted = &boolVal
}
- return nil
+ return s.ListEnd()
}
// AsMessage returns the transaction as a core.Message.
@@ -418,6 +458,9 @@ func (tx *AccessListTx) AsMessage(s Signer, _ *big.Int, rules *chain.Rules) (*Me
checkNonce: true,
checkTransaction: true,
checkGas: true,
+
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ Tx: tx,
}
if !rules.IsBerlin {
@@ -498,7 +541,7 @@ func (tx *AccessListTx) GetChainID() *uint256.Int {
return tx.ChainID
}
-func (tx *AccessListTx) cachedSender() (sender accounts.Address, ok bool) {
+func (tx *AccessListTx) CachedSender() (sender accounts.Address, ok bool) {
s := tx.from
if s.IsNil() {
return sender, false
diff --git a/execution/types/accounts/account_test.go b/execution/types/accounts/account_test.go
index 544cd4f953a..abd56e86497 100644
--- a/execution/types/accounts/account_test.go
+++ b/execution/types/accounts/account_test.go
@@ -47,6 +47,27 @@ func TestEmptyAccount(t *testing.T) {
isIncarnationEqual(t, a.Incarnation, decodedAcc.Incarnation)
}
+func TestEmptyAcountEncoding(t *testing.T) {
+ t.Parallel()
+ emptyAcc := Account{
+ Initialised: true,
+ Nonce: 0,
+ Balance: *new(uint256.Int),
+ Root: empty.RootHash, // extAccount doesn't have Root value
+ CodeHash: empty.CodeHash, // extAccount doesn't have CodeHash value
+ Incarnation: 0,
+ }
+
+ encodedAccount := SerialiseV3(&emptyAcc)
+
+ decodedAcc := Account{}
+ if err := DeserialiseV3(&decodedAcc, encodedAccount); err != nil {
+ t.Fatal("Can't decode the incarnation", err, encodedAccount)
+ }
+
+ isAccountsEqual(t, emptyAcc, decodedAcc)
+}
+
func TestEmptyAccount2(t *testing.T) {
t.Parallel()
emptyAcc := Account{}
diff --git a/execution/types/arb_tx.go b/execution/types/arb_tx.go
new file mode 100644
index 00000000000..821827a78e3
--- /dev/null
+++ b/execution/types/arb_tx.go
@@ -0,0 +1,656 @@
+package types
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "sync/atomic"
+ "time"
+
+ "github.com/erigontech/erigon/common"
+ cmath "github.com/erigontech/erigon/common/math"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/erigontech/erigon/execution/types/accounts"
+)
+
+var (
+ ErrGasFeeCapTooLow = errors.New("fee cap less than base fee")
+ errShortTypedTx = errors.New("typed transaction too short")
+ errInvalidYParity = errors.New("'yParity' field must be 0 or 1")
+ errVYParityMismatch = errors.New("'v' and 'yParity' fields do not match")
+ errVYParityMissing = errors.New("missing 'yParity' or 'v' field in transaction")
+)
+
+// getPooledBuffer retrieves a buffer from the pool and creates a byte slice of the
+// requested size from it.
+//
+// The caller should return the *bytes.Buffer object back into EncodeBufferPool after use!
+// The returned byte slice must not be used after returning the buffer.
+func getPooledBuffer(size uint64) ([]byte, *bytes.Buffer, error) {
+ if size > math.MaxInt {
+ return nil, nil, fmt.Errorf("can't get buffer of size %d", size)
+ }
+ buf := EncodeBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ buf.Grow(int(size))
+ b := buf.Bytes()[:int(size)]
+ return b, buf, nil
+}
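+// Typical use (illustrative sketch):
+//
+//	b, buf, err := getPooledBuffer(size)
+//	if err != nil { return err }
+//	defer EncodeBufferPool.Put(buf)
+//	copy(b, payload) // b must not be used after the Put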
+
+// ArbTx is an Arbitrum transaction.
+type ArbTx struct {
+ BlobTxWrapper
+ inner Transaction // Consensus contents of a transaction
+ // sidecar *BlobTxSidecar
+ time time.Time // Time first seen locally (spam avoidance)
+
+ // Arbitrum cache: must be atomically accessed
+ CalldataUnits uint64
+
+ // caches
+ hash atomic.Value
+ size atomic.Value
+ from atomic.Value
+}
+
+// NewTx creates a new transaction.
+func NewArbTx(inner Transaction) *ArbTx {
+ tx := new(ArbTx)
+ tx.setDecoded(inner.Unwrap(), 0)
+ return tx
+}
+
+// EncodeRLP implements rlp.Encoder
+// func (tx *ArbTx) EncodeRLP(w io.Writer) error {
+// if tx.Type() == LegacyTxType {
+// return rlp.Encode(w, tx.inner)
+// }
+// // It's an EIP-2718 typed TX envelope.
+// buf := EncodeBufferPool.Get().(*bytes.Buffer)
+// defer EncodeBufferPool.Put(buf)
+// buf.Reset()
+// if err := tx.encodeTyped(buf); err != nil {
+// return err
+// }
+// return rlp.Encode(w, buf.Bytes())
+// }
+
+// encodeTyped writes the canonical encoding of a typed transaction to w.
+func (tx *ArbTx) encodeTyped(w *bytes.Buffer) error {
+ w.WriteByte(tx.Type())
+ return tx.inner.EncodeRLP(w)
+}
+
+func (tx *ArbTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*Message, error) {
+ msg, err := tx.Tx.AsMessage(s, baseFee, rules)
+ if err == nil {
+ msg.Tx = tx
+ }
+ return msg, err
+}
+
+// MarshalBinary returns the canonical encoding of the transaction.
+// For legacy transactions, it returns the RLP encoding. For EIP-2718 typed
+// transactions, it returns the type and payload.
+// func (tx *ArbTx) MarshalBinary() ([]byte, error) {
+// if tx.Type() == LegacyTxType {
+// return rlp.EncodeToBytes(tx.inner)
+// }
+// var buf bytes.Buffer
+// err := tx.encodeTyped(&buf)
+// return buf.Bytes(), err
+// }
+
+// DecodeRLP implements rlp.Decoder
+func (tx *ArbTx) DecodeRLP(s *rlp.Stream) error {
+ kind, size, err := s.Kind()
+ switch {
+ case err != nil:
+ return err
+ case kind == rlp.List:
+ // It's a legacy transaction.
+ var inner LegacyTx
+ err := s.Decode(&inner)
+ if err == nil {
+ tx.setDecoded(&inner, rlp.ListSize(size))
+ }
+ return err
+ case kind == rlp.Byte:
+ return errShortTypedTx
+ default:
+ //b, buf, err := getPooledBuffer(size)
+ //if err != nil {
+ // return err
+ //}
+ //defer EncodeBufferPool.Put(buf)
+ //s.
+
+ // It's an EIP-2718 typed TX envelope.
+ // First read the tx payload bytes into a temporary buffer.
+ b, err := s.Bytes()
+ if err != nil {
+ return err
+ }
+ // Now decode the inner transaction.
+ inner, err := tx.decodeTyped(b, true)
+ if err == nil {
+ tx.setDecoded(inner, size)
+ }
+ return err
+ }
+}
+
+// UnmarshalBinary decodes the canonical encoding of transactions.
+// It supports legacy RLP transactions and EIP-2718 typed transactions.
+func (tx *ArbTx) UnmarshalBinary(b []byte) error {
+ if len(b) > 0 && b[0] > 0x7f {
+ // It's a legacy transaction.
+ var data LegacyTx
+ err := rlp.DecodeBytes(b, &data)
+ if err != nil {
+ return err
+ }
+ tx.setDecoded(&data, uint64(len(b)))
+ return nil
+ }
+ // It's an EIP-2718 typed transaction envelope.
+ inner, err := tx.decodeTyped(b, false)
+ if err != nil {
+ return err
+ }
+ tx.setDecoded(inner, uint64(len(b)))
+ return nil
+}
+
+// decodeTyped decodes a typed transaction from the canonical format.
+func (tx *ArbTx) decodeTyped(b []byte, arbParsing bool) (Transaction, error) {
+ if len(b) <= 1 {
+ return nil, errShortTypedTx
+ }
+ var inner Transaction
+ if arbParsing {
+ switch b[0] {
+ case ArbitrumDepositTxType:
+ inner = new(ArbitrumDepositTx)
+ case ArbitrumInternalTxType:
+ inner = new(ArbitrumInternalTx)
+ case ArbitrumUnsignedTxType:
+ inner = new(ArbitrumUnsignedTx)
+ case ArbitrumContractTxType:
+ inner = new(ArbitrumContractTx)
+ case ArbitrumRetryTxType:
+ inner = new(ArbitrumRetryTx)
+ case ArbitrumSubmitRetryableTxType:
+ inner = new(ArbitrumSubmitRetryableTx)
+ case ArbitrumLegacyTxType:
+ inner = new(ArbitrumLegacyTxData)
+ default:
+ arbParsing = false
+ }
+ }
+ if !arbParsing {
+ switch b[0] {
+ case AccessListTxType:
+ inner = new(AccessListTx)
+ case DynamicFeeTxType:
+ inner = new(DynamicFeeTransaction)
+ case BlobTxType:
+ inner = new(BlobTx)
+ default:
+ return nil, ErrTxTypeNotSupported
+ }
+ }
+ s := rlp.NewStream(bytes.NewReader(b[1:]), uint64(len(b)-1))
+ err := inner.DecodeRLP(s)
+ return inner, err
+}
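+// Note: decodeTyped tries the Arbitrum-specific type bytes first and only
+// falls back to the standard EIP-2718 types (access list, dynamic fee, blob)
+// when the leading byte is not a recognized Arbitrum type.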
+
+// setDecoded sets the inner transaction and size after decoding.
+func (tx *ArbTx) setDecoded(inner Transaction, size uint64) {
+ tx.inner = inner
+ tx.time = time.Now()
+ if size > 0 {
+ tx.size.Store(size)
+ }
+}
+
+// Protected says whether the transaction is replay-protected.
+func (tx *ArbTx) Protected() bool {
+ switch tx := tx.inner.(type) {
+ case *LegacyTx:
+ return !tx.V.IsZero() && IsProtectedV(&tx.V)
+ default:
+ return true
+ }
+}
+
+// Type returns the transaction type.
+func (tx *ArbTx) Type() uint8 {
+ return tx.inner.Type()
+ //return tx.inner.txType()
+}
+
+func (tx *ArbTx) GetInner() Transaction {
+ return tx.inner
+}
+
+// ChainId returns the EIP155 chain ID of the transaction. The return value will always be
+// non-nil. For legacy transactions which are not replay-protected, the return value is
+// zero.
+func (tx *ArbTx) ChainId() *big.Int {
+ return tx.inner.GetChainID().ToBig()
+}
+
+// Data returns the input data of the transaction.
+func (tx *ArbTx) Data() []byte { return tx.inner.GetData() }
+
+// AccessList returns the access list of the transaction.
+func (tx *ArbTx) AccessList() AccessList { return tx.inner.GetAccessList() }
+
+// Gas returns the gas limit of the transaction.
+func (tx *ArbTx) Gas() uint64 { return tx.inner.GetGasLimit() }
+
+// GasPrice returns the gas price of the transaction.
+// TODO same as .GasFeeCap()?
+func (tx *ArbTx) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.GetFeeCap().ToBig()) }
+
+// GasTipCap returns the gasTipCap per gas of the transaction.
+func (tx *ArbTx) GasTipCap() *big.Int { return new(big.Int).Set(tx.inner.GetTipCap().ToBig()) }
+
+// GasFeeCap returns the fee cap per gas of the transaction.
+func (tx *ArbTx) GasFeeCap() *big.Int { return new(big.Int).Set(tx.inner.GetFeeCap().ToBig()) }
+
+// Value returns the ether amount of the transaction.
+func (tx *ArbTx) Value() *big.Int { return new(big.Int).Set(tx.inner.GetValue().ToBig()) }
+
+// Nonce returns the sender account nonce of the transaction.
+func (tx *ArbTx) Nonce() uint64 { return tx.inner.GetNonce() }
+
+// To returns the recipient address of the transaction.
+// For contract-creation transactions, To returns nil.
+func (tx *ArbTx) To() *common.Address {
+ return copyAddressPtr(tx.inner.GetTo())
+}
+
+// Cost returns (gas * gasPrice) + (blobGas * blobGasPrice) + value.
+func (tx *ArbTx) Cost() *big.Int {
+ total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas()))
+ if tx.Type() == BlobTxType {
+ total.Add(total, new(big.Int).Mul(tx.BlobGasFeeCap(), new(big.Int).SetUint64(tx.BlobGas())))
+ }
+ total.Add(total, tx.Value())
+ return total
+}
+
+// GasFeeCapCmp compares the fee cap of two transactions.
+func (tx *ArbTx) GasFeeCapCmp(other *ArbTx) int {
+ return tx.inner.GetFeeCap().ToBig().Cmp(other.inner.GetFeeCap().ToBig())
+}
+
+// GasFeeCapIntCmp compares the fee cap of the transaction against the given fee cap.
+func (tx *ArbTx) GasFeeCapIntCmp(other *big.Int) int {
+ return tx.inner.GetFeeCap().ToBig().Cmp(other)
+}
+
+// GasTipCapCmp compares the gasTipCap of two transactions.
+func (tx *ArbTx) GasTipCapCmp(other *ArbTx) int {
+ return tx.inner.GetTipCap().Cmp(other.inner.GetTipCap())
+}
+
+// GasTipCapIntCmp compares the gasTipCap of the transaction against the given gasTipCap.
+func (tx *ArbTx) GasTipCapIntCmp(other *big.Int) int {
+ return tx.inner.GetTipCap().ToBig().Cmp(other)
+}
+
+// EffectiveGasTip returns the effective miner gasTipCap for the given base fee.
+// Note: if the effective gasTipCap is negative, this method returns the actual
+// negative value _and_ ErrGasFeeCapTooLow.
+func (tx *ArbTx) EffectiveGasTip(baseFee *big.Int) (*big.Int, error) {
+ if baseFee == nil {
+ return tx.GasTipCap(), nil
+ }
+ var err error
+ gasFeeCap := tx.GasFeeCap()
+ if gasFeeCap.Cmp(baseFee) == -1 {
+ err = ErrGasFeeCapTooLow
+ }
+ minn := tx.GasTipCap()
+ gasCap := gasFeeCap.Sub(gasFeeCap, baseFee)
+ if minn.Cmp(gasCap) > 0 {
+ minn = gasCap
+ }
+ return minn, err
+}
+
+// EffectiveGasTipValue is identical to EffectiveGasTip, but does not return an
+// error in case the effective gasTipCap is negative
+func (tx *ArbTx) EffectiveGasTipValue(baseFee *big.Int) *big.Int {
+ effectiveTip, _ := tx.EffectiveGasTip(baseFee)
+ return effectiveTip
+}
+
+// EffectiveGasTipCmp compares the effective gasTipCap of two transactions assuming the given base fee.
+func (tx *ArbTx) EffectiveGasTipCmp(other *ArbTx, baseFee *big.Int) int {
+ if baseFee == nil {
+ return tx.GasTipCapCmp(other)
+ }
+ return tx.EffectiveGasTipValue(baseFee).Cmp(other.EffectiveGasTipValue(baseFee))
+}
+
+// EffectiveGasTipIntCmp compares the effective gasTipCap of a transaction to the given gasTipCap.
+func (tx *ArbTx) EffectiveGasTipIntCmp(other *big.Int, baseFee *big.Int) int {
+ if baseFee == nil {
+ return tx.GasTipCapIntCmp(other)
+ }
+ return tx.EffectiveGasTipValue(baseFee).Cmp(other)
+}
+
+// BlobGas returns the blob gas limit of the transaction for blob transactions, 0 otherwise.
+func (tx *ArbTx) BlobGas() uint64 {
+ if blobtx, ok := tx.inner.(*BlobTx); ok {
+ return blobtx.GetBlobGas()
+ }
+ return 0
+}
+
+// BlobGasFeeCap returns the blob gas fee cap per blob gas of the transaction for blob transactions, nil otherwise.
+func (tx *ArbTx) BlobGasFeeCap() *big.Int {
+ if blobtx, ok := tx.inner.(*BlobTx); ok {
+ return blobtx.GetFeeCap().ToBig()
+ }
+ return nil
+}
+
+// BlobHashes returns the hashes of the blob commitments for blob transactions, nil otherwise.
+func (tx *ArbTx) BlobHashes() []common.Hash {
+ if blobtx, ok := tx.inner.(*BlobTx); ok {
+ return blobtx.GetBlobHashes()
+ }
+ return nil
+}
+
+// BlobTxSidecar returns the sidecar of a blob transaction, nil otherwise.
+func (tx *ArbTx) BlobTxSidecar() *BlobTxWrapper {
+ //if blobtx, ok := tx.inner.(*BlobTx); ok {
+ // //return blobtx.Get
+ //}
+ return &tx.BlobTxWrapper
+}
+
+// BlobGasFeeCapCmp compares the blob fee cap of two transactions.
+func (tx *ArbTx) BlobGasFeeCapCmp(other *ArbTx) int {
+ return tx.BlobGasFeeCap().Cmp(other.BlobGasFeeCap())
+}
+
+// BlobGasFeeCapIntCmp compares the blob fee cap of the transaction against the given blob fee cap.
+func (tx *ArbTx) BlobGasFeeCapIntCmp(other *big.Int) int {
+ return tx.BlobGasFeeCap().Cmp(other)
+}
+
+//// WithoutBlobTxSidecar returns a copy of tx with the blob sidecar removed.
+//func (tx *ArbTx) WithoutBlobTxSidecar() *ArbTx {
+// blobtx, ok := tx.inner.(*BlobTx)
+// if !ok {
+// return tx
+// }
+// cpy := &ArbTx{
+// inner: blobtx.withoutSidecar(),
+// time: tx.time,
+// }
+// // Note: tx.size cache not carried over because the sidecar is included in size!
+// if h := tx.hash.Load(); h != nil {
+// cpy.hash.Store(h)
+// }
+// if f := tx.from.Load(); f != nil {
+// cpy.from.Store(f)
+// }
+// return cpy
+//}
+
+// BlobTxSidecar contains the blobs of a blob transaction.
+// type BlobTxSidecar struct {
+// Blobs []kzg4844.Blob // Blobs needed by the blob pool
+// Commitments []kzg4844.KZGCommitment // Commitments needed by the blob pool
+// Proofs []kzg4844.KZGProof // Proofs needed by the blob pool
+// }
+
+// // BlobHashes computes the blob hashes of the given blobs.
+// func (sc *BlobTxSidecar) BlobHashes() []common.Hash {
+// hasher := sha256.New()
+// h := make([]common.Hash, len(sc.Commitments))
+// for i := range sc.Blobs {
+// h[i] = kzg4844.CalcBlobHashV1(hasher, &sc.Commitments[i])
+// }
+// return h
+// }
+
+// // encodedSize computes the RLP size of the sidecar elements. This does NOT return the
+// // encoded size of the BlobTxSidecar, it's just a helper for tx.Size().
+// func (sc *BlobTxSidecar) encodedSize() uint64 {
+// var blobs, commitments, proofs uint64
+// for i := range sc.Blobs {
+// blobs += rlp.BytesSize(sc.Blobs[i][:])
+// }
+// for i := range sc.Commitments {
+// commitments += rlp.BytesSize(sc.Commitments[i][:])
+// }
+// for i := range sc.Proofs {
+// proofs += rlp.BytesSize(sc.Proofs[i][:])
+// }
+// return rlp.ListSize(blobs) + rlp.ListSize(commitments) + rlp.ListSize(proofs)
+// }
+
+// WithBlobTxSidecar returns a copy of tx with the blob sidecar added.
+// TODO figure out how to add the sidecar
+func (tx *ArbTx) WithBlobTxSidecar(sideCar *BlobTxWrapper) *ArbTx {
+ //blobtx, ok := tx.inner.(*BlobTx)
+ //if !ok {
+ // return tx
+ //}
+ cpy := &ArbTx{
+ inner: tx.inner,
+ //inner: blobtx.withSidecar(sideCar),
+ // sidecar: sideCar,
+ time: tx.time,
+ }
+ // Note: tx.size cache not carried over because the sidecar is included in size!
+ if h := tx.hash.Load(); h != nil {
+ cpy.hash.Store(h)
+ }
+ if f := tx.from.Load(); f != nil {
+ cpy.from.Store(f)
+ }
+ return cpy
+}
+
+// SetTime sets the decoding time of a transaction. This is used by tests to set
+// arbitrary times and by persistent transaction pools when loading old txs from
+// disk.
+func (tx *ArbTx) SetTime(t time.Time) {
+ tx.time = t
+}
+
+// Time returns the time when the transaction was first seen on the network. It
+// is a heuristic to prefer mining older txs over newer ones, all other things
+// being equal.
+func (tx *ArbTx) Time() time.Time {
+ return tx.time
+}
+
+// Hash returns the transaction hash.
+func (tx *ArbTx) Hash() common.Hash {
+ if hash := tx.hash.Load(); hash != nil {
+ return hash.(common.Hash)
+ }
+
+ var h common.Hash
+ if tx.Type() == LegacyTxType {
+ h = rlpHash(tx.inner)
+ } else if tx.Type() == ArbitrumLegacyTxType {
+ h = tx.inner.(*ArbitrumLegacyTxData).HashOverride
+ } else {
+ h = prefixedRlpHash(tx.Type(), tx.inner)
+ }
+ tx.hash.Store(h)
+ return h
+}
+
+// Size returns the true encoded storage size of the transaction, either by encoding
+// and returning it, or returning a previously cached value.
+// func (tx *ArbTx) Size() uint64 {
+// if size := tx.size.Load(); size != nil {
+// return size.(uint64)
+// }
+
+// // Cache miss, encode and cache.
+// // Note we rely on the assumption that all tx.inner values are RLP-encoded!
+// c := writeCounter(0)
+// rlp.Encode(&c, &tx.inner)
+// size := uint64(c)
+
+// // For blob transactions, add the size of the blob content and the outer list of the
+// // tx + sidecar encoding.
+// if sc := tx.BlobTxSidecar(); sc != nil {
+// size += rlp.ListSize(sc.encodedSize())
+// }
+
+// // For typed transactions, the encoding also includes the leading type byte.
+// if tx.Type() != LegacyTxType {
+// size += 1
+// }
+
+// tx.size.Store(size)
+// return size
+// }
+
+// WithSignature returns a new transaction with the given signature.
+// This signature needs to be in the [R || S || V] format where V is 0 or 1.
+// func (tx *ArbTx) WithSignature(signer Signer, sig []byte) (*ArbTx, error) {
+// r, s, v, err := signer.SignatureValues(tx, sig)
+// if err != nil {
+// return nil, err
+// }
+// if r == nil || s == nil || v == nil {
+// return nil, fmt.Errorf("%w: r: %s, s: %s, v: %s", ErrInvalidSig, r, s, v)
+// }
+// cpy := tx.inner.copy()
+// cpy.setSignatureValues(signer.ChainID(), v, r, s)
+// return &ArbTx{inner: cpy, time: tx.time}, nil
+// }
+
+// ArbTxs implements DerivableList for transactions.
+type ArbTxs []*ArbTx
+
+func WrapArbTransactions(txs ArbTxs) Transactions {
+ txns := make([]Transaction, len(txs))
+ for i := 0; i < len(txs); i++ {
+ txns[i] = Transaction(txs[i])
+ }
+ return txns
+}
+
+// Len returns the length of s.
+func (s ArbTxs) Len() int { return len(s) }
+
+// EncodeIndex encodes the i'th transaction to w. Note that this does not check for errors
+// because we assume that *ArbTx will only ever contain valid txs that were either
+// constructed by decoding or via public API in this package.
+func (s ArbTxs) EncodeIndex(i int, w *bytes.Buffer) {
+ tx := s[i]
+
+ switch tx.Type() {
+ // case ArbitrumLegacyTxType:
+ // arbData := tx.inner.(*ArbitrumLegacyTxData) //
+ // arbData.EncodeOnlyLegacyInto(w)
+ case ArbitrumLegacyTxType, LegacyTxType:
+ rlp.Encode(w, tx.inner)
+ default:
+ tx.encodeTyped(w)
+ }
+}
+
+// TxDifference returns a new set which is the difference between a and b.
+// func TxDifference(a, b ArbTxs) ArbTxs {
+// keep := make(ArbTxs, 0, len(a))
+
+// remove := make(map[common.Hash]struct{})
+// for _, tx := range b {
+// remove[tx.Hash()] = struct{}{}
+// }
+
+// for _, tx := range a {
+// if _, ok := remove[tx.Hash()]; !ok {
+// keep = append(keep, tx)
+// }
+// }
+
+// return keep
+// }
+
+// HashDifference returns a new set which is the difference between a and b.
+func HashDifference(a, b []common.Hash) []common.Hash {
+ keep := make([]common.Hash, 0, len(a))
+
+ remove := make(map[common.Hash]struct{})
+ for _, hash := range b {
+ remove[hash] = struct{}{}
+ }
+
+ for _, hash := range a {
+ if _, ok := remove[hash]; !ok {
+ keep = append(keep, hash)
+ }
+ }
+
+ return keep
+}
+
+// TxByNonce implements the sort interface to allow sorting a list of transactions
+// by their nonces. This is usually only useful for sorting transactions from a
+// single account, otherwise a nonce comparison doesn't make much sense.
+// type TxByNonce ArbTxs
+
+// func (s TxByNonce) Len() int { return len(s) }
+// func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() }
+// func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// copyAddressPtr copies an address.
+func copyAddressPtr(a *common.Address) *common.Address {
+ if a == nil {
+ return nil
+ }
+ cpy := *a
+ return &cpy
+}
+
+// // TransactionToMessage converts a transaction into a Message.
+func TransactionToMessage(tx Transaction, s ArbitrumSigner, baseFee *big.Int, runmode MessageRunMode) (msg *Message, err error) {
+ // tx.AsMessage(s types.Signer, baseFee *big.Int, rules *chain.Rules)
+	to := accounts.NilAddress
+	if addr := tx.GetTo(); addr != nil {
+		to = accounts.InternAddress(*addr)
+	}
+	msg = &Message{
+ TxRunMode: runmode,
+ Tx: tx,
+
+ nonce: tx.GetNonce(),
+ gasLimit: tx.GetGasLimit(),
+ gasPrice: *tx.GetFeeCap(),
+ feeCap: *tx.GetFeeCap(),
+ tipCap: *tx.GetTipCap(),
+		to: to, // a nil To (contract creation) maps to NilAddress
+ // value: tx.GetValue(),
+ amount: *tx.GetValue(), // TODO amount is value?
+ data: tx.GetData(),
+ accessList: tx.GetAccessList(),
+ SkipAccountChecks: false, // tx.SkipAccountChecks(), // TODO Arbitrum upstream this was init'd to false
+ blobHashes: tx.GetBlobHashes(),
+ // maxFeePerBlobGas: tx.GetBlobGasFeeCap(),
+ // BlobGasFeeCap: tx.GetBlobGasFeeCap(), // TODO
+ }
+ // If baseFee provided, set gasPrice to effectiveGasPrice.
+ if baseFee != nil {
+ msg.gasPrice.SetFromBig(cmath.BigMin(msg.gasPrice.ToBig().Add(msg.tipCap.ToBig(), baseFee), msg.feeCap.ToBig()))
+ }
+ msg.from, err = s.Sender(tx)
+ return msg, err
+}
diff --git a/execution/types/arb_types.go b/execution/types/arb_types.go
new file mode 100644
index 00000000000..8113bf82837
--- /dev/null
+++ b/execution/types/arb_types.go
@@ -0,0 +1,2552 @@
+package types
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math/big"
+
+ "github.com/erigontech/erigon/arb"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/common/length"
+ "github.com/erigontech/erigon/common/log/v3"
+ "github.com/erigontech/erigon/common/math"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/erigontech/erigon/execution/types/accounts"
+ "github.com/holiman/uint256"
+)
+
+// SkipAccountChecks reports whether nonce checks should be skipped for this
+// transaction type. It also disables the requirement that the sender is an
+// EOA and not a contract.
+func (tx *ArbTx) SkipAccountChecks() bool {
+ // return tx.inner.skipAccountChecks()
+ return skipAccountChecks[tx.Type()]
+}
+
+var fallbackErrorMsg = "missing trie node 0000000000000000000000000000000000000000000000000000000000000000 (path ) "
+var fallbackErrorCode = -32000
+
+func SetFallbackError(msg string, code int) {
+ fallbackErrorMsg = msg
+ fallbackErrorCode = code
+ log.Debug("setting fallback error", "msg", msg, "code", code)
+}
+
+type fallbackError struct{}
+
+func (f fallbackError) ErrorCode() int { return fallbackErrorCode }
+func (f fallbackError) Error() string { return fallbackErrorMsg }
+
+var ErrUseFallback = fallbackError{}
+
+type FallbackClient interface {
+ CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error
+}
+
+var bigZero = big.NewInt(0)
+var uintZero = uint256.NewInt(0)
+
+var skipAccountChecks = [...]bool{
+ ArbitrumDepositTxType: true,
+ ArbitrumRetryTxType: true,
+ ArbitrumSubmitRetryableTxType: true,
+ ArbitrumInternalTxType: true,
+ ArbitrumContractTxType: true,
+ ArbitrumUnsignedTxType: false,
+}
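+// The table is indexed by transaction type byte: the Arbitrum system types
+// (deposit, internal, contract, retry, submit-retryable) bypass the nonce and
+// EOA checks, while the user-originated ArbitrumUnsignedTx does not.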
+
+// func (tx *LegacyTx) skipAccountChecks() bool { return false }
+// func (tx *AccessListTx) skipAccountChecks() bool { return false }
+// func (tx *DynamicFeeTransaction) skipAccountChecks() bool { return false }
+// func (tx *ArbitrumUnsignedTx) skipAccountChecks() bool { return false }
+// func (tx *ArbitrumContractTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumRetryTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumSubmitRetryableTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumDepositTx) skipAccountChecks() bool { return true }
+// func (tx *ArbitrumInternalTx) skipAccountChecks() bool { return true }
+
+type ArbitrumUnsignedTx struct {
+ arb.NoTimeBoosted
+ ChainId *big.Int
+ From accounts.Address
+
+ Nonce uint64 // nonce of sender account
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+}
+
+func (tx *ArbitrumUnsignedTx) copy() Transaction {
+ cpy := &ArbitrumUnsignedTx{
+ ChainId: new(big.Int),
+ Nonce: tx.Nonce,
+ GasFeeCap: new(big.Int),
+ Gas: tx.Gas,
+ From: tx.From,
+ To: nil,
+ Value: new(big.Int),
+ Data: common.Copy(tx.Data),
+ }
+ if tx.ChainId != nil {
+ cpy.ChainId.Set(tx.ChainId)
+ }
+ if tx.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(tx.GasFeeCap)
+ }
+ if tx.To != nil {
+ tmp := *tx.To
+ cpy.To = &tmp
+ }
+ if tx.Value != nil {
+ cpy.Value.Set(tx.Value)
+ }
+ return cpy
+}
+
+func (tx *ArbitrumUnsignedTx) Type() byte { return ArbitrumUnsignedTxType }
+func (tx *ArbitrumUnsignedTx) GetChainID() *uint256.Int { return uint256.MustFromBig(tx.ChainId) }
+func (tx *ArbitrumUnsignedTx) GetNonce() uint64 { return tx.Nonce }
+func (tx *ArbitrumUnsignedTx) GetPrice() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumUnsignedTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumUnsignedTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumUnsignedTx) GetGasLimit() uint64 { return tx.Gas }
+func (tx *ArbitrumUnsignedTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumUnsignedTx) GetValue() *uint256.Int { return uint256.MustFromBig(tx.Value) }
+func (tx *ArbitrumUnsignedTx) GetTo() *common.Address { return tx.To }
+func (tx *ArbitrumUnsignedTx) GetData() []byte { return tx.Data }
+func (tx *ArbitrumUnsignedTx) GetAccessList() AccessList { return nil }
+func (tx *ArbitrumUnsignedTx) GetFeeCap() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumUnsignedTx) GetAuthorizations() []Authorization { return nil }
+
+func (tx *ArbitrumUnsignedTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return tx.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+
+func (tx *ArbitrumUnsignedTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*Message, error) {
+ var to accounts.Address
+	if tx.GetTo() != nil {
+		to = accounts.InternAddress(*tx.GetTo())
+	} else {
+		to = accounts.NilAddress
+	}
+ msg := &Message{
+ gasPrice: *tx.GetPrice(),
+ tipCap: *tx.GetTipCap(),
+ feeCap: *tx.GetFeeCap(),
+ gasLimit: tx.GetGasLimit(),
+ nonce: tx.GetNonce(),
+ accessList: tx.GetAccessList(),
+ from: tx.From,
+ to: to,
+ data: tx.GetData(),
+ amount: *tx.GetValue(),
+ checkNonce: !skipAccountChecks[tx.Type()],
+
+ // TxRunMode: MessageRunMode, // must be set separately?
+ Tx: tx,
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ }
+ // if baseFee != nil {
+ // msg.gasPrice.SetFromBig(math.BigMin(msg.gasPrice.ToBig().Add(msg.tip.ToBig(), baseFee), msg.feeCap.ToBig()))
+ // }
+
+ return msg, nil
+}
+
+func (tx *ArbitrumUnsignedTx) WithSignature(signer Signer, sig []byte) (Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) Hash() common.Hash {
+ //TODO implement me
+ return prefixedRlpHash(ArbitrumUnsignedTxType, []interface{}{
+ tx.ChainId,
+ tx.From,
+ tx.Nonce,
+ tx.GasFeeCap,
+ tx.Gas,
+ tx.To,
+ tx.Value,
+ tx.Data,
+ })
+}
+
+func (tx *ArbitrumUnsignedTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumUnsignedTx) payloadSize() (payloadSize int, nonceLen, gasLen int) {
+ // ChainId
+ payloadSize += rlp.BigIntLen(tx.ChainId)
+
+ // Nonce
+ nonceLen = rlp.U64Len(tx.Nonce)
+ payloadSize += nonceLen
+
+	// From (prefix byte + 20 bytes, always present)
+	payloadSize++
+	payloadSize += length.Addr
+
+ // GasFeeCap
+ payloadSize += rlp.BigIntLen(tx.GasFeeCap)
+
+ // Gas
+ gasLen = rlp.U64Len(tx.Gas)
+ payloadSize += gasLen
+
+ // To (20 bytes if non-nil)
+ payloadSize++
+ if tx.To != nil {
+	payloadSize += length.Addr
+ }
+
+ // Value
+	payloadSize += rlp.BigIntLen(tx.Value)
+
+ // Data (includes its own header)
+ payloadSize += rlp.StringLen(tx.Data)
+
+ return payloadSize, nonceLen, gasLen
+}
+
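+// encodePayload writes the fields in the fixed order ChainId, From, Nonce,
+// GasFeeCap, Gas, To, Value, Data, mirroring payloadSize above; a nil To is
+// encoded as an empty string (contract creation).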
+func (tx *ArbitrumUnsignedTx) encodePayload(w io.Writer, b []byte, payloadSize, nonceLen, gasLen int) error {
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ addrFrom := tx.From.Value()
+ if _, err := w.Write(addrFrom[:]); err != nil {
+ return err
+ }
+
+ if tx.Nonce > 0 && tx.Nonce < 128 {
+ b[0] = byte(tx.Nonce)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ binary.BigEndian.PutUint64(b[1:], tx.Nonce)
+ b[8-nonceLen] = 128 + byte(nonceLen)
+ if _, err := w.Write(b[8-nonceLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ if err := rlp.EncodeBigInt(tx.GasFeeCap, w, b); err != nil {
+ return err
+ }
+
+ if tx.Gas > 0 && tx.Gas < 128 {
+ b[0] = byte(tx.Gas)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ binary.BigEndian.PutUint64(b[1:], tx.Gas)
+ b[8-gasLen] = 128 + byte(gasLen)
+ if _, err := w.Write(b[8-gasLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ if tx.To == nil {
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.To[:]); err != nil {
+ return err
+ }
+ }
+
+ if err := rlp.EncodeBigInt(tx.Value, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeString(tx.Data, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumUnsignedTx) EncodingSize() int {
+ payloadSize, _, _ := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumUnsignedTx) EncodeRLP(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := tx.payloadSize()
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumUnsignedTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, nonceLen, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumUnsignedTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+	tx.From = accounts.InternAddress(common.BytesToAddress(b))
+
+ // Decode Nonce (uint64)
+ if tx.Nonce, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Nonce: %w", err)
+ }
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ tx.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if tx.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode To (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ tx.To = new(common.Address)
+ copy(tx.To[:], b)
+ } else {
+ tx.To = nil
+ }
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ tx.Value = new(big.Int).SetBytes(b)
+
+ // Decode Data ([]byte)
+ if tx.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumUnsignedTx: %w", err)
+ }
+ return nil
+}
+
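+// MarshalBinary writes the canonical typed-transaction encoding: the type byte
+// followed by the payload list, without the outer string prefix added by EncodeRLP.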
+func (tx *ArbitrumUnsignedTx) MarshalBinary(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := tx.payloadSize()
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumUnsignedTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, nonceLen, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumUnsignedTx) Sender(signer Signer) (accounts.Address, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumUnsignedTx) CachedSender() (accounts.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumUnsignedTx) GetSender() (accounts.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumUnsignedTx) SetSender(address accounts.Address) {
+ tx.From = address
+}
+
+func (tx *ArbitrumUnsignedTx) IsContractDeploy() bool {
+ return tx.To == nil
+}
+
+func (tx *ArbitrumUnsignedTx) Unwrap() Transaction {
+ //TODO implement me
+ panic("implement me")
+}
+
+// func (tx *ArbitrumUnsignedTx) gas() uint64 { }
+// func (tx *ArbitrumUnsignedTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumUnsignedTx) gasTipCap() *big.Int { return bigZero }
+// func (tx *ArbitrumUnsignedTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumUnsignedTx) value() *big.Int { return tx.Value }
+// func (tx *ArbitrumUnsignedTx) nonce() uint64 { }
+// func (tx *ArbitrumUnsignedTx) to() *common.Address { return tx.To }
+
+func (tx *ArbitrumUnsignedTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+
+//func (tx *ArbitrumUnsignedTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
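+// ArbitrumContractTx mirrors ArbitrumUnsignedTx but is keyed by an L1 RequestId
+// instead of a nonce (GetNonce returns 0) and carries no signature:
+// RawSignatureValues returns zeros.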
+type ArbitrumContractTx struct {
+ arb.NoTimeBoosted
+ ChainId *big.Int
+ RequestId common.Hash
+ From accounts.Address
+
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+}
+
+func (tx *ArbitrumContractTx) copy() *ArbitrumContractTx {
+ cpy := &ArbitrumContractTx{
+ ChainId: new(big.Int),
+ RequestId: tx.RequestId,
+ GasFeeCap: new(big.Int),
+ Gas: tx.Gas,
+ From: tx.From,
+ To: nil,
+ Value: new(big.Int),
+ Data: common.Copy(tx.Data),
+ }
+ if tx.ChainId != nil {
+ cpy.ChainId.Set(tx.ChainId)
+ }
+ if tx.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(tx.GasFeeCap)
+ }
+ if tx.To != nil {
+ tmp := *tx.To
+ cpy.To = &tmp
+ }
+ if tx.Value != nil {
+ cpy.Value.Set(tx.Value)
+ }
+ return cpy
+}
+func (tx *ArbitrumContractTx) Type() byte { return ArbitrumContractTxType }
+func (tx *ArbitrumContractTx) GetChainID() *uint256.Int { return uint256.MustFromBig(tx.ChainId) }
+func (tx *ArbitrumContractTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumContractTx) GetPrice() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumContractTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumContractTx) GetFeeCap() *uint256.Int { return uint256.MustFromBig(tx.GasFeeCap) }
+func (tx *ArbitrumContractTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumContractTx) GetGasLimit() uint64 { return tx.Gas }
+func (tx *ArbitrumContractTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumContractTx) GetData() []byte { return tx.Data }
+func (tx *ArbitrumContractTx) GetValue() *uint256.Int { return uint256.MustFromBig(tx.Value) }
+func (tx *ArbitrumContractTx) GetTo() *common.Address { return tx.To }
+func (tx *ArbitrumContractTx) GetAccessList() AccessList { return nil }
+func (tx *ArbitrumContractTx) GetAuthorizations() []Authorization { return nil }
+
+func (tx *ArbitrumContractTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return tx.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+func (tx *ArbitrumContractTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumContractTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*Message, error) {
+ var to accounts.Address
+ if tx.To != nil {
+ to = accounts.InternAddress(*tx.To)
+ } else {
+ to = accounts.NilAddress
+ }
+
+ msg := &Message{
+ gasPrice: *tx.GetPrice(),
+ tipCap: *tx.GetTipCap(),
+ feeCap: *tx.GetFeeCap(),
+ gasLimit: tx.GetGasLimit(),
+ nonce: tx.GetNonce(),
+ accessList: tx.GetAccessList(),
+ from: tx.From,
+ to: to,
+ data: tx.GetData(),
+ amount: *tx.GetValue(),
+ checkNonce: !skipAccountChecks[tx.Type()],
+
+ Tx: tx,
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ }
+ if baseFee != nil {
+ msg.gasPrice.SetFromBig(math.BigMin(msg.gasPrice.ToBig().Add(msg.tipCap.ToBig(), baseFee), msg.feeCap.ToBig()))
+ }
+ return msg, nil
+}
+
+func (tx *ArbitrumContractTx) WithSignature(signer Signer, sig []byte) (Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumContractTx) Hash() common.Hash {
+ //TODO implement me
+ return prefixedRlpHash(ArbitrumContractTxType, []interface{}{
+ tx.ChainId,
+ tx.RequestId,
+ tx.From,
+ tx.GasFeeCap,
+ tx.Gas,
+ tx.To,
+ tx.Value,
+ tx.Data,
+ })
+}
+
+func (tx *ArbitrumContractTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumContractTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
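+// payloadSize returns the byte length of the inner RLP list (excluding the type
+// byte and the list prefix itself) plus the encoded length of Gas, which
+// encodePayload reuses when writing the Gas field manually.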
+func (tx *ArbitrumContractTx) payloadSize() (payloadSize int, gasLen int) {
+ // 1. ChainId (big.Int): 1 header byte + content length.
+ payloadSize += rlp.BigIntLen(tx.ChainId)
+
+ // 2. RequestId (common.Hash, fixed 32 bytes): header + 32 bytes.
+ payloadSize++ // header for RequestId
+ payloadSize += 32
+
+ // 3. From (common.Address, fixed 20 bytes): header + 20 bytes.
+ payloadSize++ // header for From
+ payloadSize += 20
+
+ // 4. GasFeeCap (big.Int): header + content length.
+ payloadSize += rlp.BigIntLen(tx.GasFeeCap)
+
+ // 5. Gas (uint64): header + computed length.
+ gasLen = rlp.U64Len(tx.Gas)
+ payloadSize += gasLen
+
+ // 6. To (*common.Address): header always; if non-nil then add 20 bytes.
+ payloadSize++ // header for To
+ if tx.To != nil {
+ payloadSize += 20
+ }
+
+ // 7. Value (big.Int): header + content length.
+ payloadSize += rlp.BigIntLen(tx.Value)
+
+ // 8. Data ([]byte): rlp.StringLen returns full encoded length (header + data).
+ payloadSize += rlp.StringLen(tx.Data)
+
+ return payloadSize, gasLen
+}
+
+func (tx *ArbitrumContractTx) encodePayload(w io.Writer, b []byte, payloadSize, gasLen int) error {
+ // Write the RLP list prefix for the payload.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // 1. ChainId (big.Int)
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // 2. RequestId (common.Hash, 32 bytes)
+ // Write header for fixed length 32: 0x80 + 32.
+ b[0] = 128 + 32
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.RequestId[:]); err != nil {
+ return err
+ }
+
+ // 3. From (common.Address, 20 bytes)
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ fromAddr := tx.From.Value()
+ if _, err := w.Write(fromAddr[:]); err != nil {
+ return err
+ }
+
+ // 4. GasFeeCap (big.Int)
+ if err := rlp.EncodeBigInt(tx.GasFeeCap, w, b); err != nil {
+ return err
+ }
+
+ // 5. Gas (uint64)
+ // If Gas is less than 128, it is encoded as a single byte.
+ if tx.Gas > 0 && tx.Gas < 128 {
+ b[0] = byte(tx.Gas)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ // Otherwise, encode as big‑endian. Write into b[1:9],
+ // then set the header at position 8 - gasLen.
+ binary.BigEndian.PutUint64(b[1:], tx.Gas)
+ b[8-gasLen] = 128 + byte(gasLen)
+ if _, err := w.Write(b[8-gasLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ // 6. To (*common.Address)
+ if tx.To == nil {
+ // nil is encoded as an empty byte string.
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ // Write header for 20-byte string and then the address bytes.
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write((*tx.To)[:]); err != nil {
+ return err
+ }
+ }
+
+ // 7. Value (big.Int)
+ if err := rlp.EncodeBigInt(tx.Value, w, b); err != nil {
+ return err
+ }
+
+ // 8. Data ([]byte)
+ if err := rlp.EncodeString(tx.Data, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumContractTx) EncodingSize() int {
+ payloadSize, _ := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumContractTx) EncodeRLP(w io.Writer) error {
+ payloadSize, gasLen := tx.payloadSize()
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumContractTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumContractTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode RequestId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RequestId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for RequestId: %d", len(b))
+ }
+ copy(tx.RequestId[:], b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ fromAddr := common.Address{}
+ copy(fromAddr[:], b)
+ tx.From = accounts.InternAddress(fromAddr)
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ tx.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if tx.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode To (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ addrValue := common.Address{}
+ copy(addrValue[:], b)
+ tx.To = &addrValue
+ } else {
+ tx.To = nil
+ }
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ tx.Value = new(big.Int).SetBytes(b)
+
+ // Decode Data ([]byte)
+ if tx.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumContractTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumContractTx) MarshalBinary(w io.Writer) error {
+ payloadSize, gasLen := tx.payloadSize()
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumContractTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, gasLen); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumContractTx) Sender(signer Signer) (accounts.Address, error) {
+ panic("implement me")
+}
+
+func (tx *ArbitrumContractTx) CachedSender() (accounts.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumContractTx) GetSender() (accounts.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumContractTx) SetSender(address accounts.Address) {
+ tx.From = address
+}
+
+func (tx *ArbitrumContractTx) IsContractDeploy() bool {
+ return tx.To == nil
+}
+
+func (tx *ArbitrumContractTx) Unwrap() Transaction {
+ return tx
+}
+
+// func (tx *ArbitrumContractTx) ChainID() *big.Int { return tx.ChainId }
+// func (tx *ArbitrumContractTx) accessList() types.AccessList { return nil }
+// func (tx *ArbitrumContractTx) data() []byte { return tx.Data }
+// func (tx *ArbitrumContractTx) gas() uint64 { return tx.Gas }
+// func (tx *ArbitrumContractTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumContractTx) gasTipCap() *big.Int { return bigZero }
+// func (tx *ArbitrumContractTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumContractTx) value() *big.Int { return tx.Value }
+// func (tx *ArbitrumContractTx) nonce() uint64 { return 0 }
+// func (tx *ArbitrumContractTx) to() *common.Address { return tx.To }
+func (tx *ArbitrumContractTx) encode(b *bytes.Buffer) error {
+ return rlp.Encode(b, tx)
+}
+func (tx *ArbitrumContractTx) decode(input []byte) error {
+ return rlp.DecodeBytes(input, tx)
+}
+
+// func (tx *ArbitrumContractTx) rawSignatureValues() (v, r, s *big.Int) {
+// return bigZero, bigZero, bigZero
+// }
+func (tx *ArbitrumContractTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+
+//func (tx *ArbitrumContractTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
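+// ArbitrumRetryTx executes (redeems) a previously submitted retryable ticket.
+// TicketId links it to the ticket, and RefundTo/MaxRefund/SubmissionFeeRefund
+// describe the refund flow. The optional Timeboosted flag is appended as a
+// trailing list element only when set; hashing encodings omit it (see
+// MarshalBinaryForHashing), so the hash does not depend on timeboost metadata.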
+type ArbitrumRetryTx struct {
+ ChainId *big.Int
+ Nonce uint64
+ From accounts.Address
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit
+ To *common.Address `rlp:"nil"` // nil means contract creation
+ Value *big.Int // wei amount
+ Data []byte // contract invocation input data
+ TicketId common.Hash
+ RefundTo common.Address
+ MaxRefund *big.Int // the maximum refund sent to RefundTo (the rest goes to From)
+ SubmissionFeeRefund *big.Int // the submission fee to refund if successful (capped by MaxRefund)
+ Timeboosted *bool
+}
+
+func (t *ArbitrumRetryTx) copy() *ArbitrumRetryTx {
+ cpy := &ArbitrumRetryTx{
+ ChainId: new(big.Int),
+ Nonce: t.Nonce,
+ GasFeeCap: new(big.Int),
+ Gas: t.Gas,
+ From: t.From,
+ To: nil,
+ Value: new(big.Int),
+ Data: common.Copy(t.Data),
+ TicketId: t.TicketId,
+ RefundTo: t.RefundTo,
+ MaxRefund: new(big.Int),
+ SubmissionFeeRefund: new(big.Int),
+ Timeboosted: t.Timeboosted,
+ }
+ if t.ChainId != nil {
+ cpy.ChainId.Set(t.ChainId)
+ }
+ if t.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(t.GasFeeCap)
+ }
+ if t.To != nil {
+ tmp := *t.To
+ cpy.To = &tmp
+ }
+ if t.Value != nil {
+ cpy.Value.Set(t.Value)
+ }
+ if t.MaxRefund != nil {
+ cpy.MaxRefund.Set(t.MaxRefund)
+ }
+ if t.SubmissionFeeRefund != nil {
+ cpy.SubmissionFeeRefund.Set(t.SubmissionFeeRefund)
+ }
+ return cpy
+}
+
+func (t *ArbitrumRetryTx) Type() byte { return ArbitrumRetryTxType }
+func (t *ArbitrumRetryTx) GetChainID() *uint256.Int { return uint256.MustFromBig(t.ChainId) }
+func (t *ArbitrumRetryTx) GetNonce() uint64 { return t.Nonce }
+func (t *ArbitrumRetryTx) GetPrice() *uint256.Int { return uint256.MustFromBig(t.GasFeeCap) }
+func (t *ArbitrumRetryTx) GetTipCap() *uint256.Int { return uintZero }
+func (t *ArbitrumRetryTx) GetFeeCap() *uint256.Int { return uint256.MustFromBig(t.GasFeeCap) }
+func (t *ArbitrumRetryTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (t *ArbitrumRetryTx) GetGasLimit() uint64 { return t.Gas }
+func (t *ArbitrumRetryTx) GetBlobGas() uint64 { return 0 }
+func (t *ArbitrumRetryTx) GetData() []byte { return t.Data }
+func (t *ArbitrumRetryTx) GetValue() *uint256.Int { return uint256.MustFromBig(t.Value) }
+func (t *ArbitrumRetryTx) GetTo() *common.Address { return t.To }
+func (t *ArbitrumRetryTx) GetAccessList() AccessList { return nil }
+func (tx *ArbitrumRetryTx) GetAuthorizations() []Authorization { return nil }
+
+func (t *ArbitrumRetryTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return t.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+func (t *ArbitrumRetryTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (t *ArbitrumRetryTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*Message, error) {
+ var to accounts.Address
+ if t.To != nil {
+ to = accounts.InternAddress(*t.To)
+ } else {
+ to = accounts.NilAddress
+ }
+ msg := &Message{
+ gasPrice: *t.GetPrice(),
+ tipCap: *t.GetTipCap(),
+ feeCap: *t.GetFeeCap(),
+ gasLimit: t.GetGasLimit(),
+ nonce: t.GetNonce(),
+ accessList: t.GetAccessList(),
+ from: t.From,
+ to: to,
+ data: t.GetData(),
+ amount: *t.GetValue(),
+ checkNonce: !skipAccountChecks[t.Type()],
+
+ Tx: t,
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ }
+ if baseFee != nil {
+ msg.gasPrice.SetFromBig(math.BigMin(msg.gasPrice.ToBig().Add(msg.tipCap.ToBig(), baseFee), msg.feeCap.ToBig()))
+ }
+ return msg, nil
+}
+
+func (t *ArbitrumRetryTx) WithSignature(signer Signer, sig []byte) (Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (t *ArbitrumRetryTx) Hash() common.Hash {
+ //TODO implement me
+ return prefixedRlpHash(ArbitrumRetryTxType, []interface{}{
+ t.ChainId,
+ t.Nonce,
+ t.From,
+ t.GasFeeCap,
+ t.Gas,
+ t.To,
+ t.Value,
+ t.Data,
+ t.TicketId,
+ t.RefundTo,
+ t.MaxRefund,
+ t.SubmissionFeeRefund,
+ })
+}
+
+func (t *ArbitrumRetryTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (t *ArbitrumRetryTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (t *ArbitrumRetryTx) encodePayload(w io.Writer, b []byte, payloadSize, nonceLen, gasLen int, hashingOnly bool) error {
+ // Write the RLP list prefix.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // ChainId (big.Int)
+ if err := rlp.EncodeBigInt(t.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // Nonce (uint64)
+ if t.Nonce > 0 && t.Nonce < 128 {
+ b[0] = byte(t.Nonce)
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ binary.BigEndian.PutUint64(b[1:], t.Nonce)
+ b[8-nonceLen] = 128 + byte(nonceLen)
+ if _, err := w.Write(b[8-nonceLen : 9]); err != nil {
+ return err
+ }
+ }
+
+ // From (common.Address, 20 bytes)
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ fromAddr := t.From.Value()
+ if _, err := w.Write(fromAddr[:]); err != nil {
+ return err
+ }
+
+ // GasFeeCap (big.Int)
+ if err := rlp.EncodeBigInt(t.GasFeeCap, w, b); err != nil {
+ return err
+ }
+
+ // Gas (uint64)
+ if err := rlp.EncodeInt(t.Gas, w, b); err != nil {
+ return err
+ }
+
+ // To (optional common.Address, 20 bytes if non-nil)
+ if t.To == nil {
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write((*t.To)[:]); err != nil {
+ return err
+ }
+ }
+
+ // Value (big.Int)
+ if err := rlp.EncodeBigInt(t.Value, w, b); err != nil {
+ return err
+ }
+
+ // Data ([]byte)
+ if err := rlp.EncodeString(t.Data, w, b); err != nil {
+ return err
+ }
+
+ // TicketId (common.Hash, 32 bytes)
+ b[0] = 128 + 32
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(t.TicketId[:]); err != nil {
+ return err
+ }
+
+ // RefundTo (common.Address, 20 bytes)
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(t.RefundTo[:]); err != nil {
+ return err
+ }
+
+ // MaxRefund (big.Int)
+ if err := rlp.EncodeBigInt(t.MaxRefund, w, b); err != nil {
+ return err
+ }
+
+ // SubmissionFeeRefund (big.Int)
+ if err := rlp.EncodeBigInt(t.SubmissionFeeRefund, w, b); err != nil {
+ return err
+ }
+
+ if t.Timeboosted != nil && !hashingOnly {
+ if err := rlp.EncodeBool(*t.Timeboosted, w, b); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *ArbitrumRetryTx) payloadSize(hashingOnly bool) (payloadSize int, nonceLen, gasLen int) {
+ // ChainId (big.Int)
+ payloadSize += rlp.BigIntLen(t.ChainId)
+
+ // Nonce (uint64)
+ nonceLen = rlp.U64Len(t.Nonce)
+ payloadSize += nonceLen
+
+ // From (common.Address, 20 bytes)
+ payloadSize++ // header
+ payloadSize += 20
+
+ // GasFeeCap (big.Int)
+ payloadSize += rlp.BigIntLen(t.GasFeeCap)
+
+ // Gas (uint64)
+ gasLen = rlp.U64Len(t.Gas)
+ payloadSize += gasLen
+
+ // To (optional common.Address, 20 bytes if non-nil)
+ payloadSize++ // header
+ if t.To != nil {
+ payloadSize += 20
+ }
+
+ // Value (big.Int)
+ payloadSize += rlp.BigIntLen(t.Value)
+
+ // Data ([]byte) — rlp.StringLen returns the full encoded length (header + data)
+ payloadSize += rlp.StringLen(t.Data)
+
+ // TicketId (common.Hash, 32 bytes)
+ payloadSize++ // header
+ payloadSize += 32
+
+ // RefundTo (common.Address, 20 bytes)
+ payloadSize++ // header
+ payloadSize += 20
+
+ // MaxRefund (big.Int)
+ payloadSize += rlp.BigIntLen(t.MaxRefund)
+
+ // SubmissionFeeRefund (big.Int)
+ payloadSize += rlp.BigIntLen(t.SubmissionFeeRefund)
+
+	if t.Timeboosted != nil && !hashingOnly {
+		// Timeboosted (bool): a bool encodes as a single byte, fully counted by rlp.BoolLen()
+		payloadSize += rlp.BoolLen()
+	}
+
+ return payloadSize, nonceLen, gasLen
+}
+
+func (t *ArbitrumRetryTx) EncodingSize() int {
+ payloadSize, _, _ := t.payloadSize(false)
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (t *ArbitrumRetryTx) EncodeRLP(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := t.payloadSize(false)
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumRetryTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := t.encodePayload(w, b[:], payloadSize, nonceLen, gasLen, false); err != nil {
+ return err
+ }
+ return nil
+}
+
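+// DecodeRLP accepts both encodings: if the list contains a trailing element
+// after SubmissionFeeRefund, it is read as the optional Timeboosted flag.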
+func (t *ArbitrumRetryTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin list decoding.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ t.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode Nonce (uint64)
+ if t.Nonce, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Nonce: %w", err)
+ }
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ fromAddr := common.Address{}
+ copy(fromAddr[:], b)
+
+ t.From = accounts.InternAddress(fromAddr)
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ t.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if t.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode To (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ t.To = new(common.Address)
+ copy(t.To[:], b)
+ }
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ t.Value = new(big.Int).SetBytes(b)
+
+ // Decode Data ([]byte)
+ if t.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ // Decode TicketId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read TicketId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for TicketId: %d", len(b))
+ }
+ copy(t.TicketId[:], b)
+
+ // Decode RefundTo (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RefundTo: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for RefundTo: %d", len(b))
+ }
+ copy(t.RefundTo[:], b)
+
+ // Decode MaxRefund (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read MaxRefund: %w", err)
+ }
+ t.MaxRefund = new(big.Int).SetBytes(b)
+
+ // Decode SubmissionFeeRefund (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read SubmissionFeeRefund: %w", err)
+ }
+ t.SubmissionFeeRefund = new(big.Int).SetBytes(b)
+
+ if s.MoreDataInList() {
+ boolVal, err := s.Bool()
+ if err != nil {
+ return err
+ }
+ t.Timeboosted = &boolVal
+ }
+ return s.ListEnd()
+}
+
+func (t *ArbitrumRetryTx) MarshalBinary(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := t.payloadSize(false)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumRetryTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := t.encodePayload(w, b[:], payloadSize, nonceLen, gasLen, false); err != nil {
+ return err
+ }
+ return nil
+}
+
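+// MarshalBinaryForHashing is MarshalBinary with hashingOnly=true, which drops
+// the Timeboosted flag so that the transaction hash is stable whether or not
+// the flag is set.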
+func (t *ArbitrumRetryTx) MarshalBinaryForHashing(w io.Writer) error {
+ payloadSize, nonceLen, gasLen := t.payloadSize(true)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumRetryTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := t.encodePayload(w, b[:], payloadSize, nonceLen, gasLen, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (t *ArbitrumRetryTx) Sender(signer Signer) (accounts.Address, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (t *ArbitrumRetryTx) CachedSender() (accounts.Address, bool) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (t *ArbitrumRetryTx) GetSender() (accounts.Address, bool) {
+ return t.From, true
+}
+
+func (t *ArbitrumRetryTx) SetSender(address accounts.Address) {
+ t.From = address
+}
+
+func (t *ArbitrumRetryTx) IsContractDeploy() bool {
+ return t.To == nil
+}
+
+func (t *ArbitrumRetryTx) Unwrap() Transaction {
+ return t
+}
+
+func (t *ArbitrumRetryTx) IsTimeBoosted() *bool {
+ return t.Timeboosted
+}
+
+func (t *ArbitrumRetryTx) SetTimeboosted(val *bool) {
+ t.Timeboosted = val
+}
+
+// func (tx *ArbitrumRetryTx) chainID() *big.Int { return tx.ChainId }
+// func (tx *ArbitrumRetryTx) accessList() types.AccessList { return nil }
+// func (tx *ArbitrumRetryTx) data() []byte { return tx.Data }
+// func (tx *ArbitrumRetryTx) gas() uint64 { return tx.Gas }
+// func (tx *ArbitrumRetryTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumRetryTx) gasTipCap() *big.Int { return bigZero }
+// func (tx *ArbitrumRetryTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumRetryTx) value() *big.Int { return tx.Value }
+// func (tx *ArbitrumRetryTx) nonce() uint64 { return tx.Nonce }
+// func (tx *ArbitrumRetryTx) to() *common.Address { return tx.To }
+func (t *ArbitrumRetryTx) encode(b *bytes.Buffer) error {
+ return rlp.Encode(b, t)
+}
+func (t *ArbitrumRetryTx) decode(input []byte) error {
+ return rlp.DecodeBytes(input, t)
+}
+
+func (t *ArbitrumRetryTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+
+//func (tx *ArbitrumRetryTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
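+// ArbitrumSubmitRetryableTx records the L1 submission of a retryable ticket.
+// RetryTo/RetryValue/RetryData describe the retry to be scheduled, while
+// EffectiveGasUsed is bookkeeping that is appended only to non-hashing
+// encodings (see payloadSize), keeping the hash independent of it.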
+type ArbitrumSubmitRetryableTx struct {
+ arb.NoTimeBoosted
+ ChainId *big.Int
+ RequestId common.Hash
+ From accounts.Address
+ L1BaseFee *big.Int
+
+ DepositValue *big.Int
+ GasFeeCap *big.Int // wei per gas
+ Gas uint64 // gas limit for the retryable tx, actual gas spending is EffectiveGasUsed
+ RetryTo *common.Address `rlp:"nil"` // nil means contract creation
+ RetryValue *big.Int // wei amount
+ Beneficiary common.Address
+ MaxSubmissionFee *big.Int
+ FeeRefundAddr common.Address
+ RetryData []byte // contract invocation input data
+ EffectiveGasUsed uint64
+}
+
+func (tx *ArbitrumSubmitRetryableTx) copy() *ArbitrumSubmitRetryableTx {
+ cpy := &ArbitrumSubmitRetryableTx{
+ ChainId: new(big.Int),
+ RequestId: tx.RequestId,
+ DepositValue: new(big.Int),
+ L1BaseFee: new(big.Int),
+ GasFeeCap: new(big.Int),
+ Gas: tx.Gas,
+ From: tx.From,
+ RetryTo: tx.RetryTo,
+ RetryValue: new(big.Int),
+ Beneficiary: tx.Beneficiary,
+ MaxSubmissionFee: new(big.Int),
+ FeeRefundAddr: tx.FeeRefundAddr,
+ RetryData: common.Copy(tx.RetryData),
+ EffectiveGasUsed: tx.EffectiveGasUsed,
+ }
+ if tx.ChainId != nil {
+ cpy.ChainId.Set(tx.ChainId)
+ }
+ if tx.DepositValue != nil {
+ cpy.DepositValue.Set(tx.DepositValue)
+ }
+ if tx.L1BaseFee != nil {
+ cpy.L1BaseFee.Set(tx.L1BaseFee)
+ }
+ if tx.GasFeeCap != nil {
+ cpy.GasFeeCap.Set(tx.GasFeeCap)
+ }
+ if tx.RetryTo != nil {
+ tmp := *tx.RetryTo
+ cpy.RetryTo = &tmp
+ }
+ if tx.RetryValue != nil {
+ cpy.RetryValue.Set(tx.RetryValue)
+ }
+ if tx.MaxSubmissionFee != nil {
+ cpy.MaxSubmissionFee.Set(tx.MaxSubmissionFee)
+ }
+
+ return cpy
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Type() byte { return ArbitrumSubmitRetryableTxType }
+func (tx *ArbitrumSubmitRetryableTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumSubmitRetryableTx) GetGasLimit() uint64 { return tx.Gas }
+func (tx *ArbitrumSubmitRetryableTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumSubmitRetryableTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumSubmitRetryableTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumSubmitRetryableTx) GetValue() *uint256.Int { return uintZero }
+func (tx *ArbitrumSubmitRetryableTx) GetTo() *common.Address { return &ArbRetryableTxAddress }
+func (tx *ArbitrumSubmitRetryableTx) GetAccessList() AccessList { return nil }
+func (tx *ArbitrumSubmitRetryableTx) GetAuthorizations() []Authorization { return nil }
+func (tx *ArbitrumSubmitRetryableTx) GetChainID() *uint256.Int {
+ return uint256.MustFromBig(tx.ChainId)
+}
+func (tx *ArbitrumSubmitRetryableTx) GetPrice() *uint256.Int {
+ return uint256.MustFromBig(tx.GasFeeCap)
+}
+func (tx *ArbitrumSubmitRetryableTx) GetFeeCap() *uint256.Int {
+ return uint256.MustFromBig(tx.GasFeeCap)
+}
+
+func (tx *ArbitrumSubmitRetryableTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int {
+ if baseFee == nil {
+ return tx.GetPrice()
+ }
+ res := uint256.NewInt(0)
+ return res.Set(baseFee)
+}
+
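+// GetData builds calldata for the retryable-submission call on the
+// ArbRetryableTx precompile address returned by GetTo: a 4-byte selector
+// (0xc9f95d32, presumably submitRetryable) followed by 32-byte-aligned
+// arguments and the offset/length-prefixed, zero-padded RetryData.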
+func (tx *ArbitrumSubmitRetryableTx) GetData() []byte {
+ var retryTo common.Address
+ if tx.RetryTo != nil {
+ retryTo = *tx.RetryTo
+ }
+ data := make([]byte, 0)
+ data = append(data, tx.RequestId.Bytes()...)
+ data = append(data, math.U256Bytes(tx.L1BaseFee)...)
+ data = append(data, math.U256Bytes(tx.DepositValue)...)
+ data = append(data, math.U256Bytes(tx.RetryValue)...)
+ data = append(data, math.U256Bytes(tx.GasFeeCap)...)
+ data = append(data, math.U256Bytes(new(big.Int).SetUint64(tx.Gas))...)
+ data = append(data, math.U256Bytes(tx.MaxSubmissionFee)...)
+ data = append(data, make([]byte, 12)...)
+ data = append(data, tx.FeeRefundAddr.Bytes()...)
+ data = append(data, make([]byte, 12)...)
+ data = append(data, tx.Beneficiary.Bytes()...)
+ data = append(data, make([]byte, 12)...)
+ data = append(data, retryTo.Bytes()...)
+ offset := len(data) + 32
+ data = append(data, math.U256Bytes(big.NewInt(int64(offset)))...)
+ data = append(data, math.U256Bytes(big.NewInt(int64(len(tx.RetryData))))...)
+ data = append(data, tx.RetryData...)
+ extra := len(tx.RetryData) % 32
+ if extra > 0 {
+ data = append(data, make([]byte, 32-extra)...)
+ }
+ data = append(hexutil.MustDecode("0xc9f95d32"), data...)
+ return data
+}
+
+func (tx *ArbitrumSubmitRetryableTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumSubmitRetryableTx) payloadSize(hashingOnly bool) (payloadSize int, gasLen int) {
+ size := 0
+ size += rlp.BigIntLen(tx.ChainId)
+
+ size++
+ size += length.Hash
+
+ size++
+ size += length.Addr
+
+ size += rlp.BigIntLen(tx.L1BaseFee)
+
+ size += rlp.BigIntLen(tx.DepositValue)
+
+ size += rlp.BigIntLen(tx.GasFeeCap)
+
+ gasLen = rlp.U64Len(tx.Gas)
+ size += gasLen
+
+ size++
+ if tx.RetryTo != nil {
+ size += length.Addr
+ }
+
+ size += rlp.BigIntLen(tx.RetryValue)
+
+ size++
+ size += length.Addr
+
+ size += rlp.BigIntLen(tx.MaxSubmissionFee)
+
+ size++
+ size += length.Addr
+
+ size += rlp.StringLen(tx.RetryData)
+
+ if hashingOnly {
+ return size, gasLen
+ }
+ // effective gas used is only included in non-hashing RLP encodings
+ size += rlp.U64Len(tx.EffectiveGasUsed)
+
+ return size, gasLen
+}
+
+func (tx *ArbitrumSubmitRetryableTx) encodePayload(w io.Writer, b []byte, payloadSize int, hashingOnly bool) error {
+ // Write the RLP list prefix.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // ChainId (big.Int)
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // RequestId (common.Hash, 32 bytes)
+ b[0] = 128 + length.Hash
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.RequestId[:]); err != nil {
+ return err
+ }
+
+ // From (common.Address, 20 bytes)
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ fromAddr := tx.From.Value()
+ if _, err := w.Write(fromAddr[:]); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeBigInt(tx.L1BaseFee, w, b); err != nil {
+ return err
+ }
+ if err := rlp.EncodeBigInt(tx.DepositValue, w, b); err != nil {
+ return err
+ }
+ if err := rlp.EncodeBigInt(tx.GasFeeCap, w, b); err != nil {
+ return err
+ }
+ if err := rlp.EncodeInt(tx.Gas, w, b); err != nil {
+ return err
+ }
+
+ // RetryTo (pointer to common.Address, 20 bytes if non-nil; otherwise RLP nil)
+ if tx.RetryTo == nil {
+ b[0] = 128
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write((*tx.RetryTo)[:]); err != nil {
+ return err
+ }
+ }
+ if err := rlp.EncodeBigInt(tx.RetryValue, w, b); err != nil {
+ return err
+ }
+ // Beneficiary (common.Address, 20 bytes)
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.Beneficiary[:]); err != nil {
+ return err
+ }
+ if err := rlp.EncodeBigInt(tx.MaxSubmissionFee, w, b); err != nil {
+ return err
+ }
+
+ // FeeRefundAddr (common.Address, 20 bytes)
+ b[0] = 128 + length.Addr
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.FeeRefundAddr[:]); err != nil {
+ return err
+ }
+ if err := rlp.EncodeString(tx.RetryData, w, b); err != nil {
+ return err
+ }
+
+ if hashingOnly {
+ return nil
+ }
+ if err := rlp.EncodeInt(tx.EffectiveGasUsed, w, b); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*Message, error) {
+ var to accounts.Address
+ if tx.GetTo() != nil {
+ to = accounts.InternAddress(*tx.GetTo())
+ } else {
+ to = accounts.NilAddress
+ }
+ msg := &Message{
+ gasPrice: *tx.GetPrice(),
+ tipCap: *tx.GetTipCap(),
+ feeCap: *tx.GetFeeCap(),
+ gasLimit: tx.GetGasLimit(),
+ nonce: tx.GetNonce(),
+ accessList: tx.GetAccessList(),
+ from: tx.From,
+ to: to,
+ data: tx.GetData(),
+ amount: *tx.GetValue(),
+ checkNonce: !skipAccountChecks[tx.Type()],
+
+ EffectiveGas: tx.EffectiveGasUsed,
+ Tx: tx,
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ }
+ if baseFee != nil {
+ msg.gasPrice.SetFromBig(math.BigMin(msg.gasPrice.ToBig().Add(msg.tipCap.ToBig(), baseFee), msg.feeCap.ToBig()))
+ }
+ // if !rules.IsCancun {
+ // return msg, errors.New("BlobTx transactions require Cancun")
+ // }
+ // if baseFee != nil {
+ // overflow := msg.gasPrice.SetFromBig(baseFee)
+ // if overflow {
+ // return msg, errors.New("gasPrice higher than 2^256-1")
+ // }
+ // }
+ // msg.gasPrice.Add(&msg.gasPrice, stx.Tip)
+ // if msg.gasPrice.Gt(stx.FeeCap) {
+ // msg.gasPrice.Set(stx.FeeCap)
+ // }
+ // var err error
+ // msg.from, err = d.Sender(s)
+ // msg.maxFeePerBlobGas = *stx.MaxFeePerBlobGas
+ // msg.blobHashes = stx.BlobVersionedHashes
+ return msg, nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) WithSignature(signer Signer, sig []byte) (Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Hash() common.Hash {
+ return prefixedRlpHash(ArbitrumSubmitRetryableTxType, []interface{}{
+ tx.ChainId,
+ tx.RequestId,
+ tx.From,
+ tx.L1BaseFee,
+ tx.DepositValue,
+ tx.GasFeeCap,
+ tx.Gas,
+ tx.RetryTo,
+ tx.RetryValue,
+ tx.Beneficiary,
+ tx.MaxSubmissionFee,
+ tx.FeeRefundAddr,
+ tx.RetryData,
+ })
+}
+
+func (tx *ArbitrumSubmitRetryableTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) EncodingSize() int {
+ payloadSize, _ := tx.payloadSize(false)
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumSubmitRetryableTx) EncodeRLP(w io.Writer) error {
+ hashingOnly := false
+
+ payloadSize, _ := tx.payloadSize(hashingOnly)
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumSubmitRetryableTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, hashingOnly); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode RequestId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RequestId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for RequestId: %d", len(b))
+ }
+ copy(tx.RequestId[:], b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ fromAddr := common.Address{}
+ copy(fromAddr[:], b)
+
+ tx.From = accounts.InternAddress(fromAddr)
+
+ // Decode L1BaseFee (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read L1BaseFee: %w", err)
+ }
+ tx.L1BaseFee = new(big.Int).SetBytes(b)
+
+ // Decode DepositValue (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read DepositValue: %w", err)
+ }
+ tx.DepositValue = new(big.Int).SetBytes(b)
+
+ // Decode GasFeeCap (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read GasFeeCap: %w", err)
+ }
+ tx.GasFeeCap = new(big.Int).SetBytes(b)
+
+ // Decode Gas (uint64)
+ if tx.Gas, err = s.Uint(); err != nil {
+ return fmt.Errorf("read Gas: %w", err)
+ }
+
+ // Decode RetryTo (*common.Address, 20 bytes if non-nil)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RetryTo: %w", err)
+ }
+ if len(b) > 0 {
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for RetryTo: %d", len(b))
+ }
+ tx.RetryTo = new(common.Address)
+ copy(tx.RetryTo[:], b)
+ } else {
+ tx.RetryTo = nil
+ }
+
+ // Decode RetryValue (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RetryValue: %w", err)
+ }
+ tx.RetryValue = new(big.Int).SetBytes(b)
+
+ // Decode Beneficiary (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Beneficiary: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for Beneficiary: %d", len(b))
+ }
+ copy(tx.Beneficiary[:], b)
+
+ // Decode MaxSubmissionFee (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read MaxSubmissionFee: %w", err)
+ }
+ tx.MaxSubmissionFee = new(big.Int).SetBytes(b)
+
+ // Decode FeeRefundAddr (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read FeeRefundAddr: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for FeeRefundAddr: %d", len(b))
+ }
+ copy(tx.FeeRefundAddr[:], b)
+
+ // Decode RetryData ([]byte)
+ if tx.RetryData, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read RetryData: %w", err)
+ }
+
+ if s.MoreDataInList() {
+ if tx.EffectiveGasUsed, err = s.Uint(); err != nil {
+			return fmt.Errorf("read EffectiveGasUsed: %w", err)
+ }
+ }
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumSubmitRetryableTx: %w", err)
+ }
+ return nil
+}
+
+var TxHashhh = common.HexToHash("0xae75e367d4b38d413a9cc3c0ff825453913e95db0f4089fbfdccae2e77e9cf1c")
+
+func (tx *ArbitrumSubmitRetryableTx) MarshalBinary(w io.Writer) error {
+ hashingOnly := false
+ payloadSize, _ := tx.payloadSize(hashingOnly)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumSubmitRetryableTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, hashingOnly); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) MarshalBinaryForHashing(w io.Writer) error {
+ hashingOnly := true
+
+ payloadSize, _ := tx.payloadSize(hashingOnly)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumSubmitRetryableTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, hashingOnly); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Sender(signer Signer) (accounts.Address, error) {
+ panic("cannot sign ArbitrumSubmitRetryableTx")
+}
+
+func (tx *ArbitrumSubmitRetryableTx) CachedSender() (accounts.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumSubmitRetryableTx) GetSender() (accounts.Address, bool) {
+ return tx.From, true
+}
+
+func (tx *ArbitrumSubmitRetryableTx) SetSender(address accounts.Address) {
+ tx.From = address
+}
+
+func (tx *ArbitrumSubmitRetryableTx) IsContractDeploy() bool {
+ return tx.RetryTo == nil
+}
+
+func (tx *ArbitrumSubmitRetryableTx) Unwrap() Transaction {
+ return tx
+}
+
+// func (tx *ArbitrumSubmitRetryableTx) chainID() *big.Int { return tx.ChainId }
+// func (tx *ArbitrumSubmitRetryableTx) accessList() types.AccessList { return nil }
+// func (tx *ArbitrumSubmitRetryableTx) gas() uint64 { return tx.Gas }
+// func (tx *ArbitrumSubmitRetryableTx) gasPrice() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumSubmitRetryableTx) gasTipCap() *big.Int { return big.NewInt(0) }
+// func (tx *ArbitrumSubmitRetryableTx) gasFeeCap() *big.Int { return tx.GasFeeCap }
+// func (tx *ArbitrumSubmitRetryableTx) value() *big.Int { return common.Big0 }
+// func (tx *ArbitrumSubmitRetryableTx) nonce() uint64 { return 0 }
+// func (tx *ArbitrumSubmitRetryableTx) to() *common.Address { return &ArbRetryableTxAddress }
+func (tx *ArbitrumSubmitRetryableTx) encode(b *bytes.Buffer) error {
+ return rlp.Encode(b, tx)
+}
+func (tx *ArbitrumSubmitRetryableTx) decode(input []byte) error {
+ return rlp.DecodeBytes(input, tx)
+}
+
+//func (tx *ArbitrumSubmitRetryableTx) setSignatureValues(chainID, v, r, s *big.Int) {}
+//
+//func (tx *ArbitrumSubmitRetryableTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// if baseFee == nil {
+// return dst.Set(tx.GasFeeCap)
+// }
+// return dst.Set(baseFee)
+//}
+
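+// ArbitrumDepositTx represents an L1 deposit of Value to the To address. It has
+// no gas or signature fields: the gas limit and price accessors return zero and
+// RawSignatureValues returns zeros.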
+type ArbitrumDepositTx struct {
+ arb.NoTimeBoosted
+ ChainId *big.Int
+ L1RequestId common.Hash
+ From accounts.Address
+ To common.Address
+ Value *big.Int
+}
+
+func (tx *ArbitrumDepositTx) copy() *ArbitrumDepositTx {
+ dtx := &ArbitrumDepositTx{
+ ChainId: new(big.Int),
+ L1RequestId: tx.L1RequestId,
+ From: tx.From,
+ To: tx.To,
+ Value: new(big.Int),
+ }
+	if tx.ChainId != nil {
+		dtx.ChainId.Set(tx.ChainId)
+	}
+	if tx.Value != nil {
+		dtx.Value.Set(tx.Value)
+	}
+ return dtx
+}
+
+func (tx *ArbitrumDepositTx) Type() byte { return ArbitrumDepositTxType }
+func (tx *ArbitrumDepositTx) GetChainID() *uint256.Int { return uint256.MustFromBig(tx.ChainId) }
+func (tx *ArbitrumDepositTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumDepositTx) GetPrice() *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) GetFeeCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumDepositTx) GetGasLimit() uint64 { return 0 }
+func (tx *ArbitrumDepositTx) GetBlobGas() uint64 { return 0 }
+func (tx *ArbitrumDepositTx) GetData() []byte { return nil }
+func (tx *ArbitrumDepositTx) GetValue() *uint256.Int { return uint256.MustFromBig(tx.Value) }
+func (tx *ArbitrumDepositTx) GetTo() *common.Address { return &tx.To }
+func (tx *ArbitrumDepositTx) GetAccessList() AccessList { return nil }
+func (tx *ArbitrumDepositTx) GetAuthorizations() []Authorization { return nil }
+
+func (tx *ArbitrumDepositTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int { return uintZero }
+func (tx *ArbitrumDepositTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+
+func (tx *ArbitrumDepositTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*Message, error) {
+ var to accounts.Address
+ if tx.GetTo() != nil {
+ to = accounts.InternAddress(*tx.GetTo())
+ } else {
+ to = accounts.NilAddress
+ }
+ msg := &Message{
+ gasPrice: *tx.GetPrice(),
+ tipCap: *tx.GetTipCap(),
+ feeCap: *tx.GetFeeCap(),
+ gasLimit: tx.GetGasLimit(),
+ nonce: tx.GetNonce(),
+ accessList: tx.GetAccessList(),
+ from: tx.From,
+ to: to,
+ data: tx.GetData(),
+ amount: *tx.GetValue(),
+ checkNonce: !skipAccountChecks[tx.Type()],
+
+ Tx: tx,
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ }
+ if baseFee != nil {
+ msg.gasPrice.SetFromBig(math.BigMin(msg.gasPrice.ToBig().Add(msg.tipCap.ToBig(), baseFee), msg.feeCap.ToBig()))
+ }
+ // if msg.feeCap.IsZero() {
+ // msg.feeCap.Set(uint256.NewInt(0x5f5e100))
+ // }
+ // if !rules.IsCancun {
+ // return msg, errors.New("BlobTx transactions require Cancun")
+ // }
+ // if baseFee != nil {
+ // overflow := msg.gasPrice.SetFromBig(baseFee)
+ // if overflow {
+ // return msg, errors.New("gasPrice higher than 2^256-1")
+ // }
+ // }
+ // msg.gasPrice.Add(&msg.gasPrice, tx.GetTipCap())
+ // if msg.gasPrice.Gt(tx.GetFeeCap()) {
+ // msg.gasPrice.Set(tx.GetFeeCap())
+ // }
+ // var err error
+ // msg.from, err = d.Sender(s)
+ // msg.maxFeePerBlobGas = *stx.MaxFeePerBlobGas
+ // msg.blobHashes = stx.BlobVersionedHashes
+ return msg, nil
+}
+
+func (tx *ArbitrumDepositTx) SigningHash(chainID *big.Int) common.Hash {
+ //TODO implement me
+ panic("implement me")
+}
+func (tx *ArbitrumDepositTx) Protected() bool {
+ //TODO implement me
+ panic("implement me")
+}
+func (tx *ArbitrumDepositTx) WithSignature(signer Signer, sig []byte) (Transaction, error) {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (tx *ArbitrumDepositTx) Hash() common.Hash {
+ //TODO implement me
+ return prefixedRlpHash(ArbitrumDepositTxType, []interface{}{
+ tx.ChainId,
+ tx.L1RequestId,
+ tx.From,
+ tx.To,
+ tx.Value,
+ })
+}
+
+func (tx *ArbitrumDepositTx) EncodingSize() int {
+ payloadSize := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumDepositTx) EncodeRLP(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+
+ // encode TxType
+ b[0] = ArbitrumDepositTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) payloadSize() int {
+ size := 0
+
+	// ChainId: full RLP-encoded length (header byte + big.Int content)
+ size += rlp.BigIntLen(tx.ChainId)
+
+ // L1RequestId: header + 32 bytes
+ size++ // header for L1RequestId
+ size += 32
+
+ // From: header + 20 bytes
+ size++ // header for From
+ size += 20
+
+ // To: header + 20 bytes
+ size++ // header for To
+ size += 20
+
+	// Value: full RLP-encoded length (header byte + big.Int content)
+ size += rlp.BigIntLen(tx.Value)
+
+ return size
+}
+
+func (tx *ArbitrumDepositTx) encodePayload(w io.Writer, b []byte, payloadSize int) error {
+ // Write the RLP list prefix.
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // Encode ChainId.
+ if err := rlp.EncodeBigInt(tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // Encode L1RequestId (common.Hash, 32 bytes).
+ b[0] = 128 + 32
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.L1RequestId[:]); err != nil {
+ return err
+ }
+
+ // Encode From (common.Address, 20 bytes).
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ fromAddr := tx.From.Value()
+ if _, err := w.Write(fromAddr[:]); err != nil {
+ return err
+ }
+
+ // Encode To (common.Address, 20 bytes).
+ b[0] = 128 + 20
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.To[:]); err != nil {
+ return err
+ }
+
+ // Encode Value.
+ if err := rlp.EncodeBigInt(tx.Value, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) DecodeRLP(s *rlp.Stream) error {
+ // Begin decoding the RLP list.
+ if _, err := s.List(); err != nil {
+ return err
+ }
+
+ var b []byte
+ var err error
+
+ // Decode ChainId (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(big.Int).SetBytes(b)
+
+ // Decode L1RequestId (common.Hash, 32 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read L1RequestId: %w", err)
+ }
+ if len(b) != 32 {
+ return fmt.Errorf("wrong size for L1RequestId: %d", len(b))
+ }
+ copy(tx.L1RequestId[:], b)
+
+ // Decode From (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read From: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for From: %d", len(b))
+ }
+ from := common.Address{}
+ copy(from[:], b)
+
+ tx.From = accounts.InternAddress(from)
+
+ // Decode To (common.Address, 20 bytes)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read To: %w", err)
+ }
+ if len(b) != 20 {
+ return fmt.Errorf("wrong size for To: %d", len(b))
+ }
+ copy(tx.To[:], b)
+
+ // Decode Value (*big.Int)
+ if b, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Value: %w", err)
+ }
+ tx.Value = new(big.Int).SetBytes(b)
+
+ // End the RLP list.
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumDepositTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) MarshalBinary(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumDepositTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) Sender(signer Signer) (accounts.Address, error) { panic("implement me") }
+func (tx *ArbitrumDepositTx) CachedSender() (accounts.Address, bool) { return tx.From, true }
+func (tx *ArbitrumDepositTx) GetSender() (accounts.Address, bool) { return tx.From, true }
+func (tx *ArbitrumDepositTx) SetSender(address accounts.Address) { tx.From = address }
+func (tx *ArbitrumDepositTx) IsContractDeploy() bool { return false }
+func (tx *ArbitrumDepositTx) Unwrap() Transaction { return tx }
+func (tx *ArbitrumDepositTx) encode(b *bytes.Buffer) error { return rlp.Encode(b, tx) }
+func (tx *ArbitrumDepositTx) decode(input []byte) error { return rlp.DecodeBytes(input, tx) }
+
+//func (tx *ArbitrumDepositTx) effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int {
+// return dst.Set(bigZero)
+//}
+
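+// ArbitrumInternalTx is generated by ArbOS itself: both sender and recipient
+// resolve to ArbosAddress, it carries only ChainId and Data, and all gas and
+// value accessors return zero.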
+type ArbitrumInternalTx struct {
+ ChainId *uint256.Int
+ Data []byte
+
+ //arb.NoTimeBoosted
+}
+
+func (t *ArbitrumInternalTx) IsTimeBoosted() *bool {
+ return nil
+}
+func (t *ArbitrumInternalTx) SetTimeboosted(b *bool) {}
+
+func (t *ArbitrumInternalTx) copy() *ArbitrumInternalTx {
+ cpy := &ArbitrumInternalTx{
+ ChainId: t.ChainId.Clone(),
+ Data: common.Copy(t.Data),
+ }
+ return cpy
+}
+
+func (tx *ArbitrumInternalTx) Type() byte { return ArbitrumInternalTxType }
+func (tx *ArbitrumInternalTx) GetChainID() *uint256.Int { return tx.ChainId }
+func (tx *ArbitrumInternalTx) GetNonce() uint64 { return 0 }
+func (tx *ArbitrumInternalTx) GetPrice() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetTipCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetFeeCap() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetBlobHashes() []common.Hash { return []common.Hash{} }
+func (tx *ArbitrumInternalTx) GetGasLimit() uint64 { return 0 }
+func (tx *ArbitrumInternalTx) GetBlobGas() uint64 { return 0 } // todo
+func (tx *ArbitrumInternalTx) GetData() []byte { return tx.Data }
+func (tx *ArbitrumInternalTx) GetValue() *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) GetTo() *common.Address {
+ addrVal := ArbosAddress.Value()
+ return &addrVal
+}
+func (tx *ArbitrumInternalTx) GetAccessList() AccessList { return nil }
+func (tx *ArbitrumInternalTx) GetAuthorizations() []Authorization { return nil }
+func (tx *ArbitrumInternalTx) CachedSender() (accounts.Address, bool) { return ArbosAddress, true }
+func (tx *ArbitrumInternalTx) GetSender() (accounts.Address, bool) { return ArbosAddress, true }
+func (tx *ArbitrumInternalTx) IsContractDeploy() bool { return false }
+func (tx *ArbitrumInternalTx) Unwrap() Transaction { return tx }
+func (tx *ArbitrumInternalTx) SigningHash(chainID *big.Int) common.Hash { panic("implement me") }
+func (tx *ArbitrumInternalTx) Protected() bool { panic("implement me") }
+func (tx *ArbitrumInternalTx) SetSender(address accounts.Address) {} // not supported in ArbitrumInternalTx
+func (tx *ArbitrumInternalTx) Sender(signer Signer) (accounts.Address, error) { panic("not supported") }
+
+func (tx *ArbitrumInternalTx) GetEffectiveGasTip(baseFee *uint256.Int) *uint256.Int { return uintZero }
+func (tx *ArbitrumInternalTx) RawSignatureValues() (*uint256.Int, *uint256.Int, *uint256.Int) {
+ return uintZero, uintZero, uintZero
+}
+func (tx *ArbitrumInternalTx) WithSignature(signer Signer, sig []byte) (Transaction, error) {
+ panic("implement me")
+}
+func (tx *ArbitrumInternalTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*Message, error) {
+ var to accounts.Address
+ if tx.GetTo() != nil {
+ to = accounts.InternAddress(*tx.GetTo())
+ } else {
+ to = accounts.NilAddress
+ }
+
+ msg := &Message{
+ gasPrice: *tx.GetPrice(),
+ tipCap: *tx.GetTipCap(),
+ feeCap: *tx.GetFeeCap(),
+ gasLimit: tx.GetGasLimit(),
+ nonce: tx.GetNonce(),
+ accessList: tx.GetAccessList(),
+ from: ArbosAddress,
+ to: to,
+ data: tx.GetData(),
+ amount: *tx.GetValue(),
+ checkNonce: !skipAccountChecks[tx.Type()],
+ Tx: tx,
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ }
+
+ if baseFee != nil {
+ msg.gasPrice.SetFromBig(math.BigMin(msg.gasPrice.ToBig().Add(msg.tipCap.ToBig(), baseFee), msg.feeCap.ToBig()))
+ }
+ return msg, nil
+}
+
+func (tx *ArbitrumInternalTx) Hash() common.Hash {
+ return prefixedRlpHash(ArbitrumInternalTxType, []interface{}{
+ tx.ChainId,
+ tx.Data,
+ })
+}
+
+func (tx *ArbitrumInternalTx) EncodingSize() int {
+ payloadSize := tx.payloadSize()
+ // Add envelope size and type size
+ return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+}
+
+func (tx *ArbitrumInternalTx) payloadSize() (size int) {
+ // ChainId: rlp.Uint256Len returns the full encoded length
+ size += rlp.Uint256Len(*tx.ChainId)
+
+ // Data: rlp.StringLen returns the full encoded length (header + payload)
+ size += rlp.StringLen(tx.Data)
+ return size
+}
+
+func (tx *ArbitrumInternalTx) encodePayload(w io.Writer, b []byte, payloadSize int) error {
+ // Write the RLP list prefix
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ // Encode ChainId
+ if err := rlp.EncodeUint256(*tx.ChainId, w, b); err != nil {
+ return err
+ }
+
+ // Encode Data
+ if err := rlp.EncodeString(tx.Data, w, b); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) EncodeRLP(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+ // size of struct prefix and TxType
+ envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // envelope
+ if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
+ return err
+ }
+ // encode TxType
+ b[0] = ArbitrumInternalTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) DecodeRLP(s *rlp.Stream) error {
+ _, err := s.List()
+ if err != nil {
+ return err
+ }
+ var b []byte
+ if b, err = s.Uint256Bytes(); err != nil {
+ return fmt.Errorf("read ChainId: %w", err)
+ }
+ tx.ChainId = new(uint256.Int).SetBytes(b)
+ if tx.Data, err = s.Bytes(); err != nil {
+ return fmt.Errorf("read Data: %w", err)
+ }
+
+ if err := s.ListEnd(); err != nil {
+ return fmt.Errorf("close ArbitrumInternalTx: %w", err)
+ }
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) MarshalBinary(w io.Writer) error {
+ payloadSize := tx.payloadSize()
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = ArbitrumInternalTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ return err
+ }
+ return nil
+}
+
+type HeaderInfo struct {
+ SendRoot common.Hash
+ SendCount uint64
+ L1BlockNumber uint64
+ ArbOSFormatVersion uint64
+}
+
+func (info HeaderInfo) extra() []byte {
+ return info.SendRoot[:]
+}
+
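+// mixDigest packs SendCount, L1BlockNumber and ArbOSFormatVersion as three
+// big-endian uint64s into the first 24 bytes of the mix digest; the remaining
+// 8 bytes stay zero.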
+func (info HeaderInfo) mixDigest() [32]byte {
+ mixDigest := common.Hash{}
+ binary.BigEndian.PutUint64(mixDigest[:8], info.SendCount)
+ binary.BigEndian.PutUint64(mixDigest[8:16], info.L1BlockNumber)
+ binary.BigEndian.PutUint64(mixDigest[16:24], info.ArbOSFormatVersion)
+ return mixDigest
+}
+
+func (info HeaderInfo) UpdateHeaderWithInfo(header *Header) {
+ header.MixDigest = info.mixDigest()
+ header.Extra = info.extra()
+}
+
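+// DeserializeHeaderExtraInformation is the inverse of UpdateHeaderWithInfo: it
+// recovers the ArbOS fields from the header's Extra and MixDigest. Headers not
+// produced by ArbOS (no base fee, non-32-byte Extra, or difficulty != 1) yield
+// a zero HeaderInfo.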
+func DeserializeHeaderExtraInformation(header *Header) HeaderInfo {
+ if header == nil || header.BaseFee == nil || header.BaseFee.Sign() == 0 || len(header.Extra) != 32 || header.Difficulty.Cmp(common.Big1) != 0 {
+ // imported blocks have no base fee
+ // The genesis block doesn't have an ArbOS encoded extra field
+ return HeaderInfo{}
+ }
+ extra := HeaderInfo{}
+ copy(extra.SendRoot[:], header.Extra)
+ extra.SendCount = binary.BigEndian.Uint64(header.MixDigest[:8])
+ extra.L1BlockNumber = binary.BigEndian.Uint64(header.MixDigest[8:16])
+ extra.ArbOSFormatVersion = binary.BigEndian.Uint64(header.MixDigest[16:24])
+ return extra
+}
+
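+// GetArbOSVersion returns the ArbOS format version encoded in the header, or 0
+// if the chain is not an Arbitrum chain.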
+func GetArbOSVersion(header *Header, chain *chain.Config) uint64 {
+ if !chain.IsArbitrum() {
+ return 0
+ }
+ extraInfo := DeserializeHeaderExtraInformation(header)
+ return extraInfo.ArbOSFormatVersion
+}
diff --git a/execution/types/arb_types_test.go b/execution/types/arb_types_test.go
new file mode 100644
index 00000000000..307cfade4eb
--- /dev/null
+++ b/execution/types/arb_types_test.go
@@ -0,0 +1,173 @@
+package types
+
+import (
+ "bytes"
+ "math/big"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/stretchr/testify/require"
+)
+
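+// The raw payloads below are typed-transaction envelopes: the leading type
+// byte is stripped before RLP-decoding, and both the computed hash and the
+// re-encoded bytes must match the originals.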
+func TestArbitrumInternalTx(t *testing.T) {
+ rawInitial := [][]byte{
+ common.FromHex("6af88a83066eeeb8846bf6a42d000000000000000000000000000000000000000000000000000000005bd57bd900000000000000000000000000000000000000000000000000000000003f28db00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000064e4f6d4"),
+ common.FromHex("0x6af88a83066eeeb8846bf6a42d00000000000000000000000000000000000000000000000000000000064cb523000000000000000000000000000000000000000000000000000000000049996b00000000000000000000000000000000000000000000000000000000001f09350000000000000000000000000000000000000000000000000000000000000000"),
+ }
+
+ expectedHashes := []common.Hash{
+ common.HexToHash("0x1ac8d67d5c4be184b3822f9ef97102789394f4bc75a0f528d5e14debef6e184c"),
+ common.HexToHash("0x3d78fd6ddbac46955b91777c1fc698b011b7c4a2a84d07a0b0b1a11f34ccf817"),
+ }
+
+ for ri, raw := range rawInitial {
+ var tx ArbitrumInternalTx
+ if err := rlp.DecodeBytes(raw[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+
+ var b bytes.Buffer
+ require.Equal(t, tx.Hash(), expectedHashes[ri])
+ // re-encode and compare with the original bytes
+ require.NoError(t, tx.MarshalBinary(&b))
+ require.Equal(t, raw, b.Bytes())
+ }
+}
+
+func TestArbitrumUnsignedTx(t *testing.T) {
+ rawInitial := [][]byte{
+ common.FromHex("0x65f85e83066eee9462182981bf35cdf00dbecdb9bbc00be33138a4dc0184a0eebb008301bdd494000000000000000000000000000000000000006401a425e1606300000000000000000000000051072981bf35cdf00dbecdb9bbc00be3313893cb"),
+ }
+
+ expectedHashes := []common.Hash{
+ common.HexToHash("0x8b7e4e0a2a31d2889200dc6c91c12833208d2f7847eabf0c21e9b15f86a8a8aa"),
+ }
+
+ for ri, raw := range rawInitial {
+ var tx ArbitrumUnsignedTx
+ if err := rlp.DecodeBytes(raw[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+
+ var b bytes.Buffer
+ require.Equal(t, tx.Hash(), expectedHashes[ri])
+ // re-encode and compare with the original bytes
+ require.NoError(t, tx.MarshalBinary(&b))
+ require.Equal(t, raw, b.Bytes())
+ }
+}
+
+func TestArbitrumSubmitRetryableTx(t *testing.T) {
+ rawInitial := common.FromHex("0x69f89f83066eeea0000000000000000000000000000000000000000000000000000000000000000194b8787d8f23e176a5d32135d746b69886e03313be845bd57bd98723e3dbb7b88ab8843b9aca00830186a0943fab184622dc19b6109349b94811493bf2a45362872386f26fc100009411155ca9bbf7be58e27f3309e629c847996b43c88601f6377d4ab89411155ca9bbf7be58e27f3309e629c847996b43c880")
+ var tx ArbitrumSubmitRetryableTx
+ if err := rlp.DecodeBytes(rawInitial[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+ require.Equal(t, tx.Hash(), common.HexToHash("0x13cb79b086a427f3db7ebe6ec2bb90a806a3b0368ecee6020144f352e37dbdf6"))
+
+ var b bytes.Buffer
+
+ // re-encode with the ForHashing method: the regular encoding includes EffectiveGasUsed, which is not present in the original encoding
+ require.NoError(t, tx.MarshalBinaryForHashing(&b))
+
+ require.Equal(t, rawInitial, b.Bytes())
+}
+
+func TestArbitrumSubmitRetryTx(t *testing.T) {
+ rawInitial := common.FromHex("0x68f88583066eee8094b8787d8f23e176a5d32135d746b69886e03313be8405f5e100830186a0943fab184622dc19b6109349b94811493bf2a45362872386f26fc1000080a013cb79b086a427f3db7ebe6ec2bb90a806a3b0368ecee6020144f352e37dbdf69411155ca9bbf7be58e27f3309e629c847996b43c8860b0e85efeab88601f6377d4ab8")
+ var tx ArbitrumRetryTx
+ if err := rlp.DecodeBytes(rawInitial[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+ require.Equal(t, tx.Hash(), common.HexToHash("0x873c5ee3092c40336006808e249293bf5f4cb3235077a74cac9cafa7cf73cb8b"))
+
+ var b bytes.Buffer
+
+ // re-encode and compare with the original bytes
+ require.NoError(t, tx.MarshalBinary(&b))
+
+ require.Equal(t, rawInitial, b.Bytes())
+}
+
+func TestArbitrumDepositTx(t *testing.T) {
+ rawInitial := common.FromHex("0x64f85883066eeea0000000000000000000000000000000000000000000000000000000000000000f9499998aa374dbde60d26433e275ad700b658731749488888aa374dbde60d26433e275ad700b65872063880de0b6b3a7640000")
+ var tx ArbitrumDepositTx
+
+ if err := rlp.DecodeBytes(rawInitial[1:], &tx); err != nil {
+ t.Fatal(err)
+ }
+
+ require.Equal(t, tx.Hash(), common.HexToHash("0x733c1300c06ac4ced959e68f16f565ee8918a4e75c9f9e3913bc7a7e939c60db"))
+
+ var b bytes.Buffer
+
+ // re-encode and compare with the original bytes
+ require.NoError(t, tx.MarshalBinary(&b))
+
+ require.Equal(t, rawInitial, b.Bytes())
+}
+
+func TestArbitrumSubmitRetryableTxGasUsed(t *testing.T) {
+ gasUsedVals := []uint64{0, 32000}
+
+ for _, gasUsed := range gasUsedVals {
+ two := big.NewInt(2)
+ chainID := big.NewInt(1)
+
+ requestId := common.HexToHash("0x0123")
+ from := common.HexToAddress("0x0000000000000000000000000000000000000001")
+ retryTo := common.HexToAddress("0x0000000000000000000000000000000000000002")
+ beneficiary := common.HexToAddress("0x00000000000000000000000000000000000000B5")
+ feeRefund := common.HexToAddress("0x0000000000000000000000000000000000000003")
+
+ tx := &ArbitrumSubmitRetryableTx{
+ ChainId: chainID,
+ RequestId: requestId,
+ From: from,
+ L1BaseFee: big.NewInt(0),
+ DepositValue: big.NewInt(1000),
+ GasFeeCap: two,
+ Gas: 60000,
+ RetryTo: &retryTo,
+ RetryValue: two,
+ Beneficiary: beneficiary,
+ MaxSubmissionFee: big.NewInt(7),
+ FeeRefundAddr: feeRefund,
+ RetryData: []byte("data"),
+ EffectiveGasUsed: gasUsed,
+ }
+
+ var buf bytes.Buffer
+ require.NoError(t, tx.EncodeRLP(&buf))
+
+ // Decode using the generic RLP transaction decoder
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*ArbitrumSubmitRetryableTx)
+ require.True(t, ok, "decoded type should be *ArbitrumSubmitRetryableTx")
+
+ // Field-by-field equality
+ require.EqualValues(t, tx.ChainId, tx2.ChainId)
+ require.EqualValues(t, tx.RequestId, tx2.RequestId)
+ require.EqualValues(t, tx.From, tx2.From)
+ require.EqualValues(t, tx.L1BaseFee, tx2.L1BaseFee)
+ require.EqualValues(t, tx.DepositValue, tx2.DepositValue)
+ require.EqualValues(t, tx.GasFeeCap, tx2.GasFeeCap)
+ require.EqualValues(t, tx.Gas, tx2.Gas)
+ require.EqualValues(t, tx.RetryTo, tx2.RetryTo)
+ require.EqualValues(t, tx.RetryValue, tx2.RetryValue)
+ require.EqualValues(t, tx.Beneficiary, tx2.Beneficiary)
+ require.EqualValues(t, tx.MaxSubmissionFee, tx2.MaxSubmissionFee)
+ require.EqualValues(t, tx.FeeRefundAddr, tx2.FeeRefundAddr)
+ require.EqualValues(t, tx.RetryData, tx2.RetryData)
+ require.EqualValues(t, tx.EffectiveGasUsed, tx2.EffectiveGasUsed)
+
+ // No timeboost flag was encoded, so IsTimeBoosted reports nil.
+ require.Nil(t, tx2.IsTimeBoosted())
+ }
+}
+
+func boolPtr(b bool) *bool { return &b }
diff --git a/execution/types/arbitrum_legacy_tx.go b/execution/types/arbitrum_legacy_tx.go
new file mode 100644
index 00000000000..8909c108389
--- /dev/null
+++ b/execution/types/arbitrum_legacy_tx.go
@@ -0,0 +1,313 @@
+package types
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "io"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/holiman/uint256"
+)
+
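+// ArbitrumLegacyTxData wraps an original legacy transaction together with
+// Arbitrum-specific metadata. On the wire it is encoded as the type byte
+// followed by rlp([rlp(LegacyTx), HashOverride, EffectiveGasPrice,
+// L1BlockNumber, OverrideSender]); a nil OverrideSender is encoded as an
+// empty string.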
+type ArbitrumLegacyTxData struct {
+ *LegacyTx
+ HashOverride common.Hash // Hash cannot be locally computed from other fields
+ EffectiveGasPrice uint64
+ L1BlockNumber uint64
+ OverrideSender *common.Address `rlp:"optional,nil"` // only used in unsigned Txs
+}
+
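+// NewArbitrumLegacyTx wraps an existing legacy transaction with an overridden
+// hash, its effective gas price, the L1 block number and an optional sender
+// override. Any non-legacy transaction is rejected.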
+func NewArbitrumLegacyTx(origTx Transaction, hashOverride common.Hash, effectiveGas uint64, l1Block uint64, senderOverride *common.Address) (Transaction, error) {
+ if origTx.Type() != LegacyTxType {
+ return nil, errors.New("attempt to arbitrum-wrap non-legacy transaction")
+ }
+ inner := ArbitrumLegacyTxData{
+ LegacyTx: origTx.(*LegacyTx),
+ HashOverride: hashOverride,
+ EffectiveGasPrice: effectiveGas,
+ L1BlockNumber: l1Block,
+ OverrideSender: senderOverride,
+ }
+ return NewArbTx(&inner), nil
+}
+
+func (tx *ArbitrumLegacyTxData) Type() byte { return ArbitrumLegacyTxType }
+
+func (tx *ArbitrumLegacyTxData) Unwrap() Transaction {
+ return tx
+}
+
+func (tx *ArbitrumLegacyTxData) Hash() common.Hash {
+ if tx.HashOverride != (common.Hash{}) {
+ return tx.HashOverride
+ }
+ return tx.LegacyTx.Hash()
+}
+
+func (tx *ArbitrumLegacyTxData) EncodeRLP(w io.Writer) error {
+ if _, err := w.Write([]byte{ArbitrumLegacyTxType}); err != nil {
+ return err
+ }
+
+ legacy := bytes.NewBuffer(nil)
+ if err := tx.LegacyTx.EncodeRLP(legacy); err != nil {
+ return err
+ }
+ legacyBytes := legacy.Bytes()
+
+ payloadSize := rlp.StringLen(legacyBytes) // embedded LegacyTx RLP
+ payloadSize += 1 + 32 // HashOverride (1 byte length + 32 bytes hash)
+ payloadSize += rlp.U64Len(tx.EffectiveGasPrice) // EffectiveGasPrice
+ payloadSize += rlp.U64Len(tx.L1BlockNumber) // L1BlockNumber
+
+ if tx.OverrideSender == nil {
+ payloadSize += 1 // empty OverrideSender
+ } else {
+ payloadSize += 1 + 20 // OverrideSender (1 byte length + 20 bytes address)
+ }
+
+ b := make([]byte, 10)
+ if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeString(legacyBytes, w, b); err != nil {
+ return err
+ }
+
+ b[0] = 128 + 32 // 0xa0: RLP prefix for the 32-byte HashOverride
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.HashOverride[:]); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeInt(tx.EffectiveGasPrice, w, b); err != nil {
+ return err
+ }
+
+ if err := rlp.EncodeInt(tx.L1BlockNumber, w, b); err != nil {
+ return err
+ }
+
+ if tx.OverrideSender == nil {
+ b[0] = 0x80 // RLP empty string for a nil OverrideSender
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ } else {
+ b[0] = 128 + 20 // 0x94: RLP prefix for the 20-byte OverrideSender address
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if _, err := w.Write(tx.OverrideSender[:]); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (tx *ArbitrumLegacyTxData) DecodeRLP(s *rlp.Stream) error {
+ _, err := s.List()
+ if err != nil {
+ return err
+ }
+
+ legacyBytes, err := s.Bytes()
+ if err != nil {
+ return err
+ }
+
+ legacyTx := &LegacyTx{}
+ str := rlp.NewStream(bytes.NewReader(legacyBytes), uint64(len(legacyBytes)))
+ if err := legacyTx.DecodeRLP(str); err != nil {
+ return err
+ }
+ tx.LegacyTx = legacyTx
+
+ var hash common.Hash
+ if err := s.Decode(&hash); err != nil {
+ return err
+ }
+ tx.HashOverride = hash
+
+ var effectiveGasPrice uint64
+ if err := s.Decode(&effectiveGasPrice); err != nil {
+ return err
+ }
+ tx.EffectiveGasPrice = effectiveGasPrice
+
+ var l1BlockNumber uint64
+ if err := s.Decode(&l1BlockNumber); err != nil {
+ return err
+ }
+ tx.L1BlockNumber = l1BlockNumber
+
+ var sender common.Address
+ if err := s.Decode(&sender); err != nil {
+ if err.Error() == "rlp: input string too short for common.Address" {
+ tx.OverrideSender = nil
+ } else {
+ return err
+ }
+ } else if sender != (common.Address{}) {
+ tx.OverrideSender = &sender
+ }
+
+ return s.ListEnd()
+}
+
+type arbitrumLegacyTxJSON struct {
+ Type hexutil.Uint64 `json:"type"`
+ Hash common.Hash `json:"hash"`
+ Nonce *hexutil.Uint64 `json:"nonce"`
+ GasPrice *hexutil.Big `json:"gasPrice"`
+ Gas *hexutil.Uint64 `json:"gas"`
+ To *common.Address `json:"to"`
+ Value *hexutil.Big `json:"value"`
+ Data *hexutil.Bytes `json:"input"`
+ V *hexutil.Big `json:"v"`
+ R *hexutil.Big `json:"r"`
+ S *hexutil.Big `json:"s"`
+ HashOverride common.Hash `json:"hashOverride"`
+ EffectiveGasPrice *hexutil.Uint64 `json:"effectiveGasPrice"`
+ L1BlockNumber *hexutil.Uint64 `json:"l1BlockNumber"`
+ OverrideSender *common.Address `json:"overrideSender,omitempty"`
+}
+
+func (tx *ArbitrumLegacyTxData) MarshalJSON() ([]byte, error) {
+ var enc arbitrumLegacyTxJSON
+
+ // These are set for all txn types.
+ enc.Type = hexutil.Uint64(tx.Type())
+ enc.Hash = tx.HashOverride // For ArbitrumLegacyTxData, hash comes from HashOverride
+ enc.Nonce = (*hexutil.Uint64)(&tx.Nonce)
+ enc.Gas = (*hexutil.Uint64)(&tx.GasLimit)
+ enc.GasPrice = (*hexutil.Big)(tx.GasPrice.ToBig())
+ enc.Value = (*hexutil.Big)(tx.Value.ToBig())
+ enc.Data = (*hexutil.Bytes)(&tx.Data)
+ enc.To = tx.To
+ enc.V = (*hexutil.Big)(tx.V.ToBig())
+ enc.R = (*hexutil.Big)(tx.R.ToBig())
+ enc.S = (*hexutil.Big)(tx.S.ToBig())
+
+ // Arbitrum-specific fields
+ enc.HashOverride = tx.HashOverride
+ enc.EffectiveGasPrice = (*hexutil.Uint64)(&tx.EffectiveGasPrice)
+ enc.L1BlockNumber = (*hexutil.Uint64)(&tx.L1BlockNumber)
+ enc.OverrideSender = tx.OverrideSender
+
+ return json.Marshal(&enc)
+}
+
+func (tx *ArbitrumLegacyTxData) UnmarshalJSON(input []byte) error {
+ var dec arbitrumLegacyTxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+
+ // Validate and set common fields
+ if dec.To != nil {
+ tx.To = dec.To
+ }
+ if dec.Nonce == nil {
+ return errors.New("missing required field 'nonce' in transaction")
+ }
+ tx.Nonce = uint64(*dec.Nonce)
+
+ if dec.GasPrice == nil {
+ return errors.New("missing required field 'gasPrice' in transaction")
+ }
+ var overflow bool
+ tx.GasPrice, overflow = uint256.FromBig(dec.GasPrice.ToInt())
+ if overflow {
+ return errors.New("'gasPrice' in transaction does not fit in 256 bits")
+ }
+
+ if dec.Gas == nil {
+ return errors.New("missing required field 'gas' in transaction")
+ }
+ tx.GasLimit = uint64(*dec.Gas)
+
+ if dec.Value == nil {
+ return errors.New("missing required field 'value' in transaction")
+ }
+ tx.Value, overflow = uint256.FromBig(dec.Value.ToInt())
+ if overflow {
+ return errors.New("'value' in transaction does not fit in 256 bits")
+ }
+
+ if dec.Data == nil {
+ return errors.New("missing required field 'input' in transaction")
+ }
+ tx.Data = *dec.Data
+
+ // Decode signature fields
+ if dec.V == nil {
+ return errors.New("missing required field 'v' in transaction")
+ }
+ overflow = tx.V.SetFromBig(dec.V.ToInt())
+ if overflow {
+ return errors.New("dec.V higher than 2^256-1")
+ }
+
+ if dec.R == nil {
+ return errors.New("missing required field 'r' in transaction")
+ }
+ overflow = tx.R.SetFromBig(dec.R.ToInt())
+ if overflow {
+ return errors.New("dec.R higher than 2^256-1")
+ }
+
+ if dec.S == nil {
+ return errors.New("missing required field 's' in transaction")
+ }
+ overflow = tx.S.SetFromBig(dec.S.ToInt())
+ if overflow {
+ return errors.New("dec.S higher than 2^256-1")
+ }
+
+ // Validate signature if present
+ withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero()
+ if withSignature {
+ if err := SanityCheckSignature(&tx.V, &tx.R, &tx.S, true); err != nil {
+ return err
+ }
+ }
+
+ // Arbitrum-specific fields
+ tx.HashOverride = dec.HashOverride
+
+ if dec.EffectiveGasPrice != nil {
+ tx.EffectiveGasPrice = uint64(*dec.EffectiveGasPrice)
+ }
+
+ if dec.L1BlockNumber != nil {
+ tx.L1BlockNumber = uint64(*dec.L1BlockNumber)
+ }
+
+ tx.OverrideSender = dec.OverrideSender
+
+ return nil
+}
diff --git a/execution/types/arbitrum_legacy_tx_test.go b/execution/types/arbitrum_legacy_tx_test.go
new file mode 100644
index 00000000000..2896fb94d81
--- /dev/null
+++ b/execution/types/arbitrum_legacy_tx_test.go
@@ -0,0 +1,418 @@
+package types
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+)
+
+func TestArbitrumLegacyTxData_RLPEncodeDecode(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+ senderOverride := common.HexToAddress("0x1234567890123456789012345678901234567890")
+ legacyTx := &LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: 42,
+ GasLimit: 50000,
+ To: &to,
+ Value: uint256.NewInt(1000000),
+ Data: []byte{0x01, 0x02, 0x03, 0x04},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(100),
+ S: *uint256.NewInt(200),
+ },
+ GasPrice: uint256.NewInt(20000000000), // 20 gwei
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"),
+ EffectiveGasPrice: 15000000000, // 15 gwei
+ L1BlockNumber: 1234567,
+ OverrideSender: &senderOverride,
+ }
+
+ t.Run("RLP Encode and Decode from bytes", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encodedBytes := buf.Bytes()
+ require.Equal(t, ArbitrumLegacyTxType, encodedBytes[0])
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+ require.Equal(t, arbLegacyTx.Nonce, decodedTx.Nonce)
+ require.Equal(t, arbLegacyTx.GasLimit, decodedTx.GasLimit)
+ require.Equal(t, arbLegacyTx.To, decodedTx.To)
+ require.True(t, arbLegacyTx.Value.Eq(decodedTx.Value))
+ require.Equal(t, arbLegacyTx.Data, decodedTx.Data)
+ require.True(t, arbLegacyTx.V.Eq(&decodedTx.V))
+ require.True(t, arbLegacyTx.R.Eq(&decodedTx.R))
+ require.True(t, arbLegacyTx.S.Eq(&decodedTx.S))
+ require.True(t, arbLegacyTx.GasPrice.Eq(decodedTx.GasPrice))
+ require.Equal(t, arbLegacyTx.HashOverride, decodedTx.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decodedTx.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.OverrideSender, decodedTx.OverrideSender)
+ })
+
+ t.Run("RLP Decode from Stream", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Equal(t, arbLegacyTx.Nonce, decodedTx.Nonce)
+ require.Equal(t, arbLegacyTx.GasLimit, decodedTx.GasLimit)
+ require.Equal(t, arbLegacyTx.To, decodedTx.To)
+ require.True(t, arbLegacyTx.Value.Eq(decodedTx.Value))
+ require.Equal(t, arbLegacyTx.Data, decodedTx.Data)
+ require.True(t, arbLegacyTx.V.Eq(&decodedTx.V))
+ require.True(t, arbLegacyTx.R.Eq(&decodedTx.R))
+ require.True(t, arbLegacyTx.S.Eq(&decodedTx.S))
+ require.True(t, arbLegacyTx.GasPrice.Eq(decodedTx.GasPrice))
+ require.Equal(t, arbLegacyTx.HashOverride, decodedTx.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decodedTx.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.OverrideSender, decodedTx.OverrideSender)
+ })
+
+ t.Run("RLP with nil OverrideSender", func(t *testing.T) {
+ arbLegacyTxNoSender := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xdeadbeef"),
+ EffectiveGasPrice: 25000000000,
+ L1BlockNumber: 999999,
+ OverrideSender: nil,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTxNoSender.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+ require.Nil(t, decodedTx.OverrideSender)
+ require.Equal(t, arbLegacyTxNoSender.HashOverride, decodedTx.HashOverride)
+ require.Equal(t, arbLegacyTxNoSender.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTxNoSender.L1BlockNumber, decodedTx.L1BlockNumber)
+ })
+
+ t.Run("Type byte verification", func(t *testing.T) {
+ require.Equal(t, ArbitrumLegacyTxType, arbLegacyTx.Type())
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encoded := buf.Bytes()
+ require.Greater(t, len(encoded), 0)
+ require.Equal(t, ArbitrumLegacyTxType, encoded[0])
+ })
+
+ t.Run("LegacyTx embedding verification", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.NotNil(t, decodedTx.LegacyTx)
+ require.Equal(t, legacyTx.Nonce, decodedTx.LegacyTx.Nonce)
+ require.True(t, legacyTx.GasPrice.Eq(decodedTx.LegacyTx.GasPrice))
+ })
+}
+
+func TestArbitrumLegacyTxData_ComplexScenarios(t *testing.T) {
+ t.Run("Contract creation transaction", func(t *testing.T) {
+ legacyTx := &LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: 1,
+ GasLimit: 1000000,
+ To: nil, // Contract creation
+ Value: uint256.NewInt(0),
+ Data: []byte{0x60, 0x80, 0x60, 0x40},
+ V: *uint256.NewInt(27),
+ R: *uint256.NewInt(1),
+ S: *uint256.NewInt(2),
+ },
+ GasPrice: uint256.NewInt(1000000000),
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"),
+ EffectiveGasPrice: 900000000,
+ L1BlockNumber: 100,
+ OverrideSender: nil,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Nil(t, decodedTx.To)
+ require.Equal(t, arbLegacyTx.Data, decodedTx.Data)
+ })
+
+ t.Run("Large values", func(t *testing.T) {
+ maxUint256 := new(uint256.Int)
+ maxUint256.SetAllOne()
+
+ to := common.HexToAddress("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
+ legacyTx := &LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: ^uint64(0),
+ GasLimit: ^uint64(0),
+ To: &to,
+ Value: maxUint256,
+ Data: make([]byte, 1000),
+ V: *maxUint256,
+ R: *maxUint256,
+ S: *maxUint256,
+ },
+ GasPrice: maxUint256,
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"),
+ EffectiveGasPrice: ^uint64(0),
+ L1BlockNumber: ^uint64(0),
+ OverrideSender: &to,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ decodedTx := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ encodedBytes := buf.Bytes()
+ stream := rlp.NewStream(bytes.NewReader(encodedBytes[1:]), uint64(len(encodedBytes)-1))
+ err = decodedTx.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Equal(t, arbLegacyTx.Nonce, decodedTx.Nonce)
+ require.Equal(t, arbLegacyTx.GasLimit, decodedTx.GasLimit)
+ require.True(t, arbLegacyTx.Value.Eq(decodedTx.Value))
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decodedTx.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decodedTx.L1BlockNumber)
+ })
+}
+
+func TestArbitrumLegacyTxData_TypeByteHandling(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+ legacyTx := &LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: 100,
+ GasLimit: 21000,
+ To: &to,
+ Value: uint256.NewInt(1000000),
+ Data: []byte{0x12, 0x34},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(1),
+ S: *uint256.NewInt(2),
+ },
+ GasPrice: uint256.NewInt(30000000000),
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xabcdef"),
+ EffectiveGasPrice: 25000000000,
+ L1BlockNumber: 999999,
+ OverrideSender: nil,
+ }
+
+ t.Run("EncodeRLP writes type byte first", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encoded := buf.Bytes()
+ require.Greater(t, len(encoded), 1)
+ require.Equal(t, ArbitrumLegacyTxType, encoded[0])
+
+ decoded := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ stream := rlp.NewStream(bytes.NewReader(encoded[1:]), uint64(len(encoded)-1))
+ err = decoded.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.Equal(t, arbLegacyTx.HashOverride, decoded.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decoded.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decoded.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.Nonce, decoded.Nonce)
+ })
+
+ t.Run("Round-trip with type byte", func(t *testing.T) {
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encoded := buf.Bytes()
+ require.Equal(t, ArbitrumLegacyTxType, encoded[0])
+
+ // Decode skipping type byte
+ decoded := &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ stream := rlp.NewStream(bytes.NewReader(encoded[1:]), uint64(len(encoded)-1))
+ err = decoded.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ // Re-encode and compare
+ var buf2 bytes.Buffer
+ err = decoded.EncodeRLP(&buf2)
+ require.NoError(t, err)
+
+ require.Equal(t, encoded, buf2.Bytes())
+ })
+}
+
+func TestArbitrumLegacyTxData_ArbTxIntegration(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+
+ legacyTx := &LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: 10,
+ GasLimit: 21000,
+ To: &to,
+ Value: uint256.NewInt(1000),
+ Data: []byte{},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(1000),
+ S: *uint256.NewInt(2000),
+ },
+ GasPrice: uint256.NewInt(10000000000),
+ }
+
+ arbLegacyTxData := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef"),
+ EffectiveGasPrice: 9000000000,
+ L1BlockNumber: 500000,
+ OverrideSender: nil,
+ }
+
+ arbTx := NewArbTx(arbLegacyTxData)
+ require.Equal(t, ArbitrumLegacyTxType, arbTx.Type())
+
+ // Encode using the inner transaction's EncodeRLP (which includes type byte)
+ var buf bytes.Buffer
+ err := arbLegacyTxData.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encodedBytes := buf.Bytes()
+
+ // Verify first byte is the type
+ require.Equal(t, ArbitrumLegacyTxType, encodedBytes[0])
+
+ // Decode using ArbTx's decodeTyped (skip the type byte)
+ newArbTx := &ArbTx{}
+ decoded, err := newArbTx.decodeTyped(encodedBytes, true)
+ require.NoError(t, err)
+
+ decodedArbLegacy, ok := decoded.(*ArbitrumLegacyTxData)
+ require.True(t, ok, "Decoded transaction should be ArbitrumLegacyTxData")
+
+ // Verify all fields
+ require.Equal(t, arbLegacyTxData.HashOverride, decodedArbLegacy.HashOverride)
+ require.Equal(t, arbLegacyTxData.EffectiveGasPrice, decodedArbLegacy.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTxData.L1BlockNumber, decodedArbLegacy.L1BlockNumber)
+ require.Equal(t, arbLegacyTxData.Nonce, decodedArbLegacy.Nonce)
+ require.Equal(t, arbLegacyTxData.GasLimit, decodedArbLegacy.GasLimit)
+}
+
+func TestArbitrumLegacyTxData_TypeBasedDecodingPattern(t *testing.T) {
+ to := common.HexToAddress("0x742d35Cc6634C0532925a3b844Bc9e7595f0bEb7")
+ legacyTx := &LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: 42,
+ GasLimit: 50000,
+ To: &to,
+ Value: uint256.NewInt(1000000),
+ Data: []byte{0x01, 0x02, 0x03, 0x04},
+ V: *uint256.NewInt(28),
+ R: *uint256.NewInt(100),
+ S: *uint256.NewInt(200),
+ },
+ GasPrice: uint256.NewInt(20000000000),
+ }
+
+ arbLegacyTx := &ArbitrumLegacyTxData{
+ LegacyTx: legacyTx,
+ HashOverride: common.HexToHash("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"),
+ EffectiveGasPrice: 15000000000,
+ L1BlockNumber: 1234567,
+ OverrideSender: nil,
+ }
+
+ var buf bytes.Buffer
+ err := arbLegacyTx.EncodeRLP(&buf)
+ require.NoError(t, err)
+
+ encoded := buf.Bytes()
+ require.Greater(t, len(encoded), 0)
+
+ txType := encoded[0]
+ require.Equal(t, ArbitrumLegacyTxType, txType)
+
+ var decodedTx Transaction
+ switch txType {
+ case ArbitrumLegacyTxType:
+ decodedTx = &ArbitrumLegacyTxData{
+ LegacyTx: &LegacyTx{},
+ }
+ default:
+ t.Fatalf("Unknown transaction type: 0x%x", txType)
+ }
+
+ stream := rlp.NewStream(bytes.NewReader(encoded[1:]), uint64(len(encoded)-1))
+ err = decodedTx.(*ArbitrumLegacyTxData).DecodeRLP(stream)
+ require.NoError(t, err)
+
+ decoded := decodedTx.(*ArbitrumLegacyTxData)
+ require.Equal(t, arbLegacyTx.HashOverride, decoded.HashOverride)
+ require.Equal(t, arbLegacyTx.EffectiveGasPrice, decoded.EffectiveGasPrice)
+ require.Equal(t, arbLegacyTx.L1BlockNumber, decoded.L1BlockNumber)
+ require.Equal(t, arbLegacyTx.Nonce, decoded.Nonce)
+}
diff --git a/execution/types/arbitrum_signer.go b/execution/types/arbitrum_signer.go
new file mode 100644
index 00000000000..ba8591191bc
--- /dev/null
+++ b/execution/types/arbitrum_signer.go
@@ -0,0 +1,84 @@
+package types
+
+import (
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/types/accounts"
+ "github.com/holiman/uint256"
+)
+
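+// Well-known ArbOS system and precompile addresses.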
+var ArbosAddress = accounts.InternAddress(common.HexToAddress("0xa4b05"))
+var ArbosStateAddress = accounts.InternAddress(common.HexToAddress("0xA4B05FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF"))
+var ArbSysAddress = accounts.InternAddress(common.HexToAddress("0x64"))
+var ArbInfoAddress = accounts.InternAddress(common.HexToAddress("0x65"))
+var ArbAddressTableAddress = accounts.InternAddress(common.HexToAddress("0x66"))
+var ArbBLSAddress = accounts.InternAddress(common.HexToAddress("0x67"))
+var ArbFunctionTableAddress = accounts.InternAddress(common.HexToAddress("0x68"))
+var ArbosTestAddress = accounts.InternAddress(common.HexToAddress("0x69"))
+var ArbGasInfoAddress = accounts.InternAddress(common.HexToAddress("0x6c"))
+var ArbOwnerPublicAddress = accounts.InternAddress(common.HexToAddress("0x6b"))
+var ArbAggregatorAddress = accounts.InternAddress(common.HexToAddress("0x6d"))
+var ArbRetryableTxAddress = common.HexToAddress("0x6e")
+var ArbStatisticsAddress = accounts.InternAddress(common.HexToAddress("0x6f"))
+var ArbOwnerAddress = accounts.InternAddress(common.HexToAddress("0x70"))
+var ArbWasmAddress = accounts.InternAddress(common.HexToAddress("0x71"))
+var ArbWasmCacheAddress = accounts.InternAddress(common.HexToAddress("0x72"))
+var ArbNativeTokenManagerAddress = accounts.InternAddress(common.HexToAddress("0x73"))
+var NodeInterfaceAddress = accounts.InternAddress(common.HexToAddress("0xc8"))
+var NodeInterfaceDebugAddress = accounts.InternAddress(common.HexToAddress("0xc9"))
+var ArbDebugAddress = accounts.InternAddress(common.HexToAddress("0xff"))
+
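+// ArbitrumSigner wraps an inner Signer and short-circuits sender recovery for
+// the Arbitrum system transaction types, whose senders are carried in the
+// transaction itself (or fixed to ArbosAddress) rather than recovered from a
+// signature.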
+type ArbitrumSigner struct {
+ Signer
+}
+
+func NewArbitrumSigner(signer Signer) ArbitrumSigner {
+ return ArbitrumSigner{Signer: signer}
+}
+
+func (s ArbitrumSigner) Sender(tx Transaction) (accounts.Address, error) {
+ switch inner := tx.(type) {
+ case *ArbitrumUnsignedTx:
+ return inner.From, nil
+ case *ArbitrumContractTx:
+ return inner.From, nil
+ case *ArbitrumDepositTx:
+ return inner.From, nil
+ case *ArbitrumInternalTx:
+ return ArbosAddress, nil
+ case *ArbitrumRetryTx:
+ return inner.From, nil
+ case *ArbitrumSubmitRetryableTx:
+ return inner.From, nil
+ case *ArbitrumLegacyTxData:
+ if inner.OverrideSender != nil {
+ return accounts.InternAddress(*inner.OverrideSender), nil
+ }
+ // TODO Arbitrum: not sure this check is needed for arb1
+ if inner.LegacyTx.V.IsZero() && inner.LegacyTx.R.IsZero() && inner.LegacyTx.S.IsZero() {
+ return accounts.NilAddress, nil
+ }
+ return s.Signer.Sender(inner.LegacyTx)
+ default:
+ return s.Signer.Sender(tx)
+ }
+}
+
+func (s ArbitrumSigner) Equal(s2 ArbitrumSigner) bool {
+ return s2.Signer.Equal(s.Signer)
+}
+
+func (s ArbitrumSigner) SignatureValues(tx Transaction, sig []byte) (R, S, V *uint256.Int, err error) {
+ switch dataTx := tx.(type) {
+ case *ArbitrumUnsignedTx, *ArbitrumContractTx, *ArbitrumDepositTx,
+ *ArbitrumInternalTx, *ArbitrumRetryTx, *ArbitrumSubmitRetryableTx:
+
+ return nil, nil, nil, nil
+ case *ArbitrumLegacyTxData:
+ return s.Signer.SignatureValues(dataTx.LegacyTx, sig)
+ default:
+ return s.Signer.SignatureValues(tx, sig)
+ }
+}
diff --git a/execution/types/blob_tx.go b/execution/types/blob_tx.go
index 8a6df3fc889..2724f915a48 100644
--- a/execution/types/blob_tx.go
+++ b/execution/types/blob_tx.go
@@ -22,6 +22,7 @@ import (
"io"
"math/big"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -69,6 +70,9 @@ func (stx *BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*M
checkNonce: true,
checkTransaction: true,
checkGas: true,
+
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ Tx: stx,
}
if !rules.IsCancun {
return nil, errors.New("BlobTx transactions require Cancun")
@@ -92,7 +96,7 @@ func (stx *BlobTx) AsMessage(s Signer, baseFee *big.Int, rules *chain.Rules) (*M
return &msg, nil
}
-func (stx *BlobTx) cachedSender() (sender accounts.Address, ok bool) {
+func (stx *BlobTx) CachedSender() (sender accounts.Address, ok bool) {
s := stx.from
if s.IsNil() {
return sender, false
@@ -191,13 +195,13 @@ func (stx *BlobTx) copy() *BlobTx {
}
func (stx *BlobTx) EncodingSize() int {
- payloadSize, _, _ := stx.payloadSize()
+ payloadSize, _, _ := stx.payloadSize(false)
// Add envelope size and type size
return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
}
-func (stx *BlobTx) payloadSize() (payloadSize, accessListLen, blobHashesLen int) {
- payloadSize, accessListLen = stx.DynamicFeeTransaction.payloadSize()
+func (stx *BlobTx) payloadSize(hashingOnly bool) (payloadSize, accessListLen, blobHashesLen int) {
+ payloadSize, accessListLen = stx.DynamicFeeTransaction.payloadSize(hashingOnly)
payloadSize += rlp.Uint256Len(*stx.MaxFeePerBlobGas)
// size of BlobVersionedHashes
blobHashesLen = blobVersionedHashesSize(stx.BlobVersionedHashes)
@@ -218,7 +222,7 @@ func encodeBlobVersionedHashes(hashes []common.Hash, w io.Writer, b []byte) erro
return nil
}
-func (stx *BlobTx) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen, blobHashesLen int) error {
+func (stx *BlobTx) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen, blobHashesLen int, hashingOnly bool) error {
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
return err
@@ -291,6 +295,12 @@ func (stx *BlobTx) encodePayload(w io.Writer, b []byte, payloadSize, accessListL
if err := rlp.EncodeUint256(stx.S, w, b); err != nil {
return err
}
+ // encode the optional Timeboosted flag (skipped when encoding for hashing)
+ if stx.Timeboosted != nil && !hashingOnly {
+ if err := rlp.EncodeBool(*stx.Timeboosted, w, b); err != nil {
+ return err
+ }
+ }
return nil
}
@@ -298,11 +308,11 @@ func (stx *BlobTx) EncodeRLP(w io.Writer) error {
if stx.To == nil {
return ErrNilToFieldTx
}
- payloadSize, accessListLen, blobHashesLen := stx.payloadSize()
+ payloadSize, accessListLen, blobHashesLen := stx.payloadSize(false)
// size of struct prefix and TxType
envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// envelope
if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
return err
@@ -312,7 +322,7 @@ func (stx *BlobTx) EncodeRLP(w io.Writer) error {
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if err := stx.encodePayload(w, b[:], payloadSize, accessListLen, blobHashesLen); err != nil {
+ if err := stx.encodePayload(w, b[:], payloadSize, accessListLen, blobHashesLen, false); err != nil {
return err
}
return nil
@@ -322,15 +332,34 @@ func (stx *BlobTx) MarshalBinary(w io.Writer) error {
if stx.To == nil {
return ErrNilToFieldTx
}
- payloadSize, accessListLen, blobHashesLen := stx.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ payloadSize, accessListLen, blobHashesLen := stx.payloadSize(false)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = BlobTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := stx.encodePayload(w, b[:], payloadSize, accessListLen, blobHashesLen, false); err != nil {
+ return err
+ }
+ return nil
+}
+
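+// MarshalBinaryForHashing encodes the transaction without the optional
+// Timeboosted flag, so the resulting bytes (and hash) match the canonical
+// encoding.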
+func (stx *BlobTx) MarshalBinaryForHashing(w io.Writer) error {
+ if stx.To == nil {
+ return ErrNilToFieldTx
+ }
+
+ payloadSize, accessListLen, blobHashesLen := stx.payloadSize(true)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode TxType
b[0] = BlobTxType
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if err := stx.encodePayload(w, b[:], payloadSize, accessListLen, blobHashesLen); err != nil {
+ if err := stx.encodePayload(w, b[:], payloadSize, accessListLen, blobHashesLen, true); err != nil {
return err
}
return nil
@@ -417,9 +446,25 @@ func (stx *BlobTx) DecodeRLP(s *rlp.Stream) error {
return err
}
stx.S.SetBytes(b)
+
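+ // Timeboosted is an optional trailing list element; it is absent from
+ // hashing encodings and from payloads produced before the flag existed.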
+ if s.MoreDataInList() {
+ boolVal, err := s.Bool()
+ if err != nil {
+ return err
+ }
+ stx.Timeboosted = &boolVal
+ }
return s.ListEnd()
}
+func (tx *BlobTx) IsTimeBoosted() *bool {
+ return tx.Timeboosted
+}
+
+func (tx *BlobTx) SetTimeboosted(val *bool) {
+ tx.Timeboosted = val
+}
+
func decodeBlobVersionedHashes(hashes *[]common.Hash, s *rlp.Stream) error {
_, err := s.List()
if err != nil {
diff --git a/execution/types/blob_tx_wrapper.go b/execution/types/blob_tx_wrapper.go
index b521efedbf7..09c917e270b 100644
--- a/execution/types/blob_tx_wrapper.go
+++ b/execution/types/blob_tx_wrapper.go
@@ -78,7 +78,7 @@ func (li BlobKzgs) payloadSize() int {
func (li BlobKzgs) encodePayload(w io.Writer, b []byte, payloadSize int) error {
// prefix
- buf := newEncodingBuf()
+ buf := NewEncodingBuf()
l := rlp.EncodeListPrefix(payloadSize, buf[:])
w.Write(buf[:l])
@@ -128,7 +128,7 @@ func (li KZGProofs) payloadSize() int {
func (li KZGProofs) encodePayload(w io.Writer, b []byte, payloadSize int) error {
// prefix
- buf := newEncodingBuf()
+ buf := NewEncodingBuf()
l := rlp.EncodeListPrefix(payloadSize, buf[:])
w.Write(buf[:l])
@@ -183,7 +183,7 @@ func (blobs Blobs) payloadSize() int {
func (blobs Blobs) encodePayload(w io.Writer, b []byte, payloadSize int) error {
// prefix
- buf := newEncodingBuf()
+ buf := NewEncodingBuf()
l := rlp.EncodeListPrefix(payloadSize, buf[:])
w.Write(buf[:l])
for _, blob := range blobs {
@@ -358,7 +358,7 @@ func (txw *BlobTxWrapper) RawSignatureValues() (*uint256.Int, *uint256.Int, *uin
return txw.Tx.RawSignatureValues()
}
-func (txw *BlobTxWrapper) cachedSender() (accounts.Address, bool) { return txw.Tx.cachedSender() }
+func (txw *BlobTxWrapper) CachedSender() (accounts.Address, bool) { return txw.Tx.CachedSender() }
func (txw *BlobTxWrapper) Sender(s Signer) (accounts.Address, error) { return txw.Tx.Sender(s) }
@@ -370,6 +370,14 @@ func (txw *BlobTxWrapper) IsContractDeploy() bool { return txw.Tx.IsContractDepl
func (txw *BlobTxWrapper) Unwrap() Transaction { return &txw.Tx }
+func (txw *BlobTxWrapper) IsTimeBoosted() *bool {
+ return txw.Tx.IsTimeBoosted()
+}
+
+func (txw *BlobTxWrapper) SetTimeboosted(val *bool) {
+ txw.Tx.Timeboosted = val
+}
+
func (txw *BlobTxWrapper) DecodeRLP(s *rlp.Stream) error {
_, err := s.List()
if err != nil {
@@ -411,7 +419,7 @@ func (txw *BlobTxWrapper) EncodingSize() int {
return txw.Tx.EncodingSize()
}
func (txw *BlobTxWrapper) payloadSize() (payloadSize int) {
- l, _, _ := txw.Tx.payloadSize()
+ l, _, _ := txw.Tx.payloadSize(false)
payloadSize += l + rlp.ListPrefixLen(l)
if txw.WrapperVersion != 0 {
payloadSize += 1
@@ -425,8 +433,8 @@ func (txw *BlobTxWrapper) payloadSize() (payloadSize int) {
return
}
func (txw *BlobTxWrapper) MarshalBinaryWrapped(w io.Writer) error {
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode TxType
b[0] = BlobTxType
if _, err := w.Write(b[:1]); err != nil {
diff --git a/execution/types/block.go b/execution/types/block.go
index 94b9e092020..9c9c7615834 100644
--- a/execution/types/block.go
+++ b/execution/types/block.go
@@ -45,6 +45,23 @@ const (
var ErrBlockExceedsMaxRlpSize = errors.New("block exceeds max rlp size")
+var (
+ EmptyRootHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+ EmptyRequestsHash = common.HexToHash("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") // sha256.Sum256([]byte(""))
+ EmptyUncleHash = rlpHash([]*Header(nil))
+)
+
+var ( // Arbitrum specific
+ // EmptyTxsHash is the known hash of the empty transaction set.
+ EmptyTxsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
+ // EmptyReceiptsHash is the known hash of the empty receipt set.
+ EmptyReceiptsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
+ // EmptyWithdrawalsHash is the known hash of the empty withdrawal set.
+ EmptyWithdrawalsHash = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+)
+
// A BlockNonce is a 64-bit hash which proves (combined with the
// mix-hash) that a sufficient amount of computation has been carried
// out on a block.
@@ -173,8 +190,8 @@ func (h *Header) EncodingSize() int {
func (h *Header) EncodeRLP(w io.Writer) error {
encodingSize := h.EncodingSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// Prefix
if err := rlp.EncodeStructSizePrefix(encodingSize, w, b[:]); err != nil {
return err
@@ -851,8 +868,8 @@ func (rb RawBody) payloadSize() (payloadSize, txsLen, unclesLen, withdrawalsLen,
func (rb RawBody) EncodeRLP(w io.Writer) error {
payloadSize, txsLen, unclesLen, withdrawalsLen, blockAccessListLen := rb.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil {
return err
@@ -948,8 +965,8 @@ func (bfs BodyForStorage) payloadSize() (payloadSize, unclesLen, withdrawalsLen,
func (bfs BodyForStorage) EncodeRLP(w io.Writer) error {
payloadSize, unclesLen, withdrawalsLen, blockAccessListLen := bfs.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil {
@@ -1048,8 +1065,8 @@ func (bb Body) payloadSize() (payloadSize int, txsLen, unclesLen, withdrawalsLen
func (bb Body) EncodeRLP(w io.Writer) error {
payloadSize, txsLen, unclesLen, withdrawalsLen, blockAccessListLen := bb.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil {
return err
@@ -1328,8 +1345,8 @@ func (bb *Block) EncodingSize() int {
func (bb *Block) EncodeRLP(w io.Writer) error {
payloadSize, txsLen, unclesLen, withdrawalsLen, accessListLen := bb.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b[:]); err != nil {
return err
@@ -1430,6 +1447,7 @@ func (b *Block) Body() *Body {
return bd
}
func (b *Block) SendersToTxs(senders []common.Address) {
+ return // TODO(Arbitrum): attaching senders to txs is disabled for now
if len(senders) == 0 {
return
}
diff --git a/execution/types/block_access_list.go b/execution/types/block_access_list.go
index 190bd8af6a1..88b7d3a81c9 100644
--- a/execution/types/block_access_list.go
+++ b/execution/types/block_access_list.go
@@ -98,7 +98,7 @@ func (ac *AccountChanges) EncodeRLP(w io.Writer) error {
return err
}
encodingSize := ac.EncodingSize()
- b := newEncodingBuf()
+ b := NewEncodingBuf()
defer releaseEncodingBuf(b)
if err := rlp.EncodeStructSizePrefix(encodingSize, w, b[:]); err != nil {
@@ -191,7 +191,7 @@ func (sc *SlotChanges) EncodeRLP(w io.Writer) error {
return err
}
- b := newEncodingBuf()
+ b := NewEncodingBuf()
defer releaseEncodingBuf(b)
encodingSize := sc.EncodingSize()
@@ -235,7 +235,7 @@ func (sc *StorageChange) EncodingSize() int {
}
func (sc *StorageChange) EncodeRLP(w io.Writer) error {
- b := newEncodingBuf()
+ b := NewEncodingBuf()
defer releaseEncodingBuf(b)
encodingSize := sc.EncodingSize()
@@ -273,7 +273,7 @@ func (bc *BalanceChange) EncodingSize() int {
}
func (bc *BalanceChange) EncodeRLP(w io.Writer) error {
- b := newEncodingBuf()
+ b := NewEncodingBuf()
defer releaseEncodingBuf(b)
encodingSize := bc.EncodingSize()
@@ -316,7 +316,7 @@ func (nc *NonceChange) EncodingSize() int {
}
func (nc *NonceChange) EncodeRLP(w io.Writer) error {
- b := newEncodingBuf()
+ b := NewEncodingBuf()
defer releaseEncodingBuf(b)
encodingSize := nc.EncodingSize()
@@ -356,7 +356,7 @@ func (cc *CodeChange) EncodingSize() int {
}
func (cc *CodeChange) EncodeRLP(w io.Writer) error {
- b := newEncodingBuf()
+ b := NewEncodingBuf()
defer releaseEncodingBuf(b)
encodingSize := cc.EncodingSize()
@@ -701,7 +701,7 @@ func releaseEncodingBuf(buf *encodingBuf) {
return
}
*buf = encodingBuf{}
- pooledBuf.Put(buf)
+ PooledBuf.Put(buf)
}
func (bal BlockAccessList) Hash() common.Hash {
diff --git a/execution/types/dynamic_fee_tx.go b/execution/types/dynamic_fee_tx.go
index f284b3ab47c..d97046867a7 100644
--- a/execution/types/dynamic_fee_tx.go
+++ b/execution/types/dynamic_fee_tx.go
@@ -25,6 +25,8 @@ import (
"io"
"math/big"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+ "github.com/erigontech/erigon/common/length"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -35,10 +37,11 @@ import (
type DynamicFeeTransaction struct {
CommonTx
- ChainID *uint256.Int
- TipCap *uint256.Int
- FeeCap *uint256.Int
- AccessList AccessList
+ ChainID *uint256.Int
+ TipCap *uint256.Int
+ FeeCap *uint256.Int
+ AccessList AccessList
+ Timeboosted *bool
}
func (tx *DynamicFeeTransaction) GetFeeCap() *uint256.Int { return tx.FeeCap }
@@ -83,6 +86,9 @@ func (tx *DynamicFeeTransaction) copy() *DynamicFeeTransaction {
FeeCap: new(uint256.Int),
}
copy(cpy.AccessList, tx.AccessList)
+ if tx.Timeboosted != nil {
+ cpy.Timeboosted = &(*tx.Timeboosted)
+ }
if tx.Value != nil {
cpy.Value.Set(tx.Value)
}
@@ -110,12 +116,12 @@ func (tx *DynamicFeeTransaction) GetAuthorizations() []Authorization {
}
func (tx *DynamicFeeTransaction) EncodingSize() int {
- payloadSize, _ := tx.payloadSize()
+ payloadSize, _ := tx.payloadSize(false)
// Add envelope size and type size
return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
}
-func (tx *DynamicFeeTransaction) payloadSize() (payloadSize int, accessListLen int) {
+func (tx *DynamicFeeTransaction) payloadSize(hashingOnly bool) (payloadSize int, accessListLen int) {
payloadSize += rlp.Uint256Len(*tx.ChainID)
payloadSize += rlp.U64Len(tx.Nonce)
payloadSize += rlp.Uint256Len(*tx.TipCap)
@@ -125,7 +131,7 @@ func (tx *DynamicFeeTransaction) payloadSize() (payloadSize int, accessListLen i
// size of To
payloadSize++
if tx.To != nil {
- payloadSize += 20
+ payloadSize += length.Addr
}
payloadSize += rlp.Uint256Len(*tx.Value)
@@ -138,6 +144,11 @@ func (tx *DynamicFeeTransaction) payloadSize() (payloadSize int, accessListLen i
payloadSize += rlp.Uint256Len(tx.V)
payloadSize += rlp.Uint256Len(tx.R)
payloadSize += rlp.Uint256Len(tx.S)
+
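+ // account for the optional trailing Timeboosted flag (omitted when encoding
+ // for hashing)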
+ if tx.Timeboosted != nil && !hashingOnly {
+ payloadSize++
+ payloadSize += rlp.BoolLen()
+ }
return payloadSize, accessListLen
}
@@ -158,21 +169,36 @@ func (tx *DynamicFeeTransaction) WithSignature(signer Signer, sig []byte) (Trans
// For legacy transactions, it returns the RLP encoding. For EIP-2718 typed
// transactions, it returns the type and payload.
func (tx *DynamicFeeTransaction) MarshalBinary(w io.Writer) error {
- payloadSize, accessListLen := tx.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ payloadSize, accessListLen := tx.payloadSize(false)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = DynamicFeeTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, false); err != nil {
+ return err
+ }
+ return nil
+}
+
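+// MarshalBinaryForHashing is like MarshalBinary but leaves out the optional
+// Timeboosted flag, keeping hashes identical to the canonical encoding.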
+func (tx *DynamicFeeTransaction) MarshalBinaryForHashing(w io.Writer) error {
+ payloadSize, accessListLen := tx.payloadSize(true)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode TxType
b[0] = DynamicFeeTxType
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if err := tx.encodePayload(w, b[:], payloadSize, accessListLen); err != nil {
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, true); err != nil {
return err
}
return nil
}
-func (tx *DynamicFeeTransaction) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen int) error {
+func (tx *DynamicFeeTransaction) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen int, hashingOnly bool) error {
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
return err
@@ -229,15 +255,20 @@ func (tx *DynamicFeeTransaction) encodePayload(w io.Writer, b []byte, payloadSiz
if err := rlp.EncodeUint256(tx.S, w, b); err != nil {
return err
}
+ if tx.Timeboosted != nil && !hashingOnly {
+ if err := rlp.EncodeBool(*tx.Timeboosted, w, b); err != nil {
+ return err
+ }
+ }
return nil
}
func (tx *DynamicFeeTransaction) EncodeRLP(w io.Writer) error {
- payloadSize, accessListLen := tx.payloadSize()
+ payloadSize, accessListLen := tx.payloadSize(false)
// size of struct prefix and TxType
envelopeSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// envelope
if err := rlp.EncodeStringSizePrefix(envelopeSize, w, b[:]); err != nil {
return err
@@ -247,7 +278,7 @@ func (tx *DynamicFeeTransaction) EncodeRLP(w io.Writer) error {
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if err := tx.encodePayload(w, b[:], payloadSize, accessListLen); err != nil {
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, false); err != nil {
return err
}
return nil
@@ -312,6 +343,14 @@ func (tx *DynamicFeeTransaction) DecodeRLP(s *rlp.Stream) error {
return err
}
tx.S.SetBytes(b)
+
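+ // An optional trailing bool marks the transaction as timeboosted; older
+ // payloads simply end the list here.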
+ if s.MoreDataInList() {
+ boolVal, err := s.Bool()
+ if err != nil {
+ return err
+ }
+ tx.Timeboosted = &boolVal
+ }
return s.ListEnd()
}
@@ -336,6 +375,9 @@ func (tx *DynamicFeeTransaction) AsMessage(s Signer, baseFee *big.Int, rules *ch
checkNonce: true,
checkTransaction: true,
checkGas: true,
+
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ Tx: tx,
}
if !rules.IsLondon {
return nil, errors.New("eip-1559 transactions require London")
@@ -418,7 +460,7 @@ func (tx *DynamicFeeTransaction) GetChainID() *uint256.Int {
return tx.ChainID
}
-func (tx *DynamicFeeTransaction) cachedSender() (sender accounts.Address, ok bool) {
+func (tx *DynamicFeeTransaction) CachedSender() (sender accounts.Address, ok bool) {
s := tx.from
if s.IsNil() {
return sender, false
@@ -438,6 +480,14 @@ func (tx *DynamicFeeTransaction) Sender(signer Signer) (accounts.Address, error)
return addr, nil
}
+func (tx *DynamicFeeTransaction) IsTimeBoosted() *bool {
+ return tx.Timeboosted
+}
+
+func (tx *DynamicFeeTransaction) SetTimeboosted(val *bool) {
+ tx.Timeboosted = val
+}
+
// NewEIP1559Transaction creates an unsigned eip1559 transaction.
func NewEIP1559Transaction(chainID uint256.Int, nonce uint64, to common.Address, amount *uint256.Int, gasLimit uint64, gasPrice *uint256.Int, gasTip *uint256.Int, gasFeeCap *uint256.Int, data []byte) *DynamicFeeTransaction {
return &DynamicFeeTransaction{
diff --git a/execution/types/hashing.go b/execution/types/hashing.go
index 1e2822efa5a..944ebbbc3f3 100644
--- a/execution/types/hashing.go
+++ b/execution/types/hashing.go
@@ -31,8 +31,8 @@ import (
"github.com/erigontech/erigon/execution/rlp"
)
-// encodeBufferPool holds temporary encoder buffers for DeriveSha and TX encoding.
-var encodeBufferPool = sync.Pool{
+// EncodeBufferPool holds temporary encoder buffers for DeriveSha and TX encoding.
+var EncodeBufferPool = sync.Pool{
New: func() any { return new(bytes.Buffer) },
}
diff --git a/execution/types/hashing_test.go b/execution/types/hashing_test.go
index 3410409dff0..604d94f3851 100644
--- a/execution/types/hashing_test.go
+++ b/execution/types/hashing_test.go
@@ -136,3 +136,29 @@ func BenchmarkCurrentLargeList(b *testing.B) {
DeriveSha(largeTxList)
}
}
+
+func TestArbTransactionListHash(t *testing.T) {
+ rawStartBlock := common.FromHex("0x6bf6a42d00000000000000000000000000000000000000000000000000000009a3bd877b0000000000000000000000000000000000000000000000000000000001072bc600000000000000000000000000000000000000000000000000000000055eaf170000000000000000000000000000000000000000000000000000000000000000")
+ var tx1 ArbitrumInternalTx
+
+ //bb := bytes.NewBuffer(rawStartBlock[1:])
+ //stream := rlp.NewStream(bb, 0)
+ //err := tx1.DecodeRLP(stream)
+ err := rlp.DecodeBytes(rawStartBlock[:], &tx1)
+ require.NoError(t, err)
+
+ rawRetryable := common.FromHex("0xc9f95d3200000000000000000000000000000000000000000000000000000000000ce99300000000000000000000000000000000000000000000000000000009a3bd877b000000000000000000000000000000000000000000000000000060ffa32345b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060ffa32345b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003749c4f034022c39ecaffaba182555d4508caccc000000000000000000000000000000000000000000000000000000000000016000000000000000000000000000000000000000000000000000000000000000c4cc29a3060000000000000000000000007a3d05c70581bd345fe117c06e45f9669205384f00000000000000000000000000000000000000000000000001a942840b9d400000000000000000000000000000000000000000000000000001a71922b86b2d5a000000000000000000000000000000000000000000000000000001881255a9470000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
+ var tx2 ArbitrumSubmitRetryableTx
+ err = rlp.DecodeBytes(rawRetryable[1:], &tx2)
+ require.NoError(t, err)
+
+ require.Equal(t, tx1.Hash(), common.HexToHash("0xd420a8799a87c13e6d6e9dbeb66b40d928f76eff1acfea38845a8960bc122fda"))
+ require.Equal(t, tx2.Hash(), common.HexToHash("0xb96bee31487d0826e41f58618deed9dbcfd6ae20957a7d5cbbe07067db4c0746"))
+
+ txns := make(Transactions, 2)
+ txns[0] = &tx1
+ txns[1] = &tx2
+
+ txnRoot := DeriveSha(txns)
+ require.Equal(t, txnRoot, common.HexToHash("0x9f586abcb16e6530ab4675ce632a2ee55af15f015472c3fd537312deb8288b25"))
+}
diff --git a/execution/types/legacy_tx.go b/execution/types/legacy_tx.go
index fb210b51af3..f402264b6e7 100644
--- a/execution/types/legacy_tx.go
+++ b/execution/types/legacy_tx.go
@@ -26,6 +26,7 @@ import (
"github.com/holiman/uint256"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
"github.com/erigontech/erigon/common"
"github.com/erigontech/erigon/execution/chain"
"github.com/erigontech/erigon/execution/rlp"
@@ -95,6 +96,8 @@ func (ct *CommonTx) GetBlobHashes() []common.Hash {
type LegacyTx struct {
CommonTx
GasPrice *uint256.Int // wei per gas
+
+ Timeboosted *bool
}
func (tx *LegacyTx) GetTipCap() *uint256.Int { return tx.GasPrice }
@@ -125,13 +128,21 @@ func (tx *LegacyTx) GetAuthorizations() []Authorization {
}
func (tx *LegacyTx) Protected() bool {
- return isProtectedV(&tx.V)
+ return IsProtectedV(&tx.V)
}
func (tx *LegacyTx) Unwrap() Transaction {
return tx
}
+func (tx *LegacyTx) IsTimeBoosted() *bool {
+ return tx.Timeboosted
+}
+
+func (tx *LegacyTx) SetTimeboosted(val *bool) {
+ tx.Timeboosted = val
+}
+
// NewTransaction creates an unsigned legacy transaction.
//
// Deprecated: use NewTx instead.
@@ -177,6 +188,10 @@ func (tx *LegacyTx) copy() *LegacyTx {
},
GasPrice: new(uint256.Int),
}
+ if tx.Timeboosted != nil {
+ val := *tx.Timeboosted
+ cpy.Timeboosted = &val
+ }
if tx.Value != nil {
cpy.Value.Set(tx.Value)
}
@@ -190,10 +205,10 @@ func (tx *LegacyTx) copy() *LegacyTx {
}
func (tx *LegacyTx) EncodingSize() int {
- return tx.payloadSize()
+ return tx.payloadSize(true)
}
-func (tx *LegacyTx) payloadSize() (payloadSize int) {
+func (tx *LegacyTx) payloadSize(hashingOnly bool) (payloadSize int) {
payloadSize += rlp.U64Len(tx.Nonce)
payloadSize += rlp.Uint256Len(*tx.GasPrice)
payloadSize += rlp.U64Len(tx.GasLimit)
@@ -206,20 +221,33 @@ func (tx *LegacyTx) payloadSize() (payloadSize int) {
payloadSize += rlp.Uint256Len(tx.V)
payloadSize += rlp.Uint256Len(tx.R)
payloadSize += rlp.Uint256Len(tx.S)
+ if tx.Timeboosted != nil {
+ payloadSize += rlp.BoolLen()
+ }
return payloadSize
}
func (tx *LegacyTx) MarshalBinary(w io.Writer) error {
- payloadSize := tx.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
- if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ payloadSize := tx.payloadSize(false)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ if err := tx.encodePayload(w, b[:], payloadSize, false); err != nil {
return err
}
return nil
}
-func (tx *LegacyTx) encodePayload(w io.Writer, b []byte, payloadSize int) error {
+func (tx *LegacyTx) MarshalBinaryForHashing(w io.Writer) error {
+ payloadSize := tx.payloadSize(true)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ if err := tx.encodePayload(w, b[:], payloadSize, true); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *LegacyTx) encodePayload(w io.Writer, b []byte, payloadSize int, hashingOnly bool) error {
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
return err
@@ -261,15 +289,22 @@ func (tx *LegacyTx) encodePayload(w io.Writer, b []byte, payloadSize int) error
if err := rlp.EncodeUint256(tx.S, w, b); err != nil {
return err
}
+ if hashingOnly {
+ return nil
+ }
+
+ if tx.Timeboosted != nil {
+ return rlp.EncodeBool(*tx.Timeboosted, w, b)
+ }
return nil
}
func (tx *LegacyTx) EncodeRLP(w io.Writer) error {
- payloadSize := tx.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
- if err := tx.encodePayload(w, b[:], payloadSize); err != nil {
+ payloadSize := tx.payloadSize(false)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ if err := tx.encodePayload(w, b[:], payloadSize, false); err != nil {
return err
}
return nil
@@ -320,10 +355,15 @@ func (tx *LegacyTx) DecodeRLP(s *rlp.Stream) error {
return fmt.Errorf("read S: %w", err)
}
tx.S.SetBytes(b)
- if err = s.ListEnd(); err != nil {
- return fmt.Errorf("close txn struct: %w", err)
+
+ if s.MoreDataInList() {
+ boolVal, err := s.Bool()
+ if err != nil {
+ return err
+ }
+ tx.Timeboosted = &boolVal
}
- return nil
+ return s.ListEnd()
}
// AsMessage returns the transaction as a core.Message.
@@ -347,6 +387,8 @@ func (tx *LegacyTx) AsMessage(s Signer, _ *big.Int, _ *chain.Rules) (*Message, e
checkNonce: true,
checkTransaction: true,
checkGas: true,
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ Tx: tx,
}
var err error
@@ -430,7 +472,7 @@ func (tx *LegacyTx) GetChainID() *uint256.Int {
return DeriveChainId(&tx.V)
}
-func (tx *LegacyTx) cachedSender() (sender accounts.Address, ok bool) {
+func (tx *LegacyTx) CachedSender() (sender accounts.Address, ok bool) {
s := tx.from
if s.IsNil() {
return sender, false
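
The split between MarshalBinary and MarshalBinaryForHashing above means the optional trailing Timeboosted flag travels in the wire/DB encoding but stays out of the bytes that get hashed. A minimal sketch of that difference, assuming the exported names and import paths exactly as they appear in this diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/erigontech/erigon/common"
	"github.com/erigontech/erigon/execution/types"
	"github.com/holiman/uint256"
)

func main() {
	boosted := true
	two := uint256.NewInt(2)
	tx := types.NewTransaction(4, common.HexToAddress("0x2"), two, 21000, two, []byte("data"))
	tx.Timeboosted = &boosted

	var wire, hashing bytes.Buffer
	// Wire/DB form: includes the trailing Timeboosted bool when it is set.
	if err := tx.MarshalBinary(&wire); err != nil {
		panic(err)
	}
	// Hashing form: skips the flag so the canonical hash is unaffected.
	if err := tx.MarshalBinaryForHashing(&hashing); err != nil {
		panic(err)
	}
	fmt.Println(wire.Len(), hashing.Len()) // the wire form carries the extra flag byte
}
```

Transactions.EncodeIndex later in this diff routes through the same split via the TransactionForHashMarshaller interface, so DeriveSha always sees the hashing form.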
diff --git a/execution/types/msg_run_context.go b/execution/types/msg_run_context.go
new file mode 100644
index 00000000000..1a97af4c7e6
--- /dev/null
+++ b/execution/types/msg_run_context.go
@@ -0,0 +1,121 @@
+package types
+
+import "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+
+type MessageRunMode uint8
+
+const (
+ MessageCommitMode MessageRunMode = iota
+ MessageGasEstimationMode
+ MessageEthcallMode
+ MessageReplayMode
+ MessageRecordingMode
+)
+
+// These message modes are executed on-chain, so they cannot take any gas shortcuts.
+func (m MessageRunMode) ExecutedOnChain() bool { // can use isFree for that??
+ return m == MessageCommitMode || m == MessageReplayMode
+}
+
+type MessageRunContext struct {
+ runMode MessageRunMode
+
+ wasmCacheTag uint32
+ wasmTargets []wasmdb.WasmTarget
+}
+
+func NewMessageCommitContext(wasmTargets []wasmdb.WasmTarget) *MessageRunContext {
+ if len(wasmTargets) == 0 {
+ wasmTargets = []wasmdb.WasmTarget{wasmdb.LocalTarget()}
+ }
+ return &MessageRunContext{
+ runMode: MessageCommitMode,
+ wasmCacheTag: 1,
+ wasmTargets: wasmTargets,
+ }
+}
+
+func NewMessageReplayContext() *MessageRunContext {
+ return &MessageRunContext{
+ runMode: MessageReplayMode,
+ wasmTargets: []wasmdb.WasmTarget{wasmdb.LocalTarget()},
+ }
+}
+
+func NewMessageRecordingContext(wasmTargets []wasmdb.WasmTarget) *MessageRunContext {
+ if len(wasmTargets) == 0 {
+ wasmTargets = []wasmdb.WasmTarget{wasmdb.LocalTarget()}
+ }
+ return &MessageRunContext{
+ runMode: MessageRecordingMode,
+ wasmTargets: wasmTargets,
+ }
+}
+
+func NewMessagePrefetchContext() *MessageRunContext {
+ return NewMessageReplayContext()
+}
+
+func NewMessageEthcallContext() *MessageRunContext {
+ return &MessageRunContext{
+ runMode: MessageEthcallMode,
+ wasmTargets: []wasmdb.WasmTarget{wasmdb.LocalTarget()},
+ }
+}
+
+func NewMessageGasEstimationContext() *MessageRunContext {
+ return &MessageRunContext{
+ runMode: MessageGasEstimationMode,
+ wasmTargets: []wasmdb.WasmTarget{wasmdb.LocalTarget()},
+ }
+}
+
+func (c *MessageRunContext) IsCommitMode() bool {
+ return c.runMode == MessageCommitMode
+}
+
+// These message modes are executed on-chain, so they cannot take any gas shortcuts.
+func (c *MessageRunContext) IsExecutedOnChain() bool {
+ return c.runMode == MessageCommitMode || c.runMode == MessageReplayMode || c.runMode == MessageRecordingMode
+}
+
+func (c *MessageRunContext) IsGasEstimation() bool {
+ return c.runMode == MessageGasEstimationMode
+}
+
+func (c *MessageRunContext) IsNonMutating() bool {
+ return c.runMode == MessageGasEstimationMode || c.runMode == MessageEthcallMode
+}
+
+func (c *MessageRunContext) IsEthcall() bool {
+ return c.runMode == MessageEthcallMode
+}
+
+func (c *MessageRunContext) IsRecording() bool {
+ return c.runMode == MessageRecordingMode
+}
+
+func (c *MessageRunContext) WasmCacheTag() uint32 {
+ return c.wasmCacheTag
+}
+
+func (c *MessageRunContext) WasmTargets() []wasmdb.WasmTarget {
+ return c.wasmTargets
+}
+
+func (c *MessageRunContext) RunModeMetricName() string {
+ switch c.runMode {
+ case MessageCommitMode:
+ return "commit_runmode"
+ case MessageGasEstimationMode:
+ return "gas_estimation_runmode"
+ case MessageEthcallMode:
+ return "eth_call_runmode"
+ case MessageReplayMode:
+ return "replay_runmode"
+ case MessageRecordingMode:
+ return "recording_runmode"
+ default:
+ return "unknown_runmode"
+ }
+}
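
A minimal usage sketch for the run-context constructors and predicates defined above. The chargeL1 helper is hypothetical and only illustrates branching on the context; the import path is assumed from this diff's layout:

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon/execution/types"
)

// chargeL1 is a hypothetical helper: apply L1 data charging only for
// messages whose results end up on chain.
func chargeL1(ctx *types.MessageRunContext) bool {
	return ctx.IsExecutedOnChain()
}

func main() {
	commit := types.NewMessageCommitContext(nil) // nil falls back to the local wasm target
	estimate := types.NewMessageGasEstimationContext()

	fmt.Println(chargeL1(commit), commit.RunModeMetricName())     // true commit_runmode
	fmt.Println(chargeL1(estimate), estimate.RunModeMetricName()) // false gas_estimation_runmode
	fmt.Println(estimate.IsNonMutating())                         // true
}
```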
diff --git a/execution/types/receipt.go b/execution/types/receipt.go
index 4c599312988..ff2973f1ce1 100644
--- a/execution/types/receipt.go
+++ b/execution/types/receipt.go
@@ -69,6 +69,9 @@ type Receipt struct {
GasUsed uint64 `json:"gasUsed" gencodec:"required"`
BlobGasUsed uint64 `json:"blobGasUsed,omitempty"`
+	GasUsedForL1 uint64 `json:"gasUsedForL1"` // Arbitrum: portion of GasUsed attributed to L1 costs (see GasUsedForL2)
+ EffectiveGasPrice *big.Int `json:"effectiveGasPrice"` // Arbitrum required, but tag omitted for backwards compatibility
+
// Inclusion information: These fields provide information about the inclusion of the
// transaction corresponding to this receipt.
BlockHash common.Hash `json:"blockHash,omitempty"`
@@ -78,6 +81,10 @@ type Receipt struct {
FirstLogIndexWithinBlock uint32 `json:"-"` // field which used to store in db and re-calc
}
+func (r *Receipt) GasUsedForL2() uint64 {
+ return r.GasUsed - r.GasUsedForL1
+}
+
type receiptMarshaling struct {
Type hexutil.Uint64
PostState hexutil.Bytes
@@ -140,8 +147,8 @@ func (r Receipt) EncodeRLP(w io.Writer) error {
if r.Type == LegacyTxType {
return rlp.Encode(w, data)
}
- buf := encodeBufferPool.Get().(*bytes.Buffer)
- defer encodeBufferPool.Put(buf)
+ buf := EncodeBufferPool.Get().(*bytes.Buffer)
+ defer EncodeBufferPool.Put(buf)
buf.Reset()
if err := r.encodeTyped(data, buf); err != nil {
return err
@@ -156,8 +163,8 @@ func (r Receipt) EncodeRLP69(w io.Writer) error {
if r.Type == LegacyTxType {
return rlp.Encode(w, data)
}
- buf := encodeBufferPool.Get().(*bytes.Buffer)
- defer encodeBufferPool.Put(buf)
+ buf := EncodeBufferPool.Get().(*bytes.Buffer)
+ defer EncodeBufferPool.Put(buf)
buf.Reset()
if err := r.encodeTyped69(data, buf); err != nil {
return err
@@ -588,7 +595,7 @@ func (rs Receipts) EncodeRLP69(w io.Writer) error {
func (r *Receipt) DeriveFieldsV3ForSingleReceipt(txnIdx int, blockHash common.Hash, blockNum uint64, txn Transaction, prevCumulativeGasUsed uint64) error {
logIndex := r.FirstLogIndexWithinBlock // logIdx is unique within the block and starts from 0
- sender, ok := txn.cachedSender()
+ sender, ok := txn.CachedSender()
if !ok {
return errors.New("tx must have cached sender")
}
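
The new GasUsedForL1 field is counted inside GasUsed, which is what makes the GasUsedForL2 helper above a simple subtraction. A tiny sketch with made-up numbers:

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon/execution/types"
)

func main() {
	// Hypothetical values: total gas used includes the L1 data component.
	r := &types.Receipt{GasUsed: 120_000, GasUsedForL1: 35_000}
	fmt.Println(r.GasUsedForL2()) // 85000 = GasUsed - GasUsedForL1
}
```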
diff --git a/execution/types/set_code_tx.go b/execution/types/set_code_tx.go
index 0e165711077..25b03962ce3 100644
--- a/execution/types/set_code_tx.go
+++ b/execution/types/set_code_tx.go
@@ -23,6 +23,7 @@ import (
"io"
"math/big"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -69,13 +70,13 @@ func (tx *SetCodeTransaction) copy() *SetCodeTransaction {
}
func (tx *SetCodeTransaction) EncodingSize() int {
- payloadSize, _, _ := tx.payloadSize()
+ payloadSize, _, _ := tx.payloadSize(false)
// Add envelope size and type size
return 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
}
-func (tx *SetCodeTransaction) payloadSize() (payloadSize, accessListLen, authorizationsLen int) {
- payloadSize, accessListLen = tx.DynamicFeeTransaction.payloadSize()
+func (tx *SetCodeTransaction) payloadSize(hashingOnly bool) (payloadSize, accessListLen, authorizationsLen int) {
+ payloadSize, accessListLen = tx.DynamicFeeTransaction.payloadSize(hashingOnly)
// size of Authorizations
authorizationsLen = authorizationsSize(tx.Authorizations)
payloadSize += rlp.ListPrefixLen(authorizationsLen) + authorizationsLen
@@ -101,15 +102,35 @@ func (tx *SetCodeTransaction) MarshalBinary(w io.Writer) error {
if tx.To == nil {
return ErrNilToFieldTx
}
- payloadSize, accessListLen, authorizationsLen := tx.payloadSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ hashingOnly := false
+ payloadSize, accessListLen, authorizationsLen := tx.payloadSize(hashingOnly)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode TxType
b[0] = SetCodeTxType
if _, err := w.Write(b[:1]); err != nil {
return err
}
- if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, authorizationsLen); err != nil {
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, authorizationsLen, hashingOnly); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *SetCodeTransaction) MarshalBinaryForHashing(w io.Writer) error {
+ if tx.To == nil {
+ return ErrNilToFieldTx
+ }
+ hashingOnly := true
+ payloadSize, accessListLen, authorizationsLen := tx.payloadSize(hashingOnly)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
+ // encode TxType
+ b[0] = SetCodeTxType
+ if _, err := w.Write(b[:1]); err != nil {
+ return err
+ }
+ if err := tx.encodePayload(w, b[:], payloadSize, accessListLen, authorizationsLen, hashingOnly); err != nil {
return err
}
return nil
@@ -135,6 +156,9 @@ func (tx *SetCodeTransaction) AsMessage(s Signer, baseFee *big.Int, rules *chain
checkNonce: true,
checkTransaction: true,
checkGas: true,
+
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
+ Tx: tx,
}
if !rules.IsPrague {
return nil, errors.New("SetCodeTransaction is only supported in Prague")
@@ -230,10 +254,10 @@ func (tx *SetCodeTransaction) EncodeRLP(w io.Writer) error {
if tx.To == nil {
return ErrNilToFieldTx
}
- payloadSize, accessListLen, authorizationsLen := tx.payloadSize()
+ payloadSize, accessListLen, authorizationsLen := tx.payloadSize(false)
envelopSize := 1 + rlp.ListPrefixLen(payloadSize) + payloadSize
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
// encode envelope size
if err := rlp.EncodeStringSizePrefix(envelopSize, w, b[:]); err != nil {
return err
@@ -244,7 +268,7 @@ func (tx *SetCodeTransaction) EncodeRLP(w io.Writer) error {
return err
}
- return tx.encodePayload(w, b[:], payloadSize, accessListLen, authorizationsLen)
+ return tx.encodePayload(w, b[:], payloadSize, accessListLen, authorizationsLen, false)
}
func (tx *SetCodeTransaction) DecodeRLP(s *rlp.Stream) error {
@@ -311,10 +335,18 @@ func (tx *SetCodeTransaction) DecodeRLP(s *rlp.Stream) error {
return err
}
tx.S.SetBytes(b)
+
+ if s.MoreDataInList() {
+ boolVal, err := s.Bool()
+ if err != nil {
+ return err
+ }
+ tx.Timeboosted = &boolVal
+ }
return s.ListEnd()
}
-func (tx *SetCodeTransaction) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen, authorizationsLen int) error {
+func (tx *SetCodeTransaction) encodePayload(w io.Writer, b []byte, payloadSize, accessListLen, authorizationsLen int, hashingOnly bool) error {
// prefix
if err := rlp.EncodeStructSizePrefix(payloadSize, w, b); err != nil {
return err
@@ -379,8 +411,13 @@ func (tx *SetCodeTransaction) encodePayload(w io.Writer, b []byte, payloadSize,
if err := rlp.EncodeUint256(tx.S, w, b); err != nil {
return err
}
- return nil
+ if tx.Timeboosted != nil && !hashingOnly {
+ if err := rlp.EncodeBool(*tx.Timeboosted, w, b); err != nil {
+ return err
+ }
+ }
+ return nil
}
// ParseDelegation tries to parse the address from a delegation slice.
diff --git a/execution/types/timeboosted_tx_rlp_test.go b/execution/types/timeboosted_tx_rlp_test.go
new file mode 100644
index 00000000000..7ce547afe6b
--- /dev/null
+++ b/execution/types/timeboosted_tx_rlp_test.go
@@ -0,0 +1,340 @@
+package types
+
+import (
+ "bytes"
+ "math/big"
+ "testing"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/rlp"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_LegacyTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []*bool{boolPtr(true), boolPtr(false), nil}
+ for i := 0; i < 3; i++ {
+ two := uint256.NewInt(2)
+ ltx := NewTransaction(4, common.HexToAddress("0x2"), two, 21000, two, []byte("data"))
+ ltx.Timeboosted = timeboostedVals[i]
+
+ buf := bytes.NewBuffer(nil)
+ err := ltx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ var ltx2 LegacyTx
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), uint64(buf.Len()))
+ err = ltx2.DecodeRLP(stream)
+ require.NoError(t, err)
+
+ require.EqualValues(t, ltx.Timeboosted, ltx2.Timeboosted)
+ require.EqualValues(t, ltx.GasLimit, ltx2.GasLimit)
+ require.EqualValues(t, ltx.GasPrice.Bytes(), ltx2.GasPrice.Bytes())
+ require.EqualValues(t, ltx.Value.Bytes(), ltx2.Value.Bytes())
+ require.EqualValues(t, ltx.Data, ltx2.Data)
+ require.EqualValues(t, ltx.To, ltx2.To)
+
+ if timeboostedVals[i] == nil {
+ require.Nil(t, ltx2.Timeboosted)
+ } else {
+ require.EqualValues(t, timeboostedVals[i], ltx2.IsTimeBoosted())
+ }
+ }
+}
+
+func Test_DynamicFeeTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []*bool{boolPtr(true), boolPtr(false), nil}
+ for i := 0; i < 3; i++ {
+ two := uint256.NewInt(2)
+ three := uint256.NewInt(3)
+ chainID := uint256.NewInt(1)
+ accessList := AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+
+ tx := &DynamicFeeTransaction{
+ CommonTx: CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ ChainID: chainID,
+ TipCap: two,
+ FeeCap: three,
+ AccessList: accessList,
+ Timeboosted: timeboostedVals[i],
+ }
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*DynamicFeeTransaction)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.TipCap.Bytes(), tx2.TipCap.Bytes())
+ require.EqualValues(t, tx.FeeCap.Bytes(), tx2.FeeCap.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+
+ if timeboostedVals[i] == nil {
+ require.Nil(t, tx2.Timeboosted)
+ } else {
+ require.EqualValues(t, timeboostedVals[i], tx2.IsTimeBoosted())
+ }
+ }
+}
+
+func Test_AccessListTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []*bool{boolPtr(true), boolPtr(false), nil}
+ for i := 0; i < 3; i++ {
+ two := uint256.NewInt(2)
+ chainID := uint256.NewInt(1)
+ accessList := AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+
+ tx := &AccessListTx{
+ LegacyTx: LegacyTx{
+ CommonTx: CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ GasPrice: two,
+ },
+ ChainID: chainID,
+ AccessList: accessList,
+ }
+ tx.Timeboosted = timeboostedVals[i]
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*AccessListTx)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.GasPrice.Bytes(), tx2.GasPrice.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+
+ if timeboostedVals[i] == nil {
+ require.Nil(t, tx2.Timeboosted)
+ } else {
+ require.EqualValues(t, timeboostedVals[i], tx2.IsTimeBoosted())
+ }
+
+ buf.Reset()
+ err = tx.MarshalBinaryForHashing(buf)
+ require.NoError(t, err)
+ }
+}
+
+func Test_BlobTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []*bool{boolPtr(true), boolPtr(false), nil}
+ for i := 0; i < 3; i++ {
+ two := uint256.NewInt(2)
+ three := uint256.NewInt(3)
+ chainID := uint256.NewInt(1)
+ maxFeePerBlobGas := uint256.NewInt(5)
+ accessList := AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+ blobHashes := []common.Hash{common.HexToHash("0x01"), common.HexToHash("0x02")}
+
+ tx := &BlobTx{
+ DynamicFeeTransaction: DynamicFeeTransaction{
+ CommonTx: CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ ChainID: chainID,
+ TipCap: two,
+ FeeCap: three,
+ AccessList: accessList,
+ },
+ MaxFeePerBlobGas: maxFeePerBlobGas,
+ BlobVersionedHashes: blobHashes,
+ }
+ tx.DynamicFeeTransaction.Timeboosted = timeboostedVals[i]
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*BlobTx)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.TipCap.Bytes(), tx2.TipCap.Bytes())
+ require.EqualValues(t, tx.FeeCap.Bytes(), tx2.FeeCap.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, tx.MaxFeePerBlobGas.Bytes(), tx2.MaxFeePerBlobGas.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+ require.EqualValues(t, len(tx.BlobVersionedHashes), len(tx2.BlobVersionedHashes))
+ if timeboostedVals[i] == nil {
+ require.Nil(t, tx2.Timeboosted)
+ } else {
+ require.EqualValues(t, timeboostedVals[i], tx2.IsTimeBoosted())
+ }
+ }
+}
+
+func Test_SetCodeTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []*bool{boolPtr(true), boolPtr(false), nil}
+ for i := 0; i < 3; i++ {
+ two := uint256.NewInt(2)
+ three := uint256.NewInt(3)
+ chainID := uint256.NewInt(1)
+ accessList := AccessList{
+ {Address: common.HexToAddress("0x1"), StorageKeys: []common.Hash{common.HexToHash("0x01")}},
+ }
+
+ auth := Authorization{
+ ChainID: *chainID,
+ Address: common.HexToAddress("0x3"),
+ Nonce: 1,
+ }
+
+ tx := &SetCodeTransaction{
+ DynamicFeeTransaction: DynamicFeeTransaction{
+ CommonTx: CommonTx{
+ Nonce: 4,
+ To: &common.Address{0x2},
+ Value: two,
+ GasLimit: 21000,
+ Data: []byte("data"),
+ },
+ ChainID: chainID,
+ TipCap: two,
+ FeeCap: three,
+ AccessList: accessList,
+ Timeboosted: timeboostedVals[i],
+ },
+ Authorizations: []Authorization{auth},
+ }
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*SetCodeTransaction)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.GasLimit, tx2.GasLimit)
+ require.EqualValues(t, tx.TipCap.Bytes(), tx2.TipCap.Bytes())
+ require.EqualValues(t, tx.FeeCap.Bytes(), tx2.FeeCap.Bytes())
+ require.EqualValues(t, tx.Value.Bytes(), tx2.Value.Bytes())
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.ChainID.Bytes(), tx2.ChainID.Bytes())
+ require.EqualValues(t, len(tx.AccessList), len(tx2.AccessList))
+ require.EqualValues(t, len(tx.Authorizations), len(tx2.Authorizations))
+
+ if timeboostedVals[i] == nil {
+ require.Nil(t, tx2.Timeboosted)
+ } else {
+ require.EqualValues(t, timeboostedVals[i], tx2.IsTimeBoosted())
+ }
+ }
+}
+
+func Test_ArbRetryTx_Timeboosted(t *testing.T) {
+ timeboostedVals := []*bool{boolPtr(true), boolPtr(false), nil}
+ for i := 0; i < 3; i++ {
+ two := big.NewInt(2)
+ chainID := big.NewInt(1)
+ ticketId := common.HexToHash("0x123")
+ toAddr := common.HexToAddress("0x2")
+
+ tx := &ArbitrumRetryTx{
+ ChainId: chainID,
+ Nonce: 4,
+ From: common.HexToAddress("0x1"),
+ GasFeeCap: two,
+ Gas: 21000,
+ To: &toAddr,
+ Value: two,
+ Data: []byte("data"),
+ TicketId: ticketId,
+ RefundTo: common.HexToAddress("0x3"),
+ MaxRefund: two,
+ SubmissionFeeRefund: two,
+ }
+ tx.Timeboosted = timeboostedVals[i]
+
+ buf := bytes.NewBuffer(nil)
+ err := tx.EncodeRLP(buf)
+ require.NoError(t, err)
+
+ // Decode using DecodeRLPTransaction pattern
+ stream := rlp.NewStream(bytes.NewReader(buf.Bytes()), 0)
+ decoded, err := DecodeRLPTransaction(stream, false)
+ require.NoError(t, err)
+
+ tx2, ok := decoded.(*ArbitrumRetryTx)
+ require.True(t, ok)
+
+ require.EqualValues(t, tx.Timeboosted, tx2.Timeboosted)
+ require.EqualValues(t, tx.Gas, tx2.Gas)
+ require.EqualValues(t, tx.GasFeeCap, tx2.GasFeeCap)
+ require.EqualValues(t, tx.Value, tx2.Value)
+ require.EqualValues(t, tx.Data, tx2.Data)
+ require.EqualValues(t, tx.To, tx2.To)
+ require.EqualValues(t, tx.From, tx2.From)
+ require.EqualValues(t, tx.Nonce, tx2.Nonce)
+ require.EqualValues(t, tx.ChainId, tx2.ChainId)
+ require.EqualValues(t, tx.TicketId, tx2.TicketId)
+ require.EqualValues(t, tx.RefundTo, tx2.RefundTo)
+ require.EqualValues(t, tx.MaxRefund, tx2.MaxRefund)
+ require.EqualValues(t, tx.SubmissionFeeRefund, tx2.SubmissionFeeRefund)
+ if timeboostedVals[i] == nil {
+ require.Nil(t, tx2.Timeboosted)
+ } else {
+ require.EqualValues(t, timeboostedVals[i], tx2.IsTimeBoosted())
+ }
+ }
+}
diff --git a/execution/types/transaction.go b/execution/types/transaction.go
index 746f463d5eb..f0b5bd26de0 100644
--- a/execution/types/transaction.go
+++ b/execution/types/transaction.go
@@ -27,6 +27,7 @@ import (
"math/big"
"sync/atomic"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
"github.com/holiman/uint256"
"github.com/protolambda/ztyp/codec"
@@ -56,8 +57,38 @@ const (
BlobTxType
SetCodeTxType
AccountAbstractionTxType
+
+ // Arbitrum transaction types
+ ArbitrumDepositTxType byte = 0x64
+ ArbitrumUnsignedTxType byte = 0x65
+ ArbitrumContractTxType byte = 0x66
+ ArbitrumRetryTxType byte = 0x68
+ ArbitrumSubmitRetryableTxType byte = 0x69
+ ArbitrumInternalTxType byte = 0x6A
+ ArbitrumLegacyTxType byte = 0x78
)
+type constructTxnFunc = func() Transaction
+
+var externalTxnTypes map[byte]constructTxnFunc
+
+func RegisterTransaction(txnType byte, creator constructTxnFunc) {
+ if externalTxnTypes == nil {
+ externalTxnTypes = make(map[byte]constructTxnFunc)
+ }
+ externalTxnTypes[txnType] = creator
+}
+
+func CreateTransactioByType(txnType byte) Transaction {
+ if externalTxnTypes == nil {
+ externalTxnTypes = make(map[byte]constructTxnFunc)
+ }
+ if ctor, ok := externalTxnTypes[txnType]; ok {
+ return ctor()
+ }
+ return nil
+}
+
// Transaction is an Ethereum transaction.
type Transaction interface {
Type() byte
@@ -92,11 +123,19 @@ type Transaction interface {
// signing method. The cache is invalidated if the cached signer does
// not match the signer used in the current call.
Sender(Signer) (accounts.Address, error)
- cachedSender() (accounts.Address, bool)
+ CachedSender() (accounts.Address, bool)
GetSender() (accounts.Address, bool)
SetSender(accounts.Address)
IsContractDeploy() bool
Unwrap() Transaction // If this is a network wrapper, returns the unwrapped txn. Otherwise returns itself.
+
+	// TODO: remove the timeboosted methods from this interface; instead, check the tx type and, where it is supported, use a type assertion and the type-specific methods
+ IsTimeBoosted() *bool
+ SetTimeboosted(val *bool)
+}
+
+type TransactionForHashMarshaller interface {
+ MarshalBinaryForHashing(w io.Writer) error
}
// TransactionMisc is collection of miscellaneous fields for transaction that is supposed to be embedded into concrete
@@ -218,6 +257,20 @@ func UnmarshalTransactionFromBinary(data []byte, blobTxnsAreWrappedWithBlobs boo
t = &SetCodeTransaction{}
case AccountAbstractionTxType:
t = &AccountAbstractionTransaction{}
+ case ArbitrumDepositTxType:
+ t = &ArbitrumDepositTx{}
+ case ArbitrumUnsignedTxType:
+ t = &ArbitrumUnsignedTx{}
+ case ArbitrumContractTxType:
+ t = &ArbitrumContractTx{}
+ case ArbitrumRetryTxType:
+ t = &ArbitrumRetryTx{}
+ case ArbitrumSubmitRetryableTxType:
+ t = &ArbitrumSubmitRetryableTx{}
+ case ArbitrumInternalTxType:
+ t = &ArbitrumInternalTx{}
+ case ArbitrumLegacyTxType:
+ t = &ArbitrumLegacyTxData{}
default:
if data[0] >= 0x80 {
// txn is type legacy which is RLP encoded
@@ -296,13 +349,13 @@ func TypedTransactionMarshalledAsRlpString(data []byte) bool {
return len(data) > 0 && 0x80 <= data[0] && data[0] < 0xc0
}
-func sanityCheckSignature(v *uint256.Int, r *uint256.Int, s *uint256.Int, maybeProtected bool) error {
- if isProtectedV(v) && !maybeProtected {
+func SanityCheckSignature(v *uint256.Int, r *uint256.Int, s *uint256.Int, maybeProtected bool) error {
+ if IsProtectedV(v) && !maybeProtected {
return ErrUnexpectedProtection
}
var plainV byte
- if isProtectedV(v) {
+ if IsProtectedV(v) {
chainID := DeriveChainId(v).Uint64()
plainV = byte(v.Uint64() - 35 - 2*chainID)
} else if maybeProtected {
@@ -322,7 +375,7 @@ func sanityCheckSignature(v *uint256.Int, r *uint256.Int, s *uint256.Int, maybeP
return nil
}
-func isProtectedV(V *uint256.Int) bool {
+func IsProtectedV(V *uint256.Int) bool {
if V.BitLen() <= 8 {
v := V.Uint64()
return v != 27 && v != 28 && v != 1 && v != 0
@@ -341,7 +394,14 @@ func (s Transactions) Len() int { return len(s) }
// because we assume that *Transaction will only ever contain valid txs that were either
// constructed by decoding or via public API in this package.
func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) {
- if err := s[i].MarshalBinary(w); err != nil {
+ var err error
+ switch tm := s[i].(type) {
+ case TransactionForHashMarshaller:
+ err = tm.MarshalBinaryForHashing(w)
+ default:
+ err = s[i].MarshalBinary(w)
+ }
+ if err != nil {
panic(err)
}
}
@@ -395,8 +455,25 @@ type Message struct {
isFree bool
blobHashes []common.Hash
authorizations []Authorization
+
+ // Arbitrum
+ // L1 charging is disabled when SkipL1Charging is true.
+ // This field might be set to true for operations like RPC eth_call.
+ SkipAccountChecks bool // same as checkNonce
+ SkipL1Charging bool
+	TxRunMode         MessageRunMode // should be treated as deprecated
+ TxRunContext *MessageRunContext
+ Tx Transaction
+ EffectiveGas uint64 // amount of gas effectively used by transaction (used in ArbitrumSubmitRetryableTx)
}
+// Arbitrum
+func (m *Message) SetGasPrice(f *uint256.Int) { m.gasPrice.Set(f) }
+func (m *Message) SetFeeCap(f *uint256.Int) { m.feeCap.Set(f) }
+func (m *Message) SetTip(f *uint256.Int) { m.tipCap.Set(f) }
+
+// end of Arbitrum additions
+
func NewMessage(from accounts.Address, to accounts.Address, nonce uint64, amount *uint256.Int, gasLimit uint64,
gasPrice *uint256.Int, feeCap, tipCap *uint256.Int, data []byte, accessList AccessList, checkNonce bool,
checkTransaction bool, checkGas bool, isFree bool, maxFeePerBlobGas *uint256.Int,
@@ -413,6 +490,8 @@ func NewMessage(from accounts.Address, to accounts.Address, nonce uint64, amount
checkTransaction: checkTransaction,
checkGas: checkGas,
isFree: isFree,
+
+ TxRunContext: NewMessageCommitContext([]wasmdb.WasmTarget{wasmdb.LocalTarget()}),
}
if gasPrice != nil {
m.gasPrice.Set(gasPrice)
@@ -463,6 +542,16 @@ func (m *Message) SetIsFree(isFree bool) {
m.isFree = isFree
}
+func (msg *Message) SetTo(addr *accounts.Address) { msg.to = *addr }
+func (msg *Message) SetFrom(addr *accounts.Address) { msg.from = *addr }
+func (msg *Message) SetNonce(val uint64) { msg.nonce = val }
+func (msg *Message) SetAmount(f *uint256.Int) { msg.amount.Set(f) }
+func (msg *Message) SetGasLimit(val uint64) { msg.gasLimit = val }
+func (msg *Message) SetData(data []byte) { msg.data = data }
+func (msg *Message) SetAccessList(accessList AccessList) { msg.accessList = accessList }
+func (msg *Message) SetSkipAccountCheck(skipCheck bool) { msg.SkipAccountChecks = skipCheck }
+func (msg *Message) SetBlobHashes(blobHashes []common.Hash) { msg.blobHashes = blobHashes }
+
func (m *Message) ChangeGas(globalGasCap, desiredGas uint64) {
gas := globalGasCap
if gas == 0 {
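
The RegisterTransaction/CreateTransactioByType pair above gives external packages a hook to plug in their own typed transactions. A minimal sketch, reusing the Arbitrum deposit type purely as an illustration (any type implementing Transaction would do):

```go
package main

import (
	"fmt"

	"github.com/erigontech/erigon/execution/types"
)

func main() {
	// Register a constructor for a transaction type byte.
	types.RegisterTransaction(types.ArbitrumDepositTxType, func() types.Transaction {
		return &types.ArbitrumDepositTx{}
	})

	txn := types.CreateTransactioByType(types.ArbitrumDepositTxType)
	fmt.Printf("%T\n", txn) // *types.ArbitrumDepositTx

	// Type bytes with no registered constructor return nil
	// (assuming nothing has registered 0x7f).
	fmt.Println(types.CreateTransactioByType(0x7f) == nil) // true
}
```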
diff --git a/execution/types/transaction_marshalling.go b/execution/types/transaction_marshalling.go
index fce519d58c0..467698a9db8 100644
--- a/execution/types/transaction_marshalling.go
+++ b/execution/types/transaction_marshalling.go
@@ -32,8 +32,12 @@ import (
"github.com/erigontech/erigon/common/hexutil"
)
-// txJSON is the JSON representation of transactions.
-type txJSON struct {
+type UnmarshalExtTxnFuncType = func(txType byte, input []byte) (Transaction, error)
+
+var UnmarshalExtTxnFunc UnmarshalExtTxnFuncType
+
+// TxJSON is the JSON representation of transactions.
+type TxJSON struct {
Type hexutil.Uint64 `json:"type"`
// Common transaction fields:
@@ -115,7 +119,7 @@ func (a JsonAuthorization) ToAuthorization() (Authorization, error) {
}
func (tx *LegacyTx) MarshalJSON() ([]byte, error) {
- var enc txJSON
+ var enc TxJSON
// These are set for all txn types.
enc.Hash = tx.Hash()
enc.Type = hexutil.Uint64(tx.Type())
@@ -135,7 +139,7 @@ func (tx *LegacyTx) MarshalJSON() ([]byte, error) {
}
func (tx *AccessListTx) MarshalJSON() ([]byte, error) {
- var enc txJSON
+ var enc TxJSON
// These are set for all txn types.
enc.Hash = tx.Hash()
enc.Type = hexutil.Uint64(tx.Type())
@@ -154,7 +158,7 @@ func (tx *AccessListTx) MarshalJSON() ([]byte, error) {
}
func (tx *DynamicFeeTransaction) MarshalJSON() ([]byte, error) {
- var enc txJSON
+ var enc TxJSON
// These are set for all txn types.
enc.Hash = tx.Hash()
enc.Type = hexutil.Uint64(tx.Type())
@@ -173,8 +177,8 @@ func (tx *DynamicFeeTransaction) MarshalJSON() ([]byte, error) {
return json.Marshal(&enc)
}
-func toBlobTxJSON(tx *BlobTx) *txJSON {
- var enc txJSON
+func toBlobTxJSON(tx *BlobTx) *TxJSON {
+ var enc TxJSON
// These are set for all txn types.
enc.Hash = tx.Hash()
enc.Type = hexutil.Uint64(tx.Type())
@@ -254,13 +258,50 @@ func UnmarshalTransactionFromJSON(input []byte) (Transaction, error) {
return nil, err
}
return tx, nil
+
+ case ArbitrumDepositTxType:
+ tx := new(ArbitrumDepositTx)
+ if err = tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ case ArbitrumInternalTxType:
+ tx := new(ArbitrumInternalTx)
+ if err = tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ case ArbitrumUnsignedTxType:
+ tx := new(ArbitrumUnsignedTx)
+ if err = tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ case ArbitrumContractTxType:
+ tx := new(ArbitrumContractTx)
+ if err = tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ case ArbitrumRetryTxType:
+ tx := new(ArbitrumRetryTx)
+ if err = tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ case ArbitrumSubmitRetryableTxType:
+ tx := new(ArbitrumSubmitRetryableTx)
+ if err = tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
+ case ArbitrumLegacyTxType:
+ tx := new(ArbitrumLegacyTxData)
+ if err = tx.UnmarshalJSON(input); err != nil {
+ return nil, err
+ }
default:
- return nil, fmt.Errorf("unknown transaction type: %v", txType)
+ // return nil, fmt.Errorf("unknown transaction type: %v", txType)
}
+ return nil, fmt.Errorf("unknown transaction type: %v", txType)
}
func (tx *LegacyTx) UnmarshalJSON(input []byte) error {
- var dec txJSON
+ var dec TxJSON
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
@@ -320,7 +361,7 @@ func (tx *LegacyTx) UnmarshalJSON(input []byte) error {
}
withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero()
if withSignature {
- if err := sanityCheckSignature(&tx.V, &tx.R, &tx.S, true); err != nil {
+ if err := SanityCheckSignature(&tx.V, &tx.R, &tx.S, true); err != nil {
return err
}
}
@@ -328,7 +369,7 @@ func (tx *LegacyTx) UnmarshalJSON(input []byte) error {
}
func (tx *AccessListTx) UnmarshalJSON(input []byte) error {
- var dec txJSON
+ var dec TxJSON
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
@@ -396,14 +437,14 @@ func (tx *AccessListTx) UnmarshalJSON(input []byte) error {
}
withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero()
if withSignature {
- if err := sanityCheckSignature(&tx.V, &tx.R, &tx.S, false); err != nil {
+ if err := SanityCheckSignature(&tx.V, &tx.R, &tx.S, false); err != nil {
return err
}
}
return nil
}
-func (tx *DynamicFeeTransaction) unmarshalJson(dec txJSON) error {
+func (tx *DynamicFeeTransaction) unmarshalJson(dec TxJSON) error {
// Access list is optional for now.
if dec.AccessList != nil {
tx.AccessList = *dec.AccessList
@@ -475,7 +516,7 @@ func (tx *DynamicFeeTransaction) unmarshalJson(dec txJSON) error {
}
withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero()
if withSignature {
- if err := sanityCheckSignature(&tx.V, &tx.R, &tx.S, false); err != nil {
+ if err := SanityCheckSignature(&tx.V, &tx.R, &tx.S, false); err != nil {
return err
}
}
@@ -483,7 +524,7 @@ func (tx *DynamicFeeTransaction) unmarshalJson(dec txJSON) error {
}
func (tx *DynamicFeeTransaction) UnmarshalJSON(input []byte) error {
- var dec txJSON
+ var dec TxJSON
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
@@ -492,7 +533,7 @@ func (tx *DynamicFeeTransaction) UnmarshalJSON(input []byte) error {
}
func (tx *SetCodeTransaction) UnmarshalJSON(input []byte) error {
- var dec txJSON
+ var dec TxJSON
if err := json.Unmarshal(input, &dec); err != nil {
return err
}
@@ -511,8 +552,54 @@ func (tx *SetCodeTransaction) UnmarshalJSON(input []byte) error {
return nil
}
+func (tx *ArbitrumContractTx) UnmarshalJSON(input []byte) error {
+ var dec TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+func (t *ArbitrumRetryTx) UnmarshalJSON(input []byte) error {
+ var dec TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+func (tx *ArbitrumSubmitRetryableTx) UnmarshalJSON(input []byte) error {
+ var dec TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumDepositTx) UnmarshalJSON(input []byte) error {
+ var dec TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumUnsignedTx) UnmarshalJSON(input []byte) error {
+ var dec TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (tx *ArbitrumInternalTx) UnmarshalJSON(input []byte) error {
+ var dec TxJSON
+ if err := json.Unmarshal(input, &dec); err != nil {
+ return err
+ }
+ return nil
+}
+
func UnmarshalBlobTxJSON(input []byte) (Transaction, error) {
- var dec txJSON
+ var dec TxJSON
if err := json.Unmarshal(input, &dec); err != nil {
return nil, err
}
@@ -601,7 +688,7 @@ func UnmarshalBlobTxJSON(input []byte) (Transaction, error) {
withSignature := !tx.V.IsZero() || !tx.R.IsZero() || !tx.S.IsZero()
if withSignature {
- if err := sanityCheckSignature(&tx.V, &tx.R, &tx.S, false); err != nil {
+ if err := SanityCheckSignature(&tx.V, &tx.R, &tx.S, false); err != nil {
return nil, err
}
}
diff --git a/execution/types/transaction_signing.go b/execution/types/transaction_signing.go
index cfc827093a0..ecce6bde33e 100644
--- a/execution/types/transaction_signing.go
+++ b/execution/types/transaction_signing.go
@@ -38,65 +38,149 @@ import (
var ErrInvalidChainId = errors.New("invalid chain id for signer")
+func MakeSignerArb(config *chain.Config, blockNumber, blockTime, arbosVersion uint64) *Signer {
+ var signer Signer
+ var chainId uint256.Int
+ if config.ChainID != nil {
+ overflow := chainId.SetFromBig(config.ChainID)
+ if overflow {
+ panic("chainID higher than 2^256-1")
+ }
+ }
+ signer.unprotected = true
+ switch {
+ case config.IsPrague(blockTime, arbosVersion):
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.blob = true
+ signer.setCode = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsBhilai(blockNumber):
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.blob = false
+ signer.setCode = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsCancun(blockTime, arbosVersion):
+ // All transaction types are still supported
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.blob = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsLondon(blockNumber):
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsBerlin(blockNumber):
+ signer.protected = true
+ signer.accessList = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsSpuriousDragon(blockNumber):
+ signer.protected = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsHomestead(blockNumber):
+ default:
+ // Only allow malleable transactions in Frontier
+ signer.malleable = true
+ }
+ return &signer
+}
+
// MakeSigner returns a Signer based on the given chain config and block number.
func MakeSigner(config *chain.Config, blockNumber uint64, blockTime uint64) *Signer {
var signer Signer
-
- if config != nil {
- var chainId uint256.Int
- if config.ChainID != nil {
- overflow := chainId.SetFromBig(config.ChainID)
- if overflow {
- panic("chainID higher than 2^256-1")
- }
- }
- signer.unprotected = true
- switch {
- case config.IsPrague(blockTime):
- signer.protected = true
- signer.accessList = true
- signer.dynamicFee = true
- signer.blob = true
- signer.setCode = true
- signer.chainID.Set(&chainId)
- signer.chainIDMul.Lsh(&chainId, 1) // ×2
- case config.IsBhilai(blockNumber):
- signer.protected = true
- signer.accessList = true
- signer.dynamicFee = true
- signer.blob = false
- signer.setCode = true
- signer.chainID.Set(&chainId)
- signer.chainIDMul.Lsh(&chainId, 1) // ×2
- case config.IsCancun(blockTime):
- // All transaction types are still supported
- signer.protected = true
- signer.accessList = true
- signer.dynamicFee = true
- signer.blob = true
- signer.chainID.Set(&chainId)
- signer.chainIDMul.Lsh(&chainId, 1) // ×2
- case config.IsLondon(blockNumber):
- signer.protected = true
- signer.accessList = true
- signer.dynamicFee = true
- signer.chainID.Set(&chainId)
- signer.chainIDMul.Lsh(&chainId, 1) // ×2
- case config.IsBerlin(blockNumber):
- signer.protected = true
- signer.accessList = true
- signer.chainID.Set(&chainId)
- signer.chainIDMul.Lsh(&chainId, 1) // ×2
- case config.IsSpuriousDragon(blockNumber):
- signer.protected = true
- signer.chainID.Set(&chainId)
- signer.chainIDMul.Lsh(&chainId, 1) // ×2
- case config.IsHomestead(blockNumber):
- default:
- // Only allow malleable transactions in Frontier
- signer.malleable = true
+ var chainId uint256.Int
+ if config.ChainID != nil {
+ overflow := chainId.SetFromBig(config.ChainID)
+ if overflow {
+ panic("chainID higher than 2^256-1")
}
}
+ signer.unprotected = true
+ switch {
+ case config.IsPrague(blockTime, 0):
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.blob = true
+ signer.setCode = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsCancun(blockTime, 0):
+ // All transaction types are still supported
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.blob = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsLondon(blockNumber):
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsBerlin(blockNumber):
+ signer.protected = true
+ signer.accessList = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsSpuriousDragon(blockNumber):
+ signer.protected = true
+ signer.chainID.Set(&chainId)
+ signer.chainIDMul.Lsh(&chainId, 1) // ×2
+ case config.IsHomestead(blockNumber):
+ default:
+ // Only allow malleable transactions in Frontier
+ signer.malleable = true
+ }
+ return &signer
+}
+
+func NewLondonSigner(chainID *big.Int) *Signer {
+ var signer Signer
+ signer.unprotected = true
+ if chainID == nil {
+ return &signer
+ }
+ chainId, overflow := uint256.FromBig(chainID)
+ if overflow {
+ panic("chainID higher than 2^256-1")
+ }
+ signer.chainID.Set(chainId)
+ signer.chainIDMul.Mul(chainId, &u256.Num2)
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ return &signer
+}
+
+func NewCancunSigner(chainID *big.Int) *Signer {
+ var signer Signer
+ signer.unprotected = true
+ if chainID == nil {
+ return &signer
+ }
+ chainId, overflow := uint256.FromBig(chainID)
+ if overflow {
+ panic("chainID higher than 2^256-1")
+ }
+ signer.chainID.Set(chainId)
+ signer.chainIDMul.Mul(chainId, &u256.Num2)
+ signer.protected = true
+ signer.accessList = true
+ signer.dynamicFee = true
+ signer.blob = true
return &signer
}
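
The new constructors above build fixed-capability signers directly from a chain id, while MakeSignerArb threads the ArbOS version into the Prague/Cancun checks. A hedged sketch of picking one (the chain id is only an example value):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/erigontech/erigon/execution/types"
)

func main() {
	chainID := big.NewInt(421614) // example value

	london := types.NewLondonSigner(chainID) // legacy, access-list and dynamic-fee txns
	cancun := types.NewCancunSigner(chainID) // additionally accepts blob txns

	fmt.Println(london != nil, cancun != nil)

	// For fork-aware selection on an Arbitrum chain, MakeSignerArb also needs
	// the ArbOS version so IsPrague/IsCancun can be evaluated correctly:
	//   signer := types.MakeSignerArb(chainConfig, blockNumber, blockTime, arbosVersion)
}
```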
diff --git a/execution/types/withdrawal.go b/execution/types/withdrawal.go
index 2934600731d..7256631d075 100644
--- a/execution/types/withdrawal.go
+++ b/execution/types/withdrawal.go
@@ -31,15 +31,15 @@ import (
"github.com/erigontech/erigon/execution/rlp"
)
-type encodingBuf [32]byte
+type encodingBuf [64]byte
-var pooledBuf = sync.Pool{
+var PooledBuf = sync.Pool{
New: func() any { return new(encodingBuf) },
}
-func newEncodingBuf() *encodingBuf {
- b := pooledBuf.Get().(*encodingBuf)
- *b = encodingBuf([32]byte{}) // reset, do we need to?
+func NewEncodingBuf() *encodingBuf {
+ b := PooledBuf.Get().(*encodingBuf)
+ *b = encodingBuf([64]byte{}) // reset, do we need to?
return b
}
@@ -66,8 +66,8 @@ func (obj *Withdrawal) EncodeRLP(w io.Writer) error {
encodingSize := obj.EncodingSize()
- b := newEncodingBuf()
- defer pooledBuf.Put(b)
+ b := NewEncodingBuf()
+ defer PooledBuf.Put(b)
if err := rlp.EncodeStructSizePrefix(encodingSize, w, b[:]); err != nil {
return err
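
The scratch buffer above grows from 32 to 64 bytes, and the pool plus its constructor are now exported, so encoders outside this file can reuse the same borrow/return pattern. A small sketch, assuming the rlp helper signature used elsewhere in this diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/erigontech/erigon/execution/rlp"
	"github.com/erigontech/erigon/execution/types"
)

func main() {
	// Borrow a zeroed 64-byte scratch buffer and return it when done.
	b := types.NewEncodingBuf()
	defer types.PooledBuf.Put(b)

	var out bytes.Buffer
	// Write the RLP list prefix for a 3-byte payload (0xc3).
	if err := rlp.EncodeStructSizePrefix(3, &out, b[:]); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", out.Bytes()) // c3
}
```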
diff --git a/execution/vm/arb/costs/operations_acl_arb.go b/execution/vm/arb/costs/operations_acl_arb.go
new file mode 100644
index 00000000000..836e8939973
--- /dev/null
+++ b/execution/vm/arb/costs/operations_acl_arb.go
@@ -0,0 +1,195 @@
+package costs
+
+import (
+ "fmt"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/state"
+ "github.com/erigontech/erigon/execution/types/accounts"
+ "github.com/erigontech/erigon/execution/vm"
+ "github.com/erigontech/erigon/execution/vm/evmtypes"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/arb/multigas"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/chain/params"
+ "github.com/erigontech/nitro-erigon/util/arbmath"
+)
+
+// Computes the cost of doing a state load in wasm
+// Note: the code here is adapted from gasSLoadEIP2929
+func WasmStateLoadCost(db *state.IntraBlockState, program common.Address, key common.Hash) multigas.MultiGas {
+ programAcc := accounts.InternAddress(program)
+ storageKey := accounts.InternKey(key)
+ // Check slot presence in the access list
+ if _, slotPresent := db.SlotInAccessList(programAcc, storageKey); !slotPresent {
+ // If the caller cannot afford the cost, this change will be rolled back
+ // If he does afford it, we can skip checking the same thing later on, during execution
+ db.AddSlotToAccessList(programAcc, storageKey)
+
+		// TODO: note that the original code charged only params.ColdSloadCostEIP2929
+ // Cold slot access considered as storage access + computation
+ return multigas.MultiGasFromPairs(
+ multigas.Pair{Kind: multigas.ResourceKindStorageAccess, Amount: params.ColdSloadCostEIP2929 - params.WarmStorageReadCostEIP2929},
+ multigas.Pair{Kind: multigas.ResourceKindComputation, Amount: params.WarmStorageReadCostEIP2929},
+ )
+ }
+ // Warm slot access considered as computation
+ return multigas.ComputationGas(params.WarmStorageReadCostEIP2929)
+}
+
+// Computes the cost of doing a state store in wasm
+// Note: the code here is adapted from makeGasSStoreFunc with the most recent parameters as of The Merge
+// Note: the sentry check must be done by the caller
+func WasmStateStoreCost(db *state.IntraBlockState, program common.Address, key, value common.Hash) multigas.MultiGas {
+ clearingRefund := params.SstoreClearsScheduleRefundEIP3529
+ programAcc := accounts.InternAddress(program)
+ storageKey := accounts.InternKey(key)
+ current := uint256.Int{}
+ var err error
+ if current, err = db.GetState(programAcc, storageKey); err != nil {
+ panic(err)
+ }
+
+ cost := multigas.ZeroGas()
+ // Check slot presence in the access list
+ if addrPresent, slotPresent := db.SlotInAccessList(programAcc, storageKey); !slotPresent {
+ cost.SaturatingIncrementInto(multigas.ResourceKindStorageAccess, params.ColdSloadCostEIP2929)
+ // If the caller cannot afford the cost, this change will be rolled back
+ db.AddSlotToAccessList(programAcc, storageKey)
+ if !addrPresent {
+ panic(fmt.Sprintf("impossible case: address %v was not present in access list", program))
+ }
+ }
+
+ if arbmath.BigEquals(current.ToBig(), value.Big()) { // noop (1)
+ // EIP 2200 original clause:
+ // return params.SloadGasEIP2200, nil
+ return cost.SaturatingIncrement(multigas.ResourceKindComputation, params.WarmStorageReadCostEIP2929) // SLOAD_GAS
+ }
+
+ original := uint256.Int{}
+ if original, err = db.GetCommittedState(programAcc, storageKey); err != nil {
+ panic(err)
+ }
+	if original.Eq(&current) {
+ if original.IsZero() { // create slot (2.1.1)
+ return cost.SaturatingIncrement(multigas.ResourceKindStorageGrowth, params.SstoreSetGasEIP2200)
+ }
+ if value.Cmp(common.Hash{}) == 0 { // delete slot (2.1.2b)
+ db.AddRefund(clearingRefund)
+ }
+ // EIP-2200 original clause:
+ // return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2)
+ return cost.SaturatingIncrement(multigas.ResourceKindStorageAccess, params.SstoreResetGasEIP2200-params.ColdSloadCostEIP2929)
+ }
+ if !original.IsZero() {
+ if current.IsZero() { // recreate slot (2.2.1.1)
+ db.SubRefund(clearingRefund)
+ } else if value.Cmp(common.Hash{}) == 0 { // delete slot (2.2.1.2)
+ db.AddRefund(clearingRefund)
+ }
+ }
+
+ if arbmath.BigEquals(original.ToBig(), value.Big()) {
+ if original.IsZero() { // reset to original inexistent slot (2.2.2.1)
+ // EIP 2200 Original clause:
+ //evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200)
+ db.AddRefund(params.SstoreSetGasEIP2200 - params.WarmStorageReadCostEIP2929)
+ } else { // reset to original existing slot (2.2.2.2)
+ // EIP 2200 Original clause:
+ // evm.StateDB.AddRefund(params.SstoreResetGasEIP2200 - params.SloadGasEIP2200)
+ // - SSTORE_RESET_GAS redefined as (5000 - COLD_SLOAD_COST)
+ // - SLOAD_GAS redefined as WARM_STORAGE_READ_COST
+ // Final: (5000 - COLD_SLOAD_COST) - WARM_STORAGE_READ_COST
+ db.AddRefund((params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929) - params.WarmStorageReadCostEIP2929)
+ }
+ }
+ // EIP-2200 original clause:
+ //return params.SloadGasEIP2200, nil // dirty update (2.2)
+ return cost.SaturatingIncrement(multigas.ResourceKindComputation, params.WarmStorageReadCostEIP2929) // dirty update (2.2)
+}
+
+// Computes the cost of starting a call from wasm
+//
+// The code here is adapted from the following functions with the most recent parameters as of The Merge
+// - operations_acl.go makeCallVariantGasCallEIP2929()
+// - gas_table.go gasCall()
+func WasmCallCost(db evmtypes.IntraBlockState, contract common.Address, value *uint256.Int, budget uint64) (multigas.MultiGas, error) {
+ total := multigas.ZeroGas()
+ apply := func(resource multigas.ResourceKind, amount uint64) bool {
+ total.SaturatingIncrementInto(resource, amount)
+ return total.SingleGas() > budget
+ }
+
+ // EIP 2929: the static cost considered as computation
+ if apply(multigas.ResourceKindComputation, params.WarmStorageReadCostEIP2929) {
+ return total, vm.ErrOutOfGas
+ }
+
+ // EIP 2929: first dynamic cost if cold (makeCallVariantGasCallEIP2929)
+ warmAccess := db.AddressInAccessList(contract)
+ coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929
+ if !warmAccess {
+ db.AddAddressToAccessList(contract)
+
+ // Cold slot access considered as storage access.
+ if apply(multigas.ResourceKindStorageAccess, coldCost) {
+ return total, vm.ErrOutOfGas
+ }
+ }
+
+ // gasCall()
+ transfersValue := value.Sign() != 0
+ if transfersValue {
+ if empty, _ := db.Empty(contract); empty {
+ // Storage slot writes (zero -> nonzero) considered as storage growth.
+ if apply(multigas.ResourceKindStorageGrowth, params.CallNewAccountGas) {
+ return total, vm.ErrOutOfGas
+ }
+ }
+ // Value transfer to non-empty account considered as computation.
+ if apply(multigas.ResourceKindComputation, params.CallValueTransferGas) {
+ return total, vm.ErrOutOfGas
+ }
+ }
+ return total, nil
+}
+
+// Computes the cost of touching an account in wasm
+// Note: the code here is adapted from gasEip2929AccountCheck with the most recent parameters as of The Merge
+func WasmAccountTouchCost(cfg *chain.Config, db evmtypes.IntraBlockState, addr common.Address, withCode bool) multigas.MultiGas {
+ cost := multigas.ZeroGas()
+ if withCode {
+ extCodeCost := cfg.MaxCodeSize() / params.MaxCodeSize * params.ExtcodeSizeGasEIP150
+ cost.SaturatingIncrementInto(multigas.ResourceKindStorageAccess, extCodeCost)
+ }
+
+ if !db.AddressInAccessList(addr) {
+ db.AddAddressToAccessList(addr)
+ //return cost + params.ColdAccountAccessCostEIP2929
+ // TODO arbitrum - pricing differs?
+
+ // Cold slot read -> storage access + computation
+ return cost.SaturatingAdd(multigas.MultiGasFromPairs(
+ multigas.Pair{Kind: multigas.ResourceKindStorageAccess, Amount: params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929},
+ multigas.Pair{Kind: multigas.ResourceKindComputation, Amount: params.WarmStorageReadCostEIP2929},
+ ))
+ }
+ //return cost + params.WarmStorageReadCostEIP2929
+ // Warm slot read considered as computation.
+ return cost.SaturatingIncrement(multigas.ResourceKindComputation, params.WarmStorageReadCostEIP2929)
+}
+
+// Computes the history-growth portion of the cost of a log operation in wasm.
+// The full cost is charged on the WASM side in the emit_log function.
+// Note: the code here is adapted from makeGasLog
+func WasmLogCost(numTopics uint64, dataBytes uint64) multigas.MultiGas {
+ // Bloom/topic history growth: LogTopicHistoryGas per topic
+ bloomHistoryGrowthCost := params.LogTopicHistoryGas * numTopics
+
+ // Payload history growth: LogDataGas per payload byte
+ payloadHistoryGrowthCost := params.LogDataGas * dataBytes
+
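+ // Illustrative example: a LOG2 with 64 payload bytes is charged
+ // 2*LogTopicHistoryGas + 64*LogDataGas here, all attributed to history growth;
+ // the computation share of the cost is charged by the WASM host in emit_log.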
+ return multigas.HistoryGrowthGas(bloomHistoryGrowthCost + payloadHistoryGrowthCost)
+}
diff --git a/execution/vm/contracts.go b/execution/vm/contracts.go
index b0d2aa8becd..0c486d3a686 100644
--- a/execution/vm/contracts.go
+++ b/execution/vm/contracts.go
@@ -32,6 +32,7 @@ import (
"github.com/consensys/gnark-crypto/ecc/bls12-381/fp"
"github.com/consensys/gnark-crypto/ecc/bls12-381/fr"
"github.com/consensys/gnark-crypto/ecc/bn254"
+ "github.com/erigontech/erigon/arb/multigas"
patched_big "github.com/ethereum/go-bigmodexpfix/src/math/big"
"github.com/holiman/uint256"
@@ -69,6 +70,15 @@ func ActivePrecompiledContracts(chainRules *chain.Rules) PrecompiledContracts {
}
func Precompiles(chainRules *chain.Rules) PrecompiledContracts {
+ if chainRules.IsArbitrum {
+ if chainRules.IsDia {
+ return PrecompiledContractsStartingFromArbOS50
+ }
+ if chainRules.IsStylus {
+ return PrecompiledContractsStartingFromArbOS30
+ }
+ return PrecompiledContractsBeforeArbOS30
+ }
switch {
case chainRules.IsOsaka:
return PrecompiledContractsOsaka
@@ -272,6 +282,15 @@ func init() {
// ActivePrecompiles returns the precompiles enabled with the current configuration.
func ActivePrecompiles(rules *chain.Rules) []accounts.Address {
+ if rules.IsArbitrum {
+ if rules.IsDia {
+ return PrecompiledAddressesStartingFromArbOS50
+ }
+ if rules.IsStylus {
+ return PrecompiledAddressesStartingFromArbOS30
+ }
+ return PrecompiledAddressesBeforeArbOS30
+ }
switch {
case rules.IsOsaka:
return PrecompiledAddressesOsaka
@@ -299,20 +318,26 @@ func ActivePrecompiles(rules *chain.Rules) []accounts.Address {
// - the returned bytes,
// - the _remaining_ gas,
// - any error that occurred
-func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, tracer *tracing.Hooks,
-) (ret []byte, remainingGas uint64, err error) {
+func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uint64, logger *tracing.Hooks, advancedInfo *AdvancedPrecompileCall) (ret []byte, remainingGas uint64, usedMultiGas multigas.MultiGas, err error) {
+ advanced, isAdvanced := p.(AdvancedPrecompile)
+ if isAdvanced {
+ return advanced.RunAdvanced(input, suppliedGas, advancedInfo)
+ }
+ precompileArbosAware, isPrecompileArbosAware := p.(ArbosAwarePrecompile)
+ if isPrecompileArbosAware && advancedInfo != nil {
+ precompileArbosAware.SetArbosVersion(advancedInfo.Evm.Context.ArbOSVersion)
+ }
gasCost := p.RequiredGas(input)
if suppliedGas < gasCost {
- return nil, 0, ErrOutOfGas
+ return nil, 0, multigas.ComputationGas(suppliedGas), ErrOutOfGas
}
-
- if tracer != nil && tracer.OnGasChange != nil {
- tracer.OnGasChange(suppliedGas, suppliedGas-gasCost, tracing.GasChangeCallPrecompiledContract)
+ if logger != nil && logger.OnGasChange != nil {
+ logger.OnGasChange(suppliedGas, suppliedGas-gasCost, tracing.GasChangeCallPrecompiledContract)
}
suppliedGas -= gasCost
output, err := p.Run(input)
- return output, suppliedGas, err
+ return output, suppliedGas, multigas.ComputationGas(gasCost), err
}
// ECRECOVER implemented as a native contract.
@@ -474,6 +499,30 @@ func modExpMultComplexityEip198(x uint32) uint64 {
}
}
+// modExpMultComplexityEip198 implements modExp multiplication complexity formula, as defined in EIP-198
+//
+// def mult_complexity(x):
+//
+// if x <= 64: return x ** 2
+// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072
+// else: return x ** 2 // 16 + 480 * x - 199680
+//
+// where x is max(base_length, modulus_length)
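+// For example, mult_complexity(200) = 200**2 // 4 + 96*200 - 3072 = 26128.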
+func modExpMultComplexityEip198(x uint32) uint64 {
+ xx := uint64(x) * uint64(x)
+ switch {
+ case x <= 64:
+ return xx
+ case x <= 1024:
+ // (x ** 2 // 4 ) + ( 96 * x - 3072)
+ return xx/4 + 96*uint64(x) - 3072
+ default:
+ // (x ** 2 // 16) + (480 * x - 199680)
+ // max value: 0x100001df'dffcf220
+ return xx/16 + 480*uint64(x) - 199680
+ }
+}
+
// RequiredGas returns the gas required to execute the pre-compiled contract.
func (c *bigModExp) RequiredGas(input []byte) uint64 {
@@ -562,6 +611,82 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 {
return max(gas, minGas)
}
+//// RequiredGas returns the gas required to execute the pre-compiled contract.
+//func (c *bigModExp) RequiredGas(input []byte) uint64 {
+// var (
+// baseLen = new(big.Int).SetBytes(getData(input, 0, 32))
+// expLen = new(big.Int).SetBytes(getData(input, 32, 32))
+// modLen = new(big.Int).SetBytes(getData(input, 64, 32))
+// )
+// if len(input) > 96 {
+// input = input[96:]
+// } else {
+// input = input[:0]
+// }
+// // Retrieve the head 32 bytes of exp for the adjusted exponent length
+// var expHead *big.Int
+// if big.NewInt(int64(len(input))).Cmp(baseLen) <= 0 {
+// expHead = new(big.Int)
+// } else {
+// if expLen.Cmp(big32) > 0 {
+// expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), 32))
+// } else {
+// expHead = new(big.Int).SetBytes(getData(input, baseLen.Uint64(), expLen.Uint64()))
+// }
+// }
+// // Calculate the adjusted exponent length
+// var msb int
+// if bitlen := expHead.BitLen(); bitlen > 0 {
+// msb = bitlen - 1
+// }
+// adjExpLen := new(big.Int)
+// if expLen.Cmp(big32) > 0 {
+// adjExpLen.Sub(expLen, big32)
+// if c.osaka { // EIP-7883
+// adjExpLen.Lsh(adjExpLen, 4) // ×16
+// } else {
+// adjExpLen.Lsh(adjExpLen, 3) // ×8
+// }
+// }
+// adjExpLen.Add(adjExpLen, big.NewInt(int64(msb)))
+// adjExpLen = math.BigMax(adjExpLen, big1)
+//
+// // Calculate the gas cost of the operation
+// gas := new(big.Int).Set(math.BigMax(modLen, baseLen)) // max_length
+// if c.osaka {
+// // EIP-7883: ModExp Gas Cost Increase
+// gas = modExpMultComplexityEip7883(gas /*max_length */)
+// gas.Mul(gas, adjExpLen)
+// if gas.BitLen() > 64 {
+// return math.MaxUint64
+// }
+//
+// return max(500, gas.Uint64())
+// } else if c.eip2565 {
+// // EIP-2565 has three changes compared to EIP-198:
+//
+// // 1. Different multiplication complexity
+// gas = modExpMultComplexityEip2565(gas)
+//
+// gas.Mul(gas, adjExpLen)
+// // 2. Different divisor (`GQUADDIVISOR`) (3)
+// gas.Div(gas, big3)
+// if gas.BitLen() > 64 {
+// return math.MaxUint64
+// }
+// // 3. Minimum price of 200 gas
+// return max(200, gas.Uint64())
+// }
+// gas = modExpMultComplexityEip198(gas)
+// gas.Mul(gas, adjExpLen)
+// gas.Div(gas, big20)
+//
+// if gas.BitLen() > 64 {
+// return math.MaxUint64
+// }
+// return gas.Uint64()
+//}
+
var (
errModExpBaseLengthTooLarge = errors.New("base length is too large")
errModExpExponentLengthTooLarge = errors.New("exponent length is too large")
diff --git a/execution/vm/contracts_arb.go b/execution/vm/contracts_arb.go
new file mode 100644
index 00000000000..49829ce29fd
--- /dev/null
+++ b/execution/vm/contracts_arb.go
@@ -0,0 +1,41 @@
+package vm
+
+import (
+ "math/big"
+
+ "github.com/erigontech/erigon/arb/multigas"
+ "github.com/erigontech/erigon/common"
+)
+
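+// Precompile sets selected by ArbOS version (see Precompiles/ActivePrecompiles in
+// contracts.go): before ArbOS 30, from ArbOS 30 (Stylus), and from ArbOS 50 (Dia).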
+var (
+ PrecompiledContractsBeforeArbOS30 = make(map[common.Address]PrecompiledContract)
+ PrecompiledAddressesBeforeArbOS30 []common.Address
+ PrecompiledContractsStartingFromArbOS30 = make(map[common.Address]PrecompiledContract)
+ PrecompiledAddressesStartingFromArbOS30 []common.Address
+ PrecompiledContractsStartingFromArbOS50 = make(map[common.Address]PrecompiledContract)
+ PrecompiledAddressesStartingFromArbOS50 []common.Address
+)
+
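+// PrecompiledContractsP256Verify exposes the p256Verify (secp256r1 signature
+// verification) precompile at address 0x100.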
+var PrecompiledContractsP256Verify = map[common.Address]PrecompiledContract{
+ common.BytesToAddress([]byte{0x01, 0x00}): &p256Verify{},
+}
+
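+// AdvancedPrecompileCall carries the full call context handed to Arbitrum advanced
+// precompiles by RunPrecompiledContract: the precompile and acting addresses, the
+// caller, the transferred value, the read-only flag and the running EVM.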
+type AdvancedPrecompileCall struct {
+ PrecompileAddress common.Address
+ ActingAsAddress common.Address
+ Caller common.Address
+ Value *big.Int
+ ReadOnly bool
+ Evm *EVM
+}
+
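+// AdvancedPrecompile is a precompile that receives the full call context and reports
+// its multi-dimensional gas usage via RunAdvanced instead of the plain Run path.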
+type AdvancedPrecompile interface {
+ RunAdvanced(input []byte, suppliedGas uint64, advancedInfo *AdvancedPrecompileCall) (ret []byte, remainingGas uint64, usedMultiGas multigas.MultiGas, err error)
+ PrecompiledContract
+}
+
+// TODO move into arbitrum package
+type ArbosAwarePrecompile interface {
+ SetArbosVersion(arbosVersion uint64)
+ PrecompiledContract
+}
diff --git a/execution/vm/contracts_fuzz_test.go b/execution/vm/contracts_fuzz_test.go
index f813f63b484..0ccb16aec04 100644
--- a/execution/vm/contracts_fuzz_test.go
+++ b/execution/vm/contracts_fuzz_test.go
@@ -20,6 +20,7 @@
package vm
import (
+ "fmt"
"maps"
"slices"
"testing"
diff --git a/execution/vm/contracts_test.go b/execution/vm/contracts_test.go
index 2d804f3b18e..d2e6b2a89a5 100644
--- a/execution/vm/contracts_test.go
+++ b/execution/vm/contracts_test.go
@@ -107,7 +107,7 @@ func testPrecompiled(t *testing.T, addr string, test precompiledTest) {
gas := p.RequiredGas(in)
t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) {
t.Parallel()
- if res, _, err := RunPrecompiledContract(p, in, gas, nil); err != nil {
+ if res, _, _, err := RunPrecompiledContract(p, in, gas, nil, nil); err != nil {
t.Error(err)
} else if common.Bytes2Hex(res) != test.Expected {
t.Errorf("Expected %v, got %v", test.Expected, common.Bytes2Hex(res))
@@ -130,7 +130,7 @@ func testPrecompiledOOG(t *testing.T, addr string, test precompiledTest) {
t.Run(fmt.Sprintf("%s-Gas=%d", test.Name, gas), func(t *testing.T) {
t.Parallel()
- _, _, err := RunPrecompiledContract(p, in, gas, nil)
+ _, _, _, err := RunPrecompiledContract(p, in, gas, nil, nil)
if err.Error() != "out of gas" {
t.Errorf("Expected error [out of gas], got [%v]", err)
}
@@ -148,7 +148,7 @@ func testPrecompiledFailure(addr string, test precompiledFailureTest, t *testing
gas := p.RequiredGas(in)
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
- _, _, err := RunPrecompiledContract(p, in, gas, nil)
+ _, _, _, err := RunPrecompiledContract(p, in, gas, nil, nil)
if err == nil || err.Error() != test.ExpectedError {
t.Errorf("Expected error [%v], got [%v]", test.ExpectedError, err)
}
@@ -180,7 +180,7 @@ func benchmarkPrecompiled(b *testing.B, addr string, test precompiledTest) {
bench.ResetTimer()
for i := 0; i < bench.N; i++ {
copy(data, in)
- res, _, err = RunPrecompiledContract(p, data, reqGas, nil)
+ res, _, _, err = RunPrecompiledContract(p, data, reqGas, nil, nil)
}
bench.StopTimer()
elapsed := max(uint64(time.Since(start)), 1)
@@ -264,7 +264,7 @@ func TestPrecompiledModExpPotentialOutOfRange(t *testing.T) {
hexString := "0x0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000ffffffffffffffff0000000000000000000000000000000000000000000000000000000000000000ee"
input := hexutil.MustDecode(hexString)
maxGas := uint64(math.MaxUint64)
- _, _, err := RunPrecompiledContract(modExpContract, input, maxGas, nil)
+ _, _, _, err := RunPrecompiledContract(modExpContract, input, maxGas, nil, nil)
require.NoError(t, err)
}
@@ -275,7 +275,7 @@ func TestPrecompiledModExpInputEip7823(t *testing.T) {
// length_of_EXPONENT = 1024; everything else is zero
in := common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000")
gas := pragueModExp.RequiredGas(in)
- res, _, err := RunPrecompiledContract(pragueModExp, in, gas, nil)
+ res, _, _, err := RunPrecompiledContract(pragueModExp, in, gas, nil, nil)
require.NoError(t, err)
assert.Equal(t, "", common.Bytes2Hex(res))
gas = osakaModExp.RequiredGas(in)
@@ -320,7 +320,17 @@ func TestPrecompiledModExpInputEip7823(t *testing.T) {
require.NoError(t, err)
assert.Equal(t, "", common.Bytes2Hex(res))
gas = osakaModExp.RequiredGas(in)
- _, _, err = RunPrecompiledContract(osakaModExp, in, gas, nil)
+ _, _, _, err = RunPrecompiledContract(osakaModExp, in, gas, nil, nil)
+ assert.ErrorIs(t, err, errModExpExponentLengthTooLarge)
+
+ // length_of_EXPONENT = 2^64; everything else is zero
+ in = common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000")
+ gas = pragueModExp.RequiredGas(in)
+ res, _, _, err = RunPrecompiledContract(pragueModExp, in, gas, nil, nil)
+ require.NoError(t, err)
+ assert.Equal(t, "", common.Bytes2Hex(res))
+ gas = osakaModExp.RequiredGas(in)
+ _, _, _, err = RunPrecompiledContract(osakaModExp, in, gas, nil, nil)
assert.ErrorIs(t, err, errModExpExponentLengthTooLarge)
}
diff --git a/execution/vm/evm.go b/execution/vm/evm.go
index 7cf478c2c03..c3e9d0e81f0 100644
--- a/execution/vm/evm.go
+++ b/execution/vm/evm.go
@@ -22,6 +22,9 @@ package vm
import (
"errors"
"fmt"
+ "github.com/erigontech/erigon/arb/chainparams"
+ "github.com/erigontech/erigon/arb/multigas"
+ "math/big"
"sync/atomic"
"github.com/holiman/uint256"
@@ -82,15 +85,28 @@ type EVM struct {
callGasTemp uint64
// optional overridden set of precompiled contracts
precompiles PrecompiledContracts
+
+ //Arbitrum processing hook
+ ProcessingHookSet atomic.Bool
+ ProcessingHook TxProcessingHook
}
// NewEVM returns a new EVM. The returned EVM is not thread safe and should
// only ever be used *once*.
func NewEVM(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, ibs *state.IntraBlockState, chainConfig *chain.Config, vmConfig Config) *EVM {
if vmConfig.NoBaseFee {
- if txCtx.GasPrice.IsZero() {
+ if txCtx.GasPrice != nil && txCtx.GasPrice.IsZero() {
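+ // Arbitrum: preserve the original base fee in BaseFeeInBlock before BaseFee is
+ // zeroed below, so geth hooks and precompiles can still read it (see evmtypes.BlockContext).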
+ if chainConfig.IsArbitrum() {
+ blockCtx.BaseFeeInBlock = new(uint256.Int)
+ if blockCtx.BaseFee != nil && !blockCtx.BaseFee.IsZero() {
+ blockCtx.BaseFeeInBlock.Set(blockCtx.BaseFee)
+ }
+ }
blockCtx.BaseFee = uint256.Int{}
}
+ if chainConfig.IsArbitrum() && txCtx.BlobFee != nil && txCtx.BlobFee.IsZero() {
+ blockCtx.BlobBaseFee = new(uint256.Int)
+ }
}
evm := &EVM{
Context: blockCtx,
@@ -100,6 +116,11 @@ func NewEVM(blockCtx evmtypes.BlockContext, txCtx evmtypes.TxContext, ibs *state
chainConfig: chainConfig,
chainRules: blockCtx.Rules(chainConfig),
}
+ if evm.config.JumpDestCache == nil {
+ evm.config.JumpDestCache = NewJumpDestCache(JumpDestCacheLimit)
+ }
+
+ evm.ProcessingHook = DefaultTxProcessor{evm: evm}
evm.interpreter = NewEVMInterpreter(evm, vmConfig)
return evm
@@ -160,9 +181,13 @@ func (evm *EVM) Interpreter() Interpreter {
return evm.interpreter
}
-func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts.Address, addr accounts.Address, input []byte, gas uint64, value uint256.Int, bailout bool) (ret []byte, leftOverGas uint64, err error) {
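+// isSystemCall reports whether the caller is the Arbitrum system address
+// (chainparams.SystemAddress).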
+func isSystemCall(caller common.Address) bool {
+ return caller == chainparams.SystemAddress
+}
+
+func (evm *EVM) call(typ OpCode, caller accounts.Address, addr accounts.Address, input []byte, gas uint64, value uint256.Int, bailout bool, arbInfo *AdvancedPrecompileCall) (ret []byte, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
if evm.abort.Load() {
- return ret, leftOverGas, nil
+ return ret, leftOverGas, multigas.ZeroGas(), nil
}
depth := evm.interpreter.Depth()
@@ -180,7 +205,7 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
if !isPrecompile {
code, err = evm.intraBlockState.ResolveCode(addr)
if err != nil {
- return nil, 0, fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
+ return nil, 0, multigas.ZeroGas(), fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
}
}
@@ -193,43 +218,46 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
}
if evm.config.NoRecursion && depth > 0 {
- return nil, gas, nil
+ return nil, gas, multigas.ZeroGas(), nil
}
// Fail if we're trying to execute above the call depth limit
if depth > int(params.CallCreateDepth) {
- return nil, gas, ErrDepth
+ return nil, gas, multigas.ZeroGas(), ErrDepth
}
if typ == CALL || typ == CALLCODE {
// Fail if we're trying to transfer more than the available balance
canTransfer, err := evm.Context.CanTransfer(evm.intraBlockState, caller, value)
if err != nil {
- return nil, 0, err
+ return nil, 0, multigas.ZeroGas(), err
}
- if !value.IsZero() && !canTransfer {
- if !bailout {
- return nil, gas, ErrInsufficientBalance
- }
+ if !bailout && !value.IsZero() && !canTransfer {
+ return nil, gas, multigas.ZeroGas(), ErrInsufficientBalance
}
}
// BAL: record address access even if call fails due to gas/call depth and to precompiles
+ // ARBITRUM_MERGE: verify the following line
evm.intraBlockState.MarkAddressAccess(addr)
snapshot := evm.intraBlockState.PushSnapshot()
defer evm.intraBlockState.PopSnapshot(snapshot)
+ usedMultiGas = multigas.ZeroGas()
+
if typ == CALL {
exist, err := evm.intraBlockState.Exist(addr)
if err != nil {
- return nil, 0, fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
+ return nil, 0, multigas.ZeroGas(), fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
}
if !exist {
if !isPrecompile && evm.chainRules.IsSpuriousDragon && value.IsZero() {
- return nil, gas, nil
+ return nil, gas, multigas.ZeroGas(), nil
}
evm.intraBlockState.CreateAccount(addr, false)
}
- evm.Context.Transfer(evm.intraBlockState, caller, addr, value, bailout)
+ if err = evm.Context.Transfer(evm.intraBlockState, caller, addr, value, bailout); err != nil {
+ return nil, gas, multigas.ComputationGas(gas), err
+ }
} else if typ == STATICCALL {
// We do an AddBalance of zero here, just in order to trigger a touch.
// This doesn't matter on Mainnet, where all empties are gone at the time of Byzantium,
@@ -240,7 +268,9 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
// It is allowed to call precompiles, even via delegatecall
if isPrecompile {
- ret, gas, err = RunPrecompiledContract(p, input, gas, evm.Config().Tracer)
+ var precompileMultiGas multigas.MultiGas
+ ret, gas, precompileMultiGas, err = RunPrecompiledContract(p, input, gas, evm.Config().Tracer, arbInfo)
+ usedMultiGas.SaturatingAddInto(precompileMultiGas)
} else if len(code) == 0 {
// If the account has no code, we can abort here
// The depth-check is already done, and precompiles handled above
@@ -251,10 +281,15 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
var codeHash accounts.CodeHash
codeHash, err = evm.intraBlockState.ResolveCodeHash(addr)
if err != nil {
- return nil, 0, fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
+ return nil, 0, multigas.ZeroGas(), fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
}
var contract Contract
if typ == CALLCODE {
+ // ARBITRUM_MERGE:
+ /*
+ contract = NewContract(caller, caller.Address(), value, gas, evm.config.JumpDestCache)
+ contract.delegateOrCallcode = true
+ */
contract = Contract{
caller: caller,
addr: caller,
@@ -263,6 +298,11 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
CodeHash: codeHash,
}
} else if typ == DELEGATECALL {
+ // ARBITRUM_MERGE:
+ /*
+ contract = NewContract(caller, caller.Address(), value, gas, evm.config.JumpDestCache).AsDelegate()
+ contract.delegateOrCallcode = true
+ */
contract = Contract{
caller: callerAddress,
addr: caller,
@@ -271,6 +311,10 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
CodeHash: codeHash,
}
} else {
+ // ARBITRUM_MERGE
+ /*
+ contract = NewContract(caller, addrCopy, value, gas, evm.config.JumpDestCache)
+ */
contract = Contract{
caller: caller,
addr: addr,
@@ -279,11 +323,26 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
CodeHash: codeHash,
}
}
+ // ARBITRUM_MERGE
+ //contract.IsSystemCall = isSystemCall(caller.Address())
+ //contract.SetCallCode(&addrCopy, codeHash, code)
readOnly := false
if typ == STATICCALL {
readOnly = true
}
ret, gas, err = evm.interpreter.Run(contract, gas, input, readOnly)
+
+ // ARBITRUM_MERGE
+ /*
+ ret, err = evm.interpreter.Run(contract, input, readOnly)
+
+ //fmt.Printf("block %d CALLER %s TO %s gas spending %d multigas %s\n",
+ // evm.Context.BlockNumber, contract.Caller().String(), contract.self.String(), gas-contract.Gas, contract.GetTotalUsedMultiGas().String())
+ gas = contract.Gas
+ usedMultiGas.SaturatingAddInto(contract.GetTotalUsedMultiGas())
+ */
+ usedMultiGas.SaturatingAddInto(contract.GetTotalUsedMultiGas())
+
}
// When an error was returned by the EVM or when setting the creation code
// above we revert to the snapshot and consume any gas remaining. Additionally
@@ -294,19 +353,28 @@ func (evm *EVM) call(typ OpCode, caller accounts.Address, callerAddress accounts
if evm.config.Tracer != nil && evm.config.Tracer.OnGasChange != nil {
evm.Config().Tracer.OnGasChange(gas, 0, tracing.GasChangeCallFailedExecution)
}
+
+ // Attribute all leftover gas to computation
+ usedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, gas)
gas = 0
}
}
-
- return ret, gas, err
+ return ret, gas, usedMultiGas, err
}
// Call executes the contract associated with the addr with the given input as
// parameters. It also handles any necessary value transfer required and takes
// the necessary steps to create accounts and reverses the state in case of an
// execution error or failed value transfer.
-func (evm *EVM) Call(caller accounts.Address, addr accounts.Address, input []byte, gas uint64, value uint256.Int, bailout bool) (ret []byte, leftOverGas uint64, err error) {
- return evm.call(CALL, caller, caller, addr, input, gas, value, bailout)
+func (evm *EVM) Call(caller accounts.Address, addr accounts.Address, input []byte, gas uint64, value *uint256.Int, bailout bool) (ret []byte, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
+ return evm.call(CALL, caller, addr, input, gas, value, bailout, &AdvancedPrecompileCall{
+ PrecompileAddress: addr,
+ ActingAsAddress: addr,
+ Caller: caller,
+ Value: value,
+ ReadOnly: false,
+ Evm: evm,
+ })
}
// CallCode executes the contract associated with the addr with the given input
@@ -316,8 +384,15 @@ func (evm *EVM) Call(caller accounts.Address, addr accounts.Address, input []byt
//
// CallCode differs from Call in the sense that it executes the given address'
// code with the caller as context.
-func (evm *EVM) CallCode(caller accounts.Address, addr accounts.Address, input []byte, gas uint64, value uint256.Int) (ret []byte, leftOverGas uint64, err error) {
- return evm.call(CALLCODE, caller, caller, addr, input, gas, value, false)
+func (evm *EVM) CallCode(caller accounts.Address, addr accounts.Address, input []byte, gas uint64, value *uint256.Int) (ret []byte, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
+ return evm.call(CALLCODE, caller, addr, input, gas, value, false, &AdvancedPrecompileCall{
+ PrecompileAddress: addr,
+ ActingAsAddress: caller,
+ Caller: caller,
+ Value: value,
+ ReadOnly: false,
+ Evm: evm,
+ })
}
// DelegateCall executes the contract associated with the addr with the given input
@@ -325,16 +400,30 @@ func (evm *EVM) CallCode(caller accounts.Address, addr accounts.Address, input [
//
// DelegateCall differs from CallCode in the sense that it executes the given address'
// code with the caller as context and the caller is set to the caller of the caller.
-func (evm *EVM) DelegateCall(caller accounts.Address, callerAddress accounts.Address, addr accounts.Address, input []byte, value uint256.Int, gas uint64) (ret []byte, leftOverGas uint64, err error) {
- return evm.call(DELEGATECALL, caller, callerAddress, addr, input, gas, value, false)
+func (evm *EVM) DelegateCall(caller accounts.Address, callerAddress accounts.Address, addr accounts.Address, input []byte, value uint256.Int, gas uint64) (ret []byte, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
+ return evm.call(DELEGATECALL, caller, addr, input, gas, nil, false, &AdvancedPrecompileCall{
+ PrecompileAddress: addr,
+ ActingAsAddress: caller,
+ Caller: callerAddress,
+ Value: value.ToBig(),
+ ReadOnly: false,
+ Evm: evm,
+ })
}
// StaticCall executes the contract associated with the addr with the given input
// as parameters while disallowing any modifications to the state during the call.
// Opcodes that attempt to perform such modifications will result in exceptions
// instead of performing the modifications.
-func (evm *EVM) StaticCall(caller accounts.Address, addr accounts.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) {
- return evm.call(STATICCALL, caller, caller, addr, input, gas, uint256.Int{}, false)
+func (evm *EVM) StaticCall(caller accounts.Address, addr accounts.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
+ return evm.call(STATICCALL, caller, addr, input, gas, new(uint256.Int), false, &AdvancedPrecompileCall{
+ PrecompileAddress: addr,
+ ActingAsAddress: addr,
+ Caller: caller,
+ Value: new(big.Int),
+ ReadOnly: true,
+ Evm: evm,
+ })
}
type codeAndHash struct {
@@ -353,12 +442,12 @@ func (c *codeAndHash) Hash() accounts.CodeHash {
return c.hash
}
-func (evm *EVM) OverlayCreate(caller accounts.Address, codeAndHash *codeAndHash, gas uint64, value uint256.Int, address accounts.Address, typ OpCode, incrementNonce bool) ([]byte, accounts.Address, uint64, error) {
+func (evm *EVM) OverlayCreate(caller accounts.Address, codeAndHash *codeAndHash, gas uint64, value *uint256.Int, address accounts.Address, typ OpCode, incrementNonce bool) ([]byte, accounts.Address, uint64, multigas.MultiGas, error) {
return evm.create(caller, codeAndHash, gas, value, address, typ, incrementNonce, false)
}
// create creates a new contract using code as deployment code.
-func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRemaining uint64, value uint256.Int, address accounts.Address, typ OpCode, incrementNonce bool, bailout bool) (ret []byte, createAddress accounts.Address, leftOverGas uint64, err error) {
+func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRemaining uint64, value uint256.Int, address accounts.Address, typ OpCode, incrementNonce bool, bailout bool) (ret []byte, createAddress accounts.Address, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
if dbg.TraceTransactionIO && (evm.intraBlockState.Trace() || dbg.TraceAccount(caller.Handle())) {
defer func() {
version := evm.intraBlockState.Version()
@@ -386,26 +475,26 @@ func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRem
// limit.
if depth > int(params.CallCreateDepth) {
err = ErrDepth
- return nil, accounts.NilAddress, gasRemaining, err
+ return nil, accounts.NilAddress, gasRemaining, multigas.ZeroGas(), err
}
canTransfer, err := evm.Context.CanTransfer(evm.intraBlockState, caller, value)
if err != nil {
- return nil, accounts.NilAddress, 0, err
+ return nil, accounts.NilAddress, 0, multigas.ZeroGas(), err
}
if !canTransfer {
if !bailout {
err = ErrInsufficientBalance
- return nil, accounts.NilAddress, gasRemaining, err
+ return nil, accounts.NilAddress, gasRemaining, multigas.ZeroGas(), err
}
}
if incrementNonce {
nonce, err := evm.intraBlockState.GetNonce(caller)
if err != nil {
- return nil, accounts.NilAddress, 0, fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
+ return nil, accounts.NilAddress, 0, multigas.ZeroGas(), fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
}
if nonce+1 < nonce {
err = ErrNonceUintOverflow
- return nil, accounts.NilAddress, gasRemaining, err
+ return nil, accounts.NilAddress, gasRemaining, multigas.ZeroGas(), err
}
evm.intraBlockState.SetNonce(caller, nonce+1)
}
@@ -417,22 +506,22 @@ func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRem
// Ensure there's no existing contract already at the designated address
contractHash, err := evm.intraBlockState.ResolveCodeHash(address)
if err != nil {
- return nil, accounts.NilAddress, 0, fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
+ return nil, accounts.NilAddress, 0, multigas.ZeroGas(), fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
}
nonce, err := evm.intraBlockState.GetNonce(address)
if err != nil {
- return nil, accounts.NilAddress, 0, fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
+ return nil, accounts.NilAddress, 0, multigas.ZeroGas(), fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
}
hasStorage, err := evm.intraBlockState.HasStorage(address)
if err != nil {
- return nil, accounts.NilAddress, 0, fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
+ return nil, accounts.NilAddress, 0, multigas.ZeroGas(), fmt.Errorf("%w: %w", ErrIntraBlockStateFailed, err)
}
if nonce != 0 || !contractHash.IsEmpty() || hasStorage {
err = ErrContractAddressCollision
if evm.config.Tracer != nil && evm.config.Tracer.OnGasChange != nil {
evm.Config().Tracer.OnGasChange(gasRemaining, 0, tracing.GasChangeCallFailedExecution)
}
- return nil, accounts.NilAddress, 0, err
+ return nil, accounts.NilAddress, 0, multigas.ComputationGas(gasRemaining), err
}
// Create a new account on the state
snapshot := evm.intraBlockState.PushSnapshot()
@@ -446,6 +535,11 @@ func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRem
// Initialise a new contract and set the code that is to be used by the EVM.
// The contract is a scoped environment for this execution context only.
+ // ARBITRUM_MERGE
+ /*
+ contract := NewContract(caller, address, value, gasRemaining, evm.config.JumpDestCache)
+ contract.SetCodeOptionalHash(&address, codeAndHash)
+ */
contract := Contract{
caller: caller,
addr: address,
@@ -455,7 +549,7 @@ func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRem
}
if evm.config.NoRecursion && depth > 0 {
- return nil, address, gasRemaining, nil
+ return nil, address, gasRemaining, contract.GetTotalUsedMultiGas(), nil
}
ret, gasRemaining, err = evm.interpreter.Run(contract, gasRemaining, nil, false)
@@ -472,6 +566,11 @@ func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRem
// Reject code starting with 0xEF if EIP-3541 is enabled.
if err == nil && evm.chainRules.IsLondon && len(ret) >= 1 && ret[0] == 0xEF {
err = ErrInvalidCode
+
+ // Arbitrum: retain Stylus programs and instead store them in the DB alongside normal EVM bytecode.
+ if evm.chainRules.IsStylus && state.IsStylusProgram(ret) {
+ err = nil
+ }
}
// If the contract creation ran successfully and no errors were returned,
// calculate the gas required to store the code. If the code could not
@@ -479,13 +578,12 @@ func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRem
// by the error checking condition below.
if err == nil {
createDataGas := uint64(len(ret)) * params.CreateDataGas
- var ok bool
- if gasRemaining, ok = useGas(gasRemaining, createDataGas, evm.Config().Tracer, tracing.GasChangeCallCodeStorage); ok {
+ if contract.UseMultiGas(multigas.StorageGrowthGas(createDataGas), evm.Config().Tracer, tracing.GasChangeCallCodeStorage) {
evm.intraBlockState.SetCode(address, ret)
} else {
// If we run out of gas, we do not store the code: the returned code must be empty.
ret = []byte{}
- if evm.chainRules.IsHomestead {
+ if evm.chainRules.IsHomestead { // TODO Arbitrum does not check IsHomestead, but this does not affect Stylus execution
err = ErrCodeStoreOutOfGas
}
}
@@ -495,13 +593,20 @@ func (evm *EVM) create(caller accounts.Address, codeAndHash *codeAndHash, gasRem
// above, we revert to the snapshot and consume any gas remaining. Additionally,
// when we're in Homestead, this also counts for code storage gas errors.
if err != nil && (evm.chainRules.IsHomestead || err != ErrCodeStoreOutOfGas) {
- evm.intraBlockState.RevertToSnapshot(snapshot, nil)
+ evm.intraBlockState.RevertToSnapshot(snapshot, err)
if err != ErrExecutionReverted {
+ contract.UseMultiGas(multigas.ComputationGas(contract.Gas), evm.Config().Tracer, tracing.GasChangeCallFailedExecution)
+ // MERGE_ARBITRUM, main erigon has:
+ /*
gasRemaining, _ = useGas(gasRemaining, gasRemaining, evm.Config().Tracer, tracing.GasChangeCallFailedExecution)
+ */
}
}
-
+ // MERGE_ARBITRUM, main erigon has:
+ /*
return ret, address, gasRemaining, err
+ */
+ return ret, address, contract.Gas, contract.GetTotalUsedMultiGas(), err
}
func (evm *EVM) maxCodeSize() int {
@@ -513,10 +618,10 @@ func (evm *EVM) maxCodeSize() int {
// Create creates a new contract using code as deployment code.
// DESCRIBED: docs/programmers_guide/guide.md#nonce
-func (evm *EVM) Create(caller accounts.Address, code []byte, gasRemaining uint64, endowment uint256.Int, bailout bool) (ret []byte, contractAddr accounts.Address, leftOverGas uint64, err error) {
- nonce, err := evm.intraBlockState.GetNonce(caller)
+func (evm *EVM) Create(caller accounts.Address, code []byte, gasRemaining uint64, endowment *uint256.Int, bailout bool) (ret []byte, contractAddr accounts.Address, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
+ nonce, err := evm.intraBlockState.GetNonce(caller.Address())
if err != nil {
- return nil, accounts.NilAddress, 0, err
+ return nil, accounts.NilAddress, 0, multigas.ZeroGas(), err
}
contractAddr = accounts.InternAddress(types.CreateAddress(caller.Value(), nonce))
return evm.create(caller, &codeAndHash{code: code}, gasRemaining, endowment, contractAddr, CREATE, true /* incrementNonce */, bailout)
@@ -527,7 +632,7 @@ func (evm *EVM) Create(caller accounts.Address, code []byte, gasRemaining uint64
// The different between Create2 with Create is Create2 uses keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
// DESCRIBED: docs/programmers_guide/guide.md#nonce
-func (evm *EVM) Create2(caller accounts.Address, code []byte, gasRemaining uint64, endowment uint256.Int, salt *uint256.Int, bailout bool) (ret []byte, contractAddr accounts.Address, leftOverGas uint64, err error) {
+func (evm *EVM) Create2(caller accounts.Address, code []byte, gasRemaining uint64, endowment uint256.Int, salt *uint256.Int, bailout bool) (ret []byte, contractAddr accounts.Address, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
codeAndHash := &codeAndHash{code: code}
contractAddr = accounts.InternAddress(types.CreateAddress2(caller.Value(), salt.Bytes32(), codeAndHash.Hash()))
return evm.create(caller, codeAndHash, gasRemaining, endowment, contractAddr, CREATE2, true /* incrementNonce */, bailout)
@@ -535,8 +640,8 @@ func (evm *EVM) Create2(caller accounts.Address, code []byte, gasRemaining uint6
// SysCreate is a special (system) contract creation methods for genesis constructors.
// Unlike the normal Create & Create2, it doesn't increment caller's nonce.
-func (evm *EVM) SysCreate(caller accounts.Address, code []byte, gas uint64, endowment uint256.Int, contractAddr accounts.Address) (ret []byte, leftOverGas uint64, err error) {
- ret, _, leftOverGas, err = evm.create(caller, &codeAndHash{code: code}, gas, endowment, contractAddr, CREATE, false /* incrementNonce */, false)
+func (evm *EVM) SysCreate(caller accounts.Address, code []byte, gas uint64, endowment uint256.Int, contractAddr accounts.Address) (ret []byte, leftOverGas uint64, usedMultiGas multigas.MultiGas, err error) {
+ ret, _, leftOverGas, usedMultiGas, err = evm.create(caller, &codeAndHash{code: code}, gas, endowment, contractAddr, CREATE, false /* incrementNonce */, false)
return
}
@@ -572,6 +677,8 @@ func (evm *EVM) GetVMContext() *tracing.VMContext {
ChainConfig: evm.ChainConfig(),
IntraBlockState: evm.IntraBlockState(),
TxHash: evm.TxHash,
+
+ ArbOSVersion: evm.Context.ArbOSVersion,
}
}
diff --git a/execution/vm/evm_arb_tx_hook.go b/execution/vm/evm_arb_tx_hook.go
new file mode 100644
index 00000000000..bd4485d221b
--- /dev/null
+++ b/execution/vm/evm_arb_tx_hook.go
@@ -0,0 +1,120 @@
+package vm
+
+import (
+ "github.com/erigontech/erigon/arb/multigas"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/log/v3"
+ "github.com/erigontech/erigon/execution/types/accounts"
+ "github.com/erigontech/erigon/execution/vm/evmtypes"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/execution/types"
+)
+
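+// Accessors below expose interpreter internals (owning EVM, read-only flag, return
+// data buffer and config) to the Arbitrum tx-processing hook and Stylus execution.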
+func (in *EVMInterpreter) EVM() *EVM {
+ return in.VM.evm
+}
+
+func (in *EVMInterpreter) ReadOnly() bool {
+ return in.VM.readOnly
+}
+
+func (in *EVMInterpreter) SetReturnData(data []byte) {
+ in.returnData = data
+}
+
+func (in *EVMInterpreter) GetReturnData() []byte {
+ return in.returnData
+}
+
+func (in *EVMInterpreter) Config() *Config {
+ c := in.evm.Config()
+ return &c
+}
+
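+// TxProcessingHook lets ArbOS intercept transaction processing: it is consulted by
+// the state transition (start/end/gas-charging hooks) and by the EVM when executing
+// Stylus WASM contracts. DefaultTxProcessor below provides the plain Ethereum behaviour.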
+type TxProcessingHook interface {
+ // helpers
+ SetMessage(msg *types.Message, ibs evmtypes.IntraBlockState)
+ IsArbitrum() bool // returns true if this is arbos.TxProcessor
+ FillReceiptInfo(receipt *types.Receipt)
+ MsgIsNonMutating() bool
+
+ // used within STF
+ StartTxHook() (bool, multigas.MultiGas, error, []byte) // return 4-tuple rather than *struct to avoid an import cycle
+ ScheduledTxes() types.Transactions
+ EndTxHook(totalGasUsed uint64, evmSuccess bool)
+ GasChargingHook(gasRemaining *uint64, intrinsicGas uint64) (common.Address, multigas.MultiGas, error)
+ ForceRefundGas() uint64
+ NonrefundableGas() uint64
+ DropTip() bool
+ IsCalldataPricingIncreaseEnabled() bool // arbos 40/pectra
+
+ // used within evm run
+ ExecuteWASM(ctx *CallContext, input []byte, interpreter *EVMInterpreter) ([]byte, error)
+ PushContract(contract *Contract)
+ PopContract()
+
+ // vm ops
+ GasPriceOp(evm *EVM) *uint256.Int
+ L1BlockNumber(blockCtx evmtypes.BlockContext) (uint64, error)
+ L1BlockHash(blockCtx evmtypes.BlockContext, l1BlockNumber uint64) (common.Hash, error)
+}
+
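+// DefaultTxProcessor is the hook installed by NewEVM by default; its methods are
+// no-ops or return the standard Ethereum values.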
+type DefaultTxProcessor struct {
+ evm *EVM
+}
+
+func (p DefaultTxProcessor) IsArbitrum() bool { return false }
+
+func (p DefaultTxProcessor) SetMessage(*types.Message, evmtypes.IntraBlockState) {}
+
+func (p DefaultTxProcessor) StartTxHook() (bool, multigas.MultiGas, error, []byte) {
+ return false, multigas.ZeroGas(), nil, nil
+}
+
+func (p DefaultTxProcessor) GasChargingHook(gasRemaining *uint64, intrinsicGas uint64) (accounts.Address, multigas.MultiGas, error) {
+ return p.evm.Context.Coinbase, multigas.ZeroGas(), nil
+}
+
+func (p DefaultTxProcessor) PushContract(contract *Contract) {}
+
+func (p DefaultTxProcessor) PopContract() {}
+
+func (p DefaultTxProcessor) ForceRefundGas() uint64 { return 0 }
+
+func (p DefaultTxProcessor) NonrefundableGas() uint64 { return 0 }
+
+func (p DefaultTxProcessor) DropTip() bool { return false }
+
+func (p DefaultTxProcessor) EndTxHook(totalGasUsed uint64, evmSuccess bool) {}
+
+func (p DefaultTxProcessor) ScheduledTxes() types.Transactions {
+ return types.Transactions{}
+}
+
+func (p DefaultTxProcessor) L1BlockNumber(blockCtx evmtypes.BlockContext) (uint64, error) {
+ return blockCtx.BlockNumber, nil
+}
+
+func (p DefaultTxProcessor) L1BlockHash(blockCtx evmtypes.BlockContext, l1BlockNumber uint64) (common.Hash, error) {
+ return blockCtx.GetHash(l1BlockNumber)
+}
+
+func (p DefaultTxProcessor) GasPriceOp(evm *EVM) *uint256.Int {
+ return &p.evm.GasPrice
+}
+
+func (p DefaultTxProcessor) FillReceiptInfo(*types.Receipt) {}
+
+func (p DefaultTxProcessor) MsgIsNonMutating() bool {
+ return false
+}
+
+func (p DefaultTxProcessor) ExecuteWASM(scope *CallContext, input []byte, interpreter *EVMInterpreter) ([]byte, error) {
+ log.Crit("tried to execute WASM with default processing hook")
+ return nil, nil
+}
+
+func (p DefaultTxProcessor) IsCalldataPricingIncreaseEnabled() bool {
+ return true
+}
diff --git a/execution/vm/evmtypes/evmtypes.go b/execution/vm/evmtypes/evmtypes.go
index 07ce898e1bd..301c311ab82 100644
--- a/execution/vm/evmtypes/evmtypes.go
+++ b/execution/vm/evmtypes/evmtypes.go
@@ -19,6 +19,7 @@ package evmtypes
import (
"math/big"
+ "github.com/erigontech/erigon/arb/multigas"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -49,6 +50,11 @@ type BlockContext struct {
BaseFee uint256.Int // Provides information for BASEFEE
PrevRanDao *common.Hash // Provides information for PREVRANDAO
BlobBaseFee uint256.Int // Provides information for BLOBBASEFEE
+
+ // Arbitrum: current ArbOS version
+ ArbOSVersion uint64
+ // BaseFeeInBlock is a copy of BaseFee for use in Arbitrum's geth hooks and precompiles
+ // when BaseFee is lowered to 0 (i.e. the VM runs with the NoBaseFee flag and a zero gas price).
+ // It is nil when BaseFee has not been lowered to 0.
+ BaseFeeInBlock *uint256.Int
+
}
// TxContext provides the EVM with information about a transaction.
@@ -75,6 +81,13 @@ type ExecutionResult struct {
FeeBurnt uint256.Int
BurntContractAddress accounts.Address
EvmRefund uint64 // Gas refunded by EVM without considering refundQuotient
+
+ // Arbitrum: a tx may yield others that need to run afterward (see retryables)
+ ScheduledTxes types.Transactions
+ // Arbitrum: the contract deployed from the top-level transaction, or nil if not a contract creation tx
+ TopLevelDeployed *common.Address
+ // Arbitrum: total used multi-dimensional gas
+ UsedMultiGas multigas.MultiGas
}
// Unwrap returns the internal evm error which allows us for further
@@ -133,4 +146,60 @@ type IntraBlockState interface {
BlockNumber() uint64
TxIndex() int
Incarnation() int
+
+ // Arbitrum deprecated API
+ CreateAccount(common.Address, bool) error
+
+ RemoveEscrowProtection(addr common.Address)
+ ExpectBalanceBurn(amount *uint256.Int)
+ ExpectBalanceMint(amount *uint256.Int)
+
+ GetNonce(common.Address) (uint64, error)
+ SetNonce(common.Address, uint64) error
+
+ GetCodeHash(common.Address) (common.Hash, error)
+ GetCode(common.Address) ([]byte, error)
+ SetCode(common.Address, []byte) error
+ GetCodeSize(common.Address) (int, error)
+
+ // eip-7702; delegated designations
+ ResolveCodeHash(common.Address) (common.Hash, error)
+ ResolveCode(common.Address) ([]byte, error)
+ GetDelegatedDesignation(common.Address) (common.Address, bool, error)
+
+ AddRefund(uint64)
+ GetRefund() uint64
+ SubRefund(gas uint64) error
+
+ GetCommittedState(common.Address, common.Hash, *uint256.Int) error
+ GetState(address common.Address, slot common.Hash, outValue *uint256.Int) error
+ SetState(common.Address, common.Hash, uint256.Int) error
+
+ GetTransientState(addr common.Address, key common.Hash) uint256.Int
+ SetTransientState(addr common.Address, key common.Hash, value uint256.Int)
+
+ Selfdestruct(common.Address) (bool, error)
+ HasSelfdestructed(common.Address) (bool, error)
+ Selfdestruct6780(common.Address) error
+
+ // Exist reports whether the given account exists in state.
+ // Notably this should also return true for suicided accounts.
+ Exist(common.Address) (bool, error)
+ // Empty returns whether the given account is empty. Empty
+ // is defined according to EIP161 (balance = nonce = code = 0).
+ Empty(common.Address) (bool, error)
+
+ Prepare(rules *chain.Rules, sender, coinbase common.Address, dest *common.Address,
+ precompiles []common.Address, txAccesses types.AccessList, authorities []common.Address) error
+
+ AddressInAccessList(addr common.Address) bool
+ // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform
+ // even if the feature/fork is not active yet
+ AddAddressToAccessList(addr common.Address) (addrMod bool)
+ // AddSlotToAccessList adds the given (address,slot) to the access list. This operation is safe to perform
+ // even if the feature/fork is not active yet
+ AddSlotToAccessList(addr common.Address, slot common.Hash) (addrMod, slotMod bool)
+
+ RevertToSnapshot(int, error)
+ Snapshot() int
}
diff --git a/execution/vm/evmtypes/rules.go b/execution/vm/evmtypes/rules.go
index 56ce681bc7d..9f8362a7f03 100644
--- a/execution/vm/evmtypes/rules.go
+++ b/execution/vm/evmtypes/rules.go
@@ -19,6 +19,7 @@ package evmtypes
import (
"math/big"
+ "github.com/erigontech/erigon/arb/osver"
"github.com/erigontech/erigon/execution/chain"
)
@@ -40,13 +41,17 @@ func (bc *BlockContext) Rules(c *chain.Config) *chain.Rules {
IsIstanbul: c.IsIstanbul(bc.BlockNumber),
IsBerlin: c.IsBerlin(bc.BlockNumber),
IsLondon: c.IsLondon(bc.BlockNumber),
- IsShanghai: c.IsShanghai(bc.Time) || c.IsAgra(bc.BlockNumber),
- IsCancun: c.IsCancun(bc.Time),
+ IsShanghai: c.IsShanghai(bc.Time, bc.ArbOSVersion) || c.IsAgra(bc.BlockNumber),
+ IsCancun: c.IsCancun(bc.Time, bc.ArbOSVersion),
IsNapoli: c.IsNapoli(bc.BlockNumber),
IsBhilai: c.IsBhilai(bc.BlockNumber),
- IsPrague: c.IsPrague(bc.Time) || c.IsBhilai(bc.BlockNumber),
- IsOsaka: c.IsOsaka(bc.Time),
+ IsPrague: c.IsPrague(bc.Time, bc.ArbOSVersion) || c.IsBhilai(bc.BlockNumber),
+ IsOsaka: c.IsOsaka(bc.BlockNumber, bc.Time, bc.ArbOSVersion),
IsAmsterdam: c.IsAmsterdam(bc.Time),
IsAura: c.Aura != nil,
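+ // Arbitrum: the rules below are derived from the ArbOS version carried in the block context.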
+ ArbOSVersion: bc.ArbOSVersion,
+ IsArbitrum: c.IsArbitrum(),
+ IsStylus: c.IsArbitrum() && bc.ArbOSVersion >= osver.ArbosVersion_Stylus,
+ IsDia: c.IsArbitrum() && bc.ArbOSVersion >= osver.ArbosVersion_Dia,
}
}
diff --git a/execution/vm/gas.go b/execution/vm/gas.go
index ff18f090957..4bee002b22a 100644
--- a/execution/vm/gas.go
+++ b/execution/vm/gas.go
@@ -20,6 +20,8 @@
package vm
import (
+ "github.com/erigontech/erigon/arb/multigas"
+ "github.com/erigontech/erigon/execution/chain/params"
"github.com/holiman/uint256"
)
@@ -54,3 +56,19 @@ func callGas(isEip150 bool, availableGas, base uint64, callCost *uint256.Int) (u
return callCost.Uint64(), nil
}
+
+// addConstantMultiGas adds to usedMultiGas the constant multi-gas cost of an opcode.
+func addConstantMultiGas(usedMultiGas *multigas.MultiGas, cost uint64, op OpCode) {
+ // SELFDESTRUCT is a special case because it charges for storage access but it isn't
+ // dependent on any input data. We charge a small computational cost for warm access like
+ // other multi-dimensional gas opcodes, and the rest is storage access to delete the
+ // contract from the database.
+ // Note we only need to cover EIP150 because it is the current cost; the SELFDESTRUCT cost was
+ // zero previously.
+ if op == SELFDESTRUCT && cost == params.SelfdestructGasEIP150 {
+ usedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, params.WarmStorageReadCostEIP2929)
+ usedMultiGas.SaturatingIncrementInto(multigas.ResourceKindStorageAccess, cost-params.WarmStorageReadCostEIP2929)
+ } else {
+ usedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, cost)
+ }
+}
diff --git a/execution/vm/gas_table.go b/execution/vm/gas_table.go
index c48059f614b..06dd42ebeef 100644
--- a/execution/vm/gas_table.go
+++ b/execution/vm/gas_table.go
@@ -22,6 +22,7 @@ package vm
import (
"errors"
"fmt"
+ "github.com/erigontech/erigon/arb/multigas"
"github.com/erigontech/erigon/common"
"github.com/erigontech/erigon/common/dbg"
@@ -32,9 +33,9 @@ import (
// memoryGasCost calculates the quadratic gas for memory expansion. It does so
// only for the memory region that is expanded, not the total memory.
-func memoryGasCost(callContext *CallContext, newMemSize uint64) (uint64, error) {
+func memoryGasCost(callContext *CallContext, newMemSize uint64) (multigas.MultiGas, error) {
if newMemSize == 0 {
- return 0, nil
+ return multigas.ZeroGas(), nil
}
// The maximum that will fit in a uint64 is max_word_count - 1. Anything above
// that will result in an overflow. Additionally, a newMemSize which results in
@@ -42,7 +43,7 @@ func memoryGasCost(callContext *CallContext, newMemSize uint64) (uint64, error)
// overflow. The constant 0x1FFFFFFFE0 is the highest number that can be used
// without overflowing the gas calculation.
if newMemSize > 0x1FFFFFFFE0 {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
newMemSizeWords := ToWordSize(newMemSize)
newMemSize = newMemSizeWords * 32
@@ -56,9 +57,11 @@ func memoryGasCost(callContext *CallContext, newMemSize uint64) (uint64, error)
fee := newTotalFee - callContext.Memory.lastGasCost
callContext.Memory.lastGasCost = newTotalFee
- return fee, nil
+ // Memory expansion considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ return multigas.ComputationGas(fee), nil
}
- return 0, nil
+ return multigas.ZeroGas(), nil
}
// memoryCopierGas creates the gas functions for the following opcodes, and takes
@@ -70,26 +73,38 @@ func memoryGasCost(callContext *CallContext, newMemSize uint64) (uint64, error)
// EXTCODECOPY (stack position 3)
// RETURNDATACOPY (stack position 2)
func memoryCopierGas(stackpos int) gasFunc {
- return func(_ *EVM, callContext *CallContext, scaopeGas uint64, memorySize uint64) (uint64, error) {
+ return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
// Gas for expanding the memory
- gas, err := memoryGasCost(callContext, memorySize)
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
// And gas for copying data, charged per word at param.CopyGas
words, overflow := callContext.Stack.Back(stackpos).Uint64WithOverflow()
if overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- if words, overflow = math.SafeMul(ToWordSize(words), params.CopyGas); overflow {
- return 0, ErrGasUintOverflow
+ var wordCopyGas uint64
+ if wordCopyGas, overflow = math.SafeMul(ToWordSize(words), params.CopyGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- if gas, overflow = math.SafeAdd(gas, words); overflow {
- return 0, ErrGasUintOverflow
+ // Distribute copy gas by dimension:
+ // - For EXTCODECOPY: count as ResourceKindStorageAccess since it is the only opcode
+ // using stack position 3 and reading from the state trie.
+ // - For others: count as ResourceKindComputation since they are in-memory operations
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ var dim multigas.ResourceKind
+ if stackpos == 3 {
+ dim = multigas.ResourceKindStorageAccess // EXTCODECOPY
+ } else {
+ dim = multigas.ResourceKindComputation // CALLDATACOPY, CODECOPY, MCOPY, RETURNDATACOPY
+ }
+ if multiGas, overflow = multiGas.SafeIncrement(dim, wordCopyGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
}
@@ -101,7 +116,7 @@ var (
gasReturnDataCopy = memoryCopierGas(2)
)
-func gasSStore(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasSStore(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
value, x := callContext.Stack.Back(1), callContext.Stack.Back(0)
key := accounts.InternKey(x.Bytes32())
current, _ := evm.IntraBlockState().GetState(callContext.Address(), key)
@@ -116,12 +131,12 @@ func gasSStore(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize u
// 3. From a non-zero to a non-zero (CHANGE)
switch {
case current.IsZero() && !value.IsZero(): // 0 => non 0
- return params.SstoreSetGas, nil
+ return multigas.StorageGrowthGas(params.SstoreSetGas), nil
case !current.IsZero() && value.IsZero(): // non 0 => 0
evm.IntraBlockState().AddRefund(params.SstoreRefundGas)
- return params.SstoreClearGas, nil
+ return multigas.StorageAccessGas(params.SstoreClearGas), nil
default: // non 0 => non 0 (or 0 => 0)
- return params.SstoreResetGas, nil
+ return multigas.StorageAccessGas(params.SstoreResetGas), nil
}
}
// The new gas metering is based on net gas costs (EIP-1283):
@@ -139,17 +154,17 @@ func gasSStore(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize u
// 2.2.2.1. If original value is 0, add 19800 gas to refund counter.
// 2.2.2.2. Otherwise, add 4800 gas to refund counter.
if current.Eq(value) { // noop (1)
- return params.NetSstoreNoopGas, nil
+ return multigas.StorageAccessGas(params.NetSstoreNoopGas), nil
}
var original, _ = evm.IntraBlockState().GetCommittedState(callContext.Address(), key)
if original == current {
if original.IsZero() { // create slot (2.1.1)
- return params.NetSstoreInitGas, nil
+ return multigas.StorageGrowthGas(params.NetSstoreInitGas), nil
}
if value.IsZero() { // delete slot (2.1.2b)
evm.IntraBlockState().AddRefund(params.NetSstoreClearRefund)
}
- return params.NetSstoreCleanGas, nil // write existing slot (2.1.2)
+ return multigas.StorageAccessGas(params.NetSstoreCleanGas), nil // write existing slot (2.1.2)
}
if !original.IsZero() {
if current.IsZero() { // recreate slot (2.2.1.1)
@@ -166,7 +181,7 @@ func gasSStore(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize u
}
}
- return params.NetSstoreDirtyGas, nil
+ return multigas.StorageAccessGas(params.NetSstoreDirtyGas), nil
}
// 0. If *gasleft* is less than or equal to 2300, fail the current call.
@@ -182,10 +197,10 @@ func gasSStore(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize u
// 2.2.2. If original value equals new value (this storage slot is reset):
// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter.
// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter.
-func gasSStoreEIP2200(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasSStoreEIP2200(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
// If we fail the minimum gas availability invariant, fail (0)
if callContext.gas <= params.SstoreSentryGasEIP2200 {
- return 0, errors.New("not enough gas for reentrancy sentry")
+ return multigas.ZeroGas(), errors.New("not enough gas for reentrancy sentry")
}
// Gas sentry honoured, do the actual gas calculation based on the stored value
value, x := callContext.Stack.Back(1), callContext.Stack.Back(0)
@@ -193,18 +208,18 @@ func gasSStoreEIP2200(evm *EVM, callContext *CallContext, scopeGas uint64, memor
current, _ := evm.IntraBlockState().GetState(callContext.Address(), key)
if current.Eq(value) { // noop (1)
- return params.SloadGasEIP2200, nil
+ return multigas.StorageAccessGas(params.SloadGasEIP2200), nil
}
var original, _ = evm.IntraBlockState().GetCommittedState(callContext.Address(), key)
if original == current {
if original.IsZero() { // create slot (2.1.1)
- return params.SstoreSetGasEIP2200, nil
+ return multigas.StorageGrowthGas(params.SstoreSetGasEIP2200), nil
}
if value.IsZero() { // delete slot (2.1.2b)
evm.IntraBlockState().AddRefund(params.SstoreClearsScheduleRefundEIP2200)
}
- return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2)
+ return multigas.StorageAccessGas(params.SstoreResetGasEIP2200), nil
}
if !original.IsZero() {
if current.IsZero() { // recreate slot (2.2.1.1)
@@ -220,61 +235,90 @@ func gasSStoreEIP2200(evm *EVM, callContext *CallContext, scopeGas uint64, memor
evm.IntraBlockState().AddRefund(params.SstoreResetGasEIP2200 - params.SloadGasEIP2200)
}
}
- return params.SloadGasEIP2200, nil // dirty update (2.2)
+ return multigas.StorageAccessGas(params.SloadGasEIP2200), nil // dirty update (2.2)
}
func makeGasLog(n uint64) gasFunc {
- return func(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+ return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
requestedSize, overflow := callContext.Stack.Back(1).Uint64WithOverflow()
if overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- gas, err := memoryGasCost(callContext, memorySize)
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
- if gas, overflow = math.SafeAdd(gas, params.LogGas); overflow {
- return 0, ErrGasUintOverflow
+ // Base LOG operation considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, params.LogGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- if gas, overflow = math.SafeAdd(gas, n*params.LogTopicGas); overflow {
- return 0, ErrGasUintOverflow
+ if evm.chainRules.IsArbitrum {
+ // Per-topic cost is split between history growth and computation:
+ // - A fixed number of bytes per topic is persisted in history (topicBytes),
+ // and those bytes are charged at LogDataGas (gas per byte) as history growth.
+ // - The remainder of the per-topic cost is attributed to computation (e.g. hashing/bloom work).
+
+ // Scale by number of topics for LOG0..LOG4
+ var topicHistTotal, topicCompTotal uint64
+ if topicHistTotal, overflow = math.SafeMul(n, params.LogTopicHistoryGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
+ }
+ if topicCompTotal, overflow = math.SafeMul(n, params.LogTopicComputationGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
+ }
+
+ // Apply the split.
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindHistoryGrowth, topicHistTotal); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
+ }
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, topicCompTotal); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
+ }
+ } else {
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, n*params.LogTopicGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
+ }
}
+ // Data payload bytes → history growth at LogDataGas (gas per byte).
var memorySizeGas uint64
if memorySizeGas, overflow = math.SafeMul(requestedSize, params.LogDataGas); overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- if gas, overflow = math.SafeAdd(gas, memorySizeGas); overflow {
- return 0, ErrGasUintOverflow
+ // Event log data considered as history growth.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindHistoryGrowth, memorySizeGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
}
-func gasKeccak256(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- gas, err := memoryGasCost(callContext, memorySize)
+func gasKeccak256(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
wordGas, overflow := callContext.Stack.Back(1).Uint64WithOverflow()
if overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
if wordGas, overflow = math.SafeMul(ToWordSize(wordGas), params.Keccak256WordGas); overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- if gas, overflow = math.SafeAdd(gas, wordGas); overflow {
- return 0, ErrGasUintOverflow
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, wordGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
// pureMemoryGascost is used by several operations, which aside from their
// static cost have a dynamic cost which is solely based on the memory
// expansion
-func pureMemoryGascost(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func pureMemoryGascost(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
return memoryGasCost(callContext, memorySize)
}
@@ -287,72 +331,86 @@ var (
gasCreate = pureMemoryGascost
)
-func gasCreate2(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- gas, err := memoryGasCost(callContext, memorySize)
+func gasCreate2(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
size, overflow := callContext.Stack.Back(2).Uint64WithOverflow()
if overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
numWords := ToWordSize(size)
wordGas, overflow := math.SafeMul(numWords, params.Keccak256WordGas)
if overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- gas, overflow = math.SafeAdd(gas, wordGas)
- if overflow {
- return 0, ErrGasUintOverflow
+ // Keccak hashing considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, wordGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
-func gasCreateEip3860(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- gas, err := memoryGasCost(callContext, memorySize)
+func gasCreateEip3860(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
size, overflow := callContext.Stack.Back(2).Uint64WithOverflow()
if overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- if size > params.MaxInitCodeSize {
- return 0, fmt.Errorf("%w: size %d", ErrMaxInitCodeSizeExceeded, size)
+ //var ics = uint64(params.MaxCodeSize)
+ //if evm.chainRules.IsArbitrum {
+ // ics = evm.chainConfig.MaxInitCodeSize()
+ //}
+ if size > evm.chainConfig.MaxInitCodeSize() {
+ return multigas.ZeroGas(), fmt.Errorf("%w: size %d", ErrMaxInitCodeSizeExceeded, size)
}
- numWords := ToWordSize(size)
- // Since size <= params.MaxInitCodeSize, this multiplication cannot overflow
- wordGas := params.InitCodeWordGas * numWords
- gas, overflow = math.SafeAdd(gas, wordGas)
- if overflow {
- return 0, ErrGasUintOverflow
+ // Since size is capped by MaxInitCodeSize, this multiplication cannot overflow
+ moreGas := params.InitCodeWordGas * ((size + 31) / 32) // numWords
+
+ // Init code execution considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, moreGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
-func gasCreate2Eip3860(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- gas, err := memoryGasCost(callContext, memorySize)
+func gasCreate2Eip3860(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
size, overflow := callContext.Stack.Back(2).Uint64WithOverflow()
if overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- if size > params.MaxInitCodeSize {
- return 0, fmt.Errorf("%w: size %d", ErrMaxInitCodeSizeExceeded, size)
+ //var ics = uint64(params.MaxCodeSize)
+ //if evm.chainRules.IsArbitrum {
+ // ics = evm.chainConfig.MaxInitCodeSize()
+ //}
+ if size > evm.chainConfig.MaxInitCodeSize() {
+ return multigas.ZeroGas(), fmt.Errorf("%w: size %d", ErrMaxInitCodeSizeExceeded, size)
}
- numWords := ToWordSize(size)
- // Since size <= params.MaxInitCodeSize, this multiplication cannot overflow
- wordGas := (params.InitCodeWordGas + params.Keccak256WordGas) * numWords
- gas, overflow = math.SafeAdd(gas, wordGas)
- if overflow {
- return 0, ErrGasUintOverflow
+ // Since size is capped by MaxInitCodeSize, this multiplication cannot overflow
+ moreGas := (params.InitCodeWordGas + params.Keccak256WordGas) * ((size + 31) / 32) // numWords
+
+ // Init code execution and Keccak hashing both considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, moreGas); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
-func gasExpFrontier(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasExpFrontier(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
expByteLen := uint64(common.BitLenToByteLen(callContext.Stack.data[callContext.Stack.len()-2].BitLen()))
var (
@@ -360,66 +418,69 @@ func gasExpFrontier(_ *EVM, callContext *CallContext, scopeGas uint64, memorySiz
overflow bool
)
if gas, overflow = math.SafeAdd(gas, params.ExpGas); overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multigas.ComputationGas(gas), nil
}
-func gasExpEIP160(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasExpEIP160(_ *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
expByteLen := uint64(common.BitLenToByteLen(callContext.Stack.data[callContext.Stack.len()-2].BitLen()))
var (
gas = expByteLen * params.ExpByteEIP160 // no overflow check required. Max is 256 * ExpByte gas
overflow bool
)
+
if gas, overflow = math.SafeAdd(gas, params.ExpGas); overflow {
- return 0, ErrGasUintOverflow
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multigas.ComputationGas(gas), nil
}
-func gasCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
var (
- gas uint64
+ multiGas = multigas.ZeroGas()
transfersValue = !callContext.Stack.Back(2).IsZero()
address = accounts.InternAddress(callContext.Stack.Back(1).Bytes20())
)
if evm.ChainRules().IsSpuriousDragon {
empty, err := evm.IntraBlockState().Empty(address)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if transfersValue && empty {
- gas += params.CallNewAccountGas
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageGrowth, params.CallNewAccountGas)
}
} else {
exists, err := evm.IntraBlockState().Exist(address)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if !exists {
- gas += params.CallNewAccountGas
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageGrowth, params.CallNewAccountGas)
}
}
- if transfersValue {
- gas += params.CallValueTransferGas
+ // Value transfer to non-empty account considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if transfersValue { // && !evm.chainRules.IsEIP4762
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindComputation, params.CallValueTransferGas)
}
- memoryGas, err := memoryGasCost(callContext, memorySize)
+
+ memoryMultiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
-
- var overflow bool
- if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
- return 0, ErrGasUintOverflow
+ multiGas, overflow := multiGas.SafeAdd(memoryMultiGas)
+ if overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- var callGasTemp uint64
- callGasTemp, err = callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, gas, callContext.Stack.Back(0))
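+ // SingleGas() is assumed to collapse all multi-gas dimensions back into the legacy one-dimensional total expected by callGas.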
+ singleGas := multiGas.SingleGas()
+ callGasTemp, err := callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, singleGas, callContext.Stack.Back(0))
evm.SetCallGasTemp(callGasTemp)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if dbg.TraceDyanmicGas && evm.intraBlockState.Trace() {
@@ -427,61 +488,65 @@ func gasCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uin
evm.intraBlockState.BlockNumber(), evm.intraBlockState.TxIndex(), evm.intraBlockState.Incarnation(), gas-memoryGas, memorySize, memoryGas, callGasTemp)
}
- if gas, overflow = math.SafeAdd(gas, callGasTemp); overflow {
- return 0, ErrGasUintOverflow
+ // Call gas forwarding considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, callGasTemp); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
-func gasCallCode(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasCallCode(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
memoryGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
- var (
- gas uint64
- overflow bool
- )
- if !callContext.Stack.Back(2).IsZero() {
- gas += params.CallValueTransferGas
+ multiGas := multigas.ZeroGas()
+ if !callContext.Stack.Back(2).IsZero() {
+ // Value transfer to non-empty account considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindComputation, params.CallValueTransferGas)
}
- if gas, overflow = math.SafeAdd(gas, memoryGas); overflow {
- return 0, ErrGasUintOverflow
+ var overflow bool
+ multiGas, overflow = multiGas.SafeAdd(memoryGas)
+ if overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
+ singleGas := multiGas.SingleGas()
- var callGasTemp uint64
- callGasTemp, err = callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, gas, callContext.Stack.Back(0))
+ callGasTemp, err := callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, singleGas, callContext.Stack.Back(0))
evm.SetCallGasTemp(callGasTemp)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
-
if dbg.TraceDyanmicGas && evm.intraBlockState.Trace() {
fmt.Printf("%d (%d.%d) CallCode Gas: base: %d memory(%d): %d call: %d\n",
evm.intraBlockState.BlockNumber(), evm.intraBlockState.TxIndex(), evm.intraBlockState.Incarnation(), gas-memoryGas, memorySize, memoryGas, callGasTemp)
}
- if gas, overflow = math.SafeAdd(gas, callGasTemp); overflow {
- return 0, ErrGasUintOverflow
+ // Call gas forwarding considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, callGasTemp); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
-func gasDelegateCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- gas, err := memoryGasCost(callContext, memorySize)
+func gasDelegateCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
- var callGasTemp uint64
- callGasTemp, err = callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, gas, callContext.Stack.Back(0))
+ gas := multiGas.SingleGas()
+ callGasTemp, err := callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, gas, callContext.Stack.Back(0))
evm.SetCallGasTemp(callGasTemp)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if dbg.TraceDyanmicGas && evm.intraBlockState.Trace() {
@@ -489,25 +554,29 @@ func gasDelegateCall(evm *EVM, callContext *CallContext, scopeGas uint64, memory
evm.intraBlockState.BlockNumber(), evm.intraBlockState.TxIndex(), evm.intraBlockState.Incarnation(), memorySize, gas, callGasTemp)
}
+
+ // Call gas forwarding considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
var overflow bool
- if gas, overflow = math.SafeAdd(gas, callGasTemp); overflow {
- return 0, ErrGasUintOverflow
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, callGasTemp); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+
+ return multiGas, nil
}
-func gasStaticCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- gas, err := memoryGasCost(callContext, memorySize)
+func gasStaticCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ multiGas, err := memoryGasCost(callContext, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
- var callGasTemp uint64
- callGasTemp, err = callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, gas, callContext.Stack.Back(0))
+ gas := multiGas.SingleGas()
+ callGasTemp, err := callGas(evm.ChainRules().IsTangerineWhistle, scopeGas, gas, callContext.Stack.Back(0))
evm.SetCallGasTemp(callGasTemp)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if dbg.TraceDyanmicGas && evm.intraBlockState.Trace() {
@@ -515,51 +584,55 @@ func gasStaticCall(evm *EVM, callContext *CallContext, scopeGas uint64, memorySi
evm.intraBlockState.BlockNumber(), evm.intraBlockState.TxIndex(), evm.intraBlockState.Incarnation(), memorySize, gas, callGasTemp)
}
+ // Call gas forwarding considered as computation.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
var overflow bool
- if gas, overflow = math.SafeAdd(gas, callGasTemp); overflow {
- return 0, ErrGasUintOverflow
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindComputation, callGasTemp); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
-func gasSelfdestruct(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- var gas uint64
+func gasSelfdestruct(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ multiGas := multigas.ZeroGas()
// TangerineWhistle (EIP150) gas reprice fork:
if evm.ChainRules().IsTangerineWhistle {
- gas = params.SelfdestructGasEIP150
+ // Selfdestruct operation considered as storage access.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageAccess, params.SelfdestructGasEIP150)
var address = accounts.InternAddress(callContext.Stack.Back(0).Bytes20())
if evm.ChainRules().IsSpuriousDragon {
// if empty and transfers value
empty, err := evm.IntraBlockState().Empty(address)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
balance, err := evm.IntraBlockState().GetBalance(callContext.Address())
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if empty && !balance.IsZero() {
- gas += params.CreateBySelfdestructGas
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageGrowth, params.CreateBySelfdestructGas)
}
} else {
exist, err := evm.IntraBlockState().Exist(address)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if !exist {
- gas += params.CreateBySelfdestructGas
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageGrowth, params.CreateBySelfdestructGas)
}
}
}
hasSelfdestructed, err := evm.IntraBlockState().HasSelfdestructed(callContext.Address())
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if !hasSelfdestructed {
evm.IntraBlockState().AddRefund(params.SelfdestructRefundGas)
}
- return gas, nil
+ return multiGas, nil
}
diff --git a/execution/vm/instructions.go b/execution/vm/instructions.go
index 5e83fd28dd5..cca300f7d8f 100644
--- a/execution/vm/instructions.go
+++ b/execution/vm/instructions.go
@@ -651,7 +651,13 @@ func opExtCodeHash(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (
}
func opGasprice(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint64, []byte, error) {
- scope.Stack.push(interpreter.evm.GasPrice)
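+ // MERGE_ARBITRUM: both branches are kept while the merge is in progress; the else branch preserves the upstream behaviour for reference.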
+ if true { // MERGE_ARBITRUM
+ gasPrice := interpreter.evm.ProcessingHook.GasPriceOp(interpreter.evm.GasPrice)
+ scope.Stack.push(gasPrice)
+ } else {
+ scope.Stack.push(interpreter.evm.GasPrice)
+ }
return pc, nil, nil
}
@@ -664,14 +670,20 @@ func opBlockhash(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (ui
return pc, nil, nil
}
var upper, lower uint64
- upper = interpreter.evm.Context.BlockNumber
+ // Arbitrum
+ upper, err := interpreter.evm.ProcessingHook.L1BlockNumber(interpreter.evm.Context)
+ if err != nil {
+ return pc, nil, err
+ }
+ // upper = interpreter.evm.Context.BlockNumber // must be returned by default hook
if upper <= params.BlockHashOldWindow {
lower = 0
} else {
lower = upper - params.BlockHashOldWindow
}
if arg64 >= lower && arg64 < upper {
- hash, err := interpreter.evm.Context.GetHash(arg64)
+ hash, err := interpreter.evm.ProcessingHook.L1BlockHash(interpreter.evm.Context, arg64)
+ //hash, err := interpreter.evm.Context.GetHash(arg64)
if err != nil {
arg.Clear()
return pc, nil, err
@@ -706,8 +718,19 @@ func opTimestamp(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (ui
}
func opNumber(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint64, []byte, error) {
- v := new(uint256.Int).SetUint64(interpreter.evm.Context.BlockNumber)
- scope.Stack.push(*v)
+ if true { // MERGE_ARBITRUM
+ bnum, err := interpreter.evm.ProcessingHook.L1BlockNumber(interpreter.evm.Context)
+ if err != nil {
+ return pc, nil, err
+ }
+
+ v := uint256.NewInt(bnum)
+ scope.Stack.push(*v)
+ } else {
+ v := new(uint256.Int).SetUint64(interpreter.evm.Context.BlockNumber)
+ scope.Stack.push(*v)
+
+ }
return pc, nil, nil
}
@@ -976,9 +999,14 @@ func opCreate(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint6
// reuse size int for stackvalue
stackvalue := size
- scope.useGas(gas, interpreter.evm.Config().Tracer, tracing.GasChangeCallContractCreation)
+ if true { // MERGE_ARBITRUM
+ scope.Contract.UseMultiGas(multigas.ComputationGas(gas), interpreter.evm.Config().Tracer, tracing.GasChangeCallContractCreation)
+ } else {
+ scope.useGas(gas, interpreter.evm.Config().Tracer, tracing.GasChangeCallContractCreation)
+ }
- res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract.Address(), input, gas, value, false)
+ res, addr, returnGas, usedMultiGas, suberr := interpreter.evm.Create(scope.Contract.Address(), input, gas, &value, false)
// Push item on the stack based on the returned error. If the ruleset is
// homestead we must check for CodeStoreOutOfGasError (homestead only
@@ -992,8 +1020,13 @@ func opCreate(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint6
addrVal := addr.Value()
stackvalue.SetBytes(addrVal[:])
}
+ //scope.Stack.push(stackvalue) // TODO: arbitrum does that, but we get stack corruption if we do it here
scope.refundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ if true { // MERGE_ARBITRUM
+ scope.Contract.RetainedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, gas)
+ scope.Contract.UsedMultiGas.SaturatingAddInto(usedMultiGas)
+ }
if suberr == ErrExecutionReverted {
interpreter.returnData = res // set REVERT data to return data buffer
@@ -1029,10 +1062,10 @@ func opCreate2(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint
// Apply EIP150
gas -= gas / 64
- scope.useGas(gas, interpreter.evm.Config().Tracer, tracing.GasChangeCallContractCreation2)
+ scope.Contract.UseMultiGas(multigas.ComputationGas(gas), interpreter.evm.Config().Tracer, tracing.GasChangeCallContractCreation2)
// reuse size int for stackvalue
stackValue := size
- res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract.Address(), input, gas, endowment, &salt, false)
+ res, addr, returnGas, usedMultiGas, suberr := interpreter.evm.Create2(scope.Contract.Address(), input, gas, &endowment, &salt, false)
// Push item on the stack based on the returned error.
if suberr != nil {
@@ -1044,6 +1077,10 @@ func opCreate2(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint
scope.Stack.push(stackValue)
scope.refundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ if true { // MERGE_ARBITRUM
+ scope.Contract.RetainedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, gas)
+ scope.Contract.UsedMultiGas.SaturatingAddInto(usedMultiGas)
+ }
if suberr == ErrExecutionReverted {
interpreter.returnData = res // set REVERT data to return data buffer
@@ -1076,6 +1113,7 @@ func opCall(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint64,
toAddr := accounts.InternAddress(addr.Bytes20())
// Get the arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
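+ // Remember the gas forwarded to this call before the stipend is added; used for RetainedMultiGas accounting below.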
+ ogGas := gas
if !value.IsZero() {
if interpreter.readOnly {
@@ -1084,7 +1122,7 @@ func opCall(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint64,
gas += params.CallStipend
}
- ret, returnGas, err := interpreter.evm.Call(scope.Contract.Address(), toAddr, args, gas, value, false /* bailout */)
+ ret, returnGas, usedMultiGas, err := interpreter.evm.Call(scope.Contract.Address(), toAddr, args, gas, &value, false /* bailout */)
if err != nil {
temp.Clear()
@@ -1098,6 +1136,11 @@ func opCall(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uint64,
}
scope.refundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ if true { // MERGE_ARBITRUM
+ scope.Contract.UsedMultiGas.SaturatingAddInto(usedMultiGas)
+ // Use original gas value, since evm.callGasTemp may be updated by a nested call.
+ scope.Contract.RetainedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, ogGas)
+ }
interpreter.returnData = ret
return pc, ret, nil
@@ -1124,12 +1167,13 @@ func opCallCode(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uin
toAddr := accounts.InternAddress(addr.Bytes20())
// Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
+ ogGas := gas
if !value.IsZero() {
gas += params.CallStipend
}
- ret, returnGas, err := interpreter.evm.CallCode(scope.Contract.Address(), toAddr, args, gas, value)
+ ret, returnGas, usedMultiGas, err := interpreter.evm.CallCode(scope.Contract.Address(), toAddr, args, gas, &value)
if err != nil {
temp.Clear()
} else {
@@ -1142,6 +1186,12 @@ func opCallCode(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (uin
}
scope.refundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ if true { // MERGE_ARBITRUM
+ scope.Contract.UsedMultiGas.SaturatingAddInto(usedMultiGas)
+
+ // Use original gas value, since evm.callGasTemp may be updated by a nested call.
+ scope.Contract.RetainedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, ogGas)
+ }
interpreter.returnData = ret
return pc, ret, nil
@@ -1169,7 +1219,7 @@ func opDelegateCall(pc uint64, interpreter *EVMInterpreter, scope *CallContext)
// Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract.addr, scope.Contract.caller, toAddr, args, scope.Contract.value, gas)
+ ret, returnGas, usedMultiGas, err := interpreter.evm.DelegateCall(scope.Contract.addr, scope.Contract.caller, toAddr, args, scope.Contract.value, gas)
if err != nil {
temp.Clear()
} else {
@@ -1182,6 +1232,11 @@ func opDelegateCall(pc uint64, interpreter *EVMInterpreter, scope *CallContext)
}
scope.refundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ if true { // MERGE_ARBITRUM
+ scope.Contract.UsedMultiGas.SaturatingAddInto(usedMultiGas)
+ // Use original gas value, since evm.callGasTemp may be updated by a nested call.
+ scope.Contract.RetainedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, gas)
+ }
interpreter.returnData = ret
return pc, ret, nil
@@ -1209,7 +1264,7 @@ func opStaticCall(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (u
// Get arguments from the memory.
args := scope.Memory.GetPtr(inOffset.Uint64(), inSize.Uint64())
- ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract.Address(), toAddr, args, gas)
+ ret, returnGas, usedMultiGas, err := interpreter.evm.StaticCall(scope.Contract.Address(), toAddr, args, gas)
if err != nil {
temp.Clear()
} else {
@@ -1221,6 +1276,11 @@ func opStaticCall(pc uint64, interpreter *EVMInterpreter, scope *CallContext) (u
}
scope.refundGas(returnGas, interpreter.evm.config.Tracer, tracing.GasChangeCallLeftOverRefunded)
+ if true { // MERGE_ARBITRUM
+ scope.Contract.UsedMultiGas.SaturatingAddInto(usedMultiGas)
+ // Use original gas value, since evm.callGasTemp may be updated by a nested call.
+ scope.Contract.RetainedMultiGas.SaturatingIncrementInto(multigas.ResourceKindComputation, gas)
+ }
interpreter.returnData = ret
return pc, ret, nil
@@ -1268,6 +1328,10 @@ func opSelfdestruct(pc uint64, interpreter *EVMInterpreter, scope *CallContext)
if err != nil {
return pc, nil, err
}
+ if beneficiaryAddr == scope.Contract.Address() {
+ // Arbitrum: calling selfdestruct(this) burns the balance
+ interpreter.evm.IntraBlockState().ExpectBalanceBurn(&balance)
+ }
interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, balance, tracing.BalanceIncreaseSelfdestruct)
interpreter.evm.IntraBlockState().Selfdestruct(callerAddr)
@@ -1284,6 +1348,19 @@ func opSelfdestruct6780(pc uint64, interpreter *EVMInterpreter, scope *CallConte
if interpreter.readOnly {
return pc, nil, ErrWriteProtection
}
+ // Arbitrum: revert if acting account is a Stylus program
+ if interpreter.evm.chainRules.IsStylus {
+ actingAddress := scope.Contract.Address()
+ code, err := interpreter.evm.intraBlockState.GetCode(actingAddress)
+ if err != nil {
+ return pc, nil, err
+ }
+
+ if state.IsStylusProgram(code) {
+ return pc, nil, ErrExecutionReverted
+ }
+ }
+
beneficiary := scope.Stack.pop()
callerAddr := scope.Contract.Address()
beneficiaryAddr := accounts.InternAddress(beneficiary.Bytes20())
@@ -1294,6 +1371,16 @@ func opSelfdestruct6780(pc uint64, interpreter *EVMInterpreter, scope *CallConte
interpreter.evm.IntraBlockState().SubBalance(callerAddr, balance, tracing.BalanceDecreaseSelfdestruct)
interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, balance, tracing.BalanceIncreaseSelfdestruct)
interpreter.evm.IntraBlockState().Selfdestruct6780(callerAddr)
+
+ // TODO(arbitrum): previously this was not needed; check whether it is required here.
+ if interpreter.evm.chainConfig.IsArbitrum() && beneficiaryAddr == scope.Contract.Address() {
+ // SelfDestruct6780 only destructs the contract if selfdestructing in the same transaction as contract creation
+ // So we only account for the balance burn if the contract is actually destructed by checking if the balance is zero.
+ if b, err := interpreter.evm.IntraBlockState().GetBalance(scope.Contract.Address()); err == nil && b.Sign() == 0 {
+ // Arbitrum: calling selfdestruct(this) burns the balance
+ interpreter.evm.IntraBlockState().ExpectBalanceBurn(&balance)
+ }
+ }
if interpreter.evm.Config().Tracer != nil && interpreter.evm.Config().Tracer.OnEnter != nil {
interpreter.cfg.Tracer.OnEnter(interpreter.depth, byte(SELFDESTRUCT), scope.Contract.Address(), beneficiaryAddr, false, []byte{}, 0, balance, nil)
}
@@ -1428,3 +1515,30 @@ func makeSwapStringer(n int) stringer {
return fmt.Sprintf("SWAP%d (%d %d)", n, &scope.Stack.data[len(scope.Stack.data)-1], &scope.Stack.data[len(scope.Stack.data)-(n+1)])
}
}
+
+// Arbitrum: adaptation of opBlockHash that doesn't require an EVM interpreter
+// func BlockHashOp(evm *EVM, block *big.Int) common.Hash {
+// if !block.IsUint64() {
+// return common.Hash{}
+// }
+// num64 := block.Uint64()
+// upper, err := evm.ProcessingHook.L1BlockNumber(evm.Context)
+// if err != nil {
+// return common.Hash{}
+// }
+
+// var lower uint64
+// if upper <= params.BlockHashOldWindow {
+// lower = 0
+// } else {
+// lower = upper - params.BlockHashOldWindow
+// }
+// if num64 >= lower && num64 < upper {
+// hash, err := evm.ProcessingHook.L1BlockHash(evm.Context, num64)
+// if err != nil {
+// return common.Hash{}
+// }
+// return hash
+// }
+// return common.Hash{}
+// }
diff --git a/execution/vm/instructions_test.go b/execution/vm/instructions_test.go
index 6bf966cff28..af5b7028cd7 100644
--- a/execution/vm/instructions_test.go
+++ b/execution/vm/instructions_test.go
@@ -828,7 +828,7 @@ func TestOpMCopy(t *testing.T) {
if dynamicCost, err := gasMcopy(env, callContext, 0, memorySize); err != nil {
t.Error(err)
} else {
- haveGas = GasFastestStep + dynamicCost
+ haveGas = GasFastestStep + dynamicCost.SingleGas()
}
// Expand mem
if memorySize > 0 {
diff --git a/execution/vm/interpreter.go b/execution/vm/interpreter.go
index f6965801c87..245c14b7160 100644
--- a/execution/vm/interpreter.go
+++ b/execution/vm/interpreter.go
@@ -26,6 +26,7 @@ import (
"slices"
"sync"
+ "github.com/erigontech/erigon/execution/state"
"github.com/holiman/uint256"
"github.com/erigontech/erigon/common"
@@ -49,6 +50,8 @@ type Config struct {
RestoreState bool // Revert all changes made to the state (useful for constant system calls)
ExtraEips []int // Additional EIPS that are to be enabled
+
+ ExposeMultiGas bool // Arbitrum: Expose multi-gas used in transaction receipts
}
func (vmConfig *Config) HasEip3860(rules *chain.Rules) bool {
@@ -301,6 +304,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter {
// considered a revert-and-consume-all-gas operation except for
// ErrExecutionReverted which means revert-and-keep-gas-left.
func (in *EVMInterpreter) Run(contract Contract, gas uint64, input []byte, readOnly bool) (_ []byte, _ uint64, err error) {
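+ // Arbitrum: let the processing hook track the contract call stack (presumably for Stylus execution and multi-gas bookkeeping).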
+ in.evm.ProcessingHook.PushContract(&contract)
+ defer func() { in.evm.ProcessingHook.PopContract() }()
// Don't bother with the execution if there's no code.
if len(contract.Code) == 0 {
return nil, gas, nil
@@ -336,6 +341,7 @@ func (in *EVMInterpreter) Run(contract Contract, gas uint64, input []byte, readO
if restoreReadonly {
in.readOnly = true
}
+
// Increment the call depth which is restricted to 1024
in.depth++
defer func() {
@@ -356,6 +362,12 @@ func (in *EVMInterpreter) Run(contract Contract, gas uint64, input []byte, readO
in.depth--
}()
+ // Arbitrum: handle Stylus programs
+ if in.evm.chainRules.IsStylus && state.IsStylusProgram(contract.Code) {
+ ret, err = in.evm.ProcessingHook.ExecuteWASM(callContext, input, in)
+ return
+ }
+
// The Interpreter main run loop (contextual). This loop runs until either an
// explicit STOP, RETURN or SELFDESTRUCT is executed, an error occurred during
// the execution of one of the operations or until the done flag is set by the
@@ -381,6 +393,19 @@ func (in *EVMInterpreter) Run(contract Contract, gas uint64, input []byte, readO
logged, pcCopy, gasCopy = false, pc, callContext.gas
blockNum, txIndex, txIncarnation = in.evm.intraBlockState.BlockNumber(), in.evm.intraBlockState.TxIndex(), in.evm.intraBlockState.Incarnation()
}
+
+ // TODO ARBITRUM DO WE NEED THIS
+ //if isEIP4762 && !contract.IsDeployment && !contract.IsSystemCall {
+ // // if the PC ends up in a new "chunk" of verkleized code, charge the
+ // // associated costs.
+ // contractAddr := contract.Address()
+ // consumed, wanted := in.evm.TxContext.AccessEvents.CodeChunksRangeGas(contractAddr, pc, 1, uint64(len(contract.Code)), false, contract.Gas)
+ // contract.UseMultiGas(multigas.StorageGrowthGas(consumed), in.evm.Config().Tracer, tracing.GasChangeWitnessCodeChunk)
+ // if consumed < wanted {
+ // return nil, ErrOutOfGas
+ // }
+ //}
+
// Get the operation from the jump table and validate the stack to ensure there are
// enough stack items available to perform the operation.
op = contract.GetOp(pc)
@@ -395,6 +420,7 @@ func (in *EVMInterpreter) Run(contract Contract, gas uint64, input []byte, readO
if !callContext.useGas(cost, in.cfg.Tracer, tracing.GasChangeIgnored) {
return nil, callContext.gas, ErrOutOfGas
}
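+ // Record the constant cost against the contract's multi-gas totals as well (presumably classified per opcode inside addConstantMultiGas).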
+ addConstantMultiGas(&contract.UsedMultiGas, cost, op)
// All ops with a dynamic memory usage also has a dynamic gas cost.
var memorySize uint64
@@ -416,8 +442,7 @@ func (in *EVMInterpreter) Run(contract Contract, gas uint64, input []byte, readO
}
// Consume the gas and return an error if not enough gas is available.
// cost is explicitly set so that the capture state defer method can get the proper cost
- var dynamicCost uint64
- dynamicCost, err = operation.dynamicGas(in.evm, callContext, callContext.gas, memorySize)
+ multigasDynamicCost, err := operation.dynamicGas(in.evm, callContext, callContext.gas, memorySize)
if err != nil {
return nil, callContext.gas, fmt.Errorf("%w: %v", ErrOutOfGas, err)
}
@@ -426,10 +451,14 @@ func (in *EVMInterpreter) Run(contract Contract, gas uint64, input []byte, readO
+ dynamicCost := multigasDynamicCost.SingleGas()
if dbg.TraceDyanmicGas && dynamicCost > 0 {
fmt.Printf("%d (%d.%d) Dynamic Gas: %d (%s)\n", blockNum, txIndex, txIncarnation, traceGas(op, callGas, cost), op)
}
+ cost += dynamicCost // for tracing
+ // TODO: this should probably be a single UseMultiGas call
if !callContext.useGas(dynamicCost, in.cfg.Tracer, tracing.GasChangeIgnored) {
return nil, callContext.gas, ErrOutOfGas
}
+ contract.UsedMultiGas.SaturatingAddInto(multigasDynamicCost)
}
// Do gas tracing before memory expansion
diff --git a/execution/vm/jump_table.go b/execution/vm/jump_table.go
index c0044505521..c12f5cf0bdf 100644
--- a/execution/vm/jump_table.go
+++ b/execution/vm/jump_table.go
@@ -22,12 +22,13 @@ package vm
import (
"fmt"
+ "github.com/erigontech/erigon/arb/multigas"
"github.com/erigontech/erigon/execution/protocol/params"
)
type (
executionFunc func(pc uint64, interpreter *EVMInterpreter, callContext *CallContext) (uint64, []byte, error)
- gasFunc func(*EVM, *CallContext, uint64, uint64) (uint64, error) // last parameter is the requested memory size as a uint64
+ gasFunc func(*EVM, *CallContext, uint64, uint64) (multigas.MultiGas, error) // last parameter is the requested memory size as a uint64
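+ // e.g. gasExpFrontier now returns multigas.ComputationGas(gas) instead of a plain uint64; callers use SingleGas() where a scalar is still needed.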
// memorySizeFunc returns the required size, and whether the operation overflowed a uint64
memorySizeFunc func(*CallContext) (size uint64, overflow bool)
stringer func(pc uint64, callContext *CallContext) string
diff --git a/execution/vm/operations_acl.go b/execution/vm/operations_acl.go
index 5eda3480ed0..3582baa0bb2 100644
--- a/execution/vm/operations_acl.go
+++ b/execution/vm/operations_acl.go
@@ -24,6 +24,7 @@ import (
"github.com/holiman/uint256"
+ "github.com/erigontech/erigon/arb/multigas"
"github.com/erigontech/erigon/common/math"
"github.com/erigontech/erigon/execution/protocol/params"
"github.com/erigontech/erigon/execution/tracing"
@@ -31,23 +32,25 @@ import (
)
func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
- return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+ return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
// If we fail the minimum gas availability invariant, fail (0)
if scopeGas <= params.SstoreSentryGasEIP2200 {
- return 0, errors.New("not enough gas for reentrancy sentry")
+ return multigas.ZeroGas(), errors.New("not enough gas for reentrancy sentry")
}
// Gas sentry honoured, do the actual gas calculation based on the stored value
var (
y, x = callContext.Stack.Back(1), callContext.Stack.peek()
slot = accounts.InternKey(x.Bytes32())
current uint256.Int
- cost = uint64(0)
+ multiGas = multigas.ZeroGas()
)
current, _ = evm.IntraBlockState().GetState(callContext.Address(), slot)
// If the caller cannot afford the cost, this change will be rolled back
if _, slotMod := evm.IntraBlockState().AddSlotToAccessList(callContext.Address(), slot); slotMod {
- cost = params.ColdSloadCostEIP2929
+ // Cold slot access considered as storage access.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageAccess, params.ColdSloadCostEIP2929)
}
var value uint256.Int
value.Set(y)
@@ -55,21 +58,36 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
if current.Eq(&value) { // noop (1)
// EIP 2200 original clause:
// return params.SloadGasEIP2200, nil
- return cost + params.WarmStorageReadCostEIP2929, nil // SLOAD_GAS
+
+ // Warm slot read considered as computation (access lists).
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindComputation, params.WarmStorageReadCostEIP2929)
+ return multiGas, nil // SLOAD_GAS
}
slotCommited := accounts.InternKey(x.Bytes32())
- var original, _ = evm.IntraBlockState().GetCommittedState(callContext.Address(), slotCommited)
+ var original uint256.Int
+ original, err := evm.IntraBlockState().GetCommittedState(callContext.Address(), slotCommited)
+ if err != nil {
+ return multigas.ZeroGas(), err
+ }
if original.Eq(&current) {
if original.IsZero() { // create slot (2.1.1)
- return cost + params.SstoreSetGasEIP2200, nil
+ // Creating a new slot considered as storage growth.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageGrowth, params.SstoreSetGasEIP2200)
+ return multiGas, nil
}
if value.IsZero() { // delete slot (2.1.2b)
evm.IntraBlockState().AddRefund(clearingRefund)
}
// EIP-2200 original clause:
// return params.SstoreResetGasEIP2200, nil // write existing slot (2.1.2)
- return cost + (params.SstoreResetGasEIP2200 - params.ColdSloadCostEIP2929), nil // write existing slot (2.1.2)
+
+ // Storage slot writes (nonzero → zero) considered as storage access.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageAccess, params.SstoreResetGasEIP2200-params.ColdSloadCostEIP2929)
+ return multiGas, nil // write existing slot (2.1.2)
}
if !original.IsZero() {
if current.IsZero() { // recreate slot (2.2.1.1)
@@ -81,11 +99,11 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
if original.Eq(&value) {
if original.IsZero() { // reset to original inexistent slot (2.2.2.1)
// EIP 2200 Original clause:
- //evm.StateDB.AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200)
+ //evm.IntraBlockState().AddRefund(params.SstoreSetGasEIP2200 - params.SloadGasEIP2200)
evm.IntraBlockState().AddRefund(params.SstoreSetGasEIP2200 - params.WarmStorageReadCostEIP2929)
} else { // reset to original existing slot (2.2.2.2)
// EIP 2200 Original clause:
- // evm.StateDB.AddRefund(params.SstoreResetGasEIP2200 - params.SloadGasEIP2200)
+ // evm.IntraBlockState().AddRefund(params.SstoreResetGasEIP2200 - params.SloadGasEIP2200)
// - SSTORE_RESET_GAS redefined as (5000 - COLD_SLOAD_COST)
// - SLOAD_GAS redefined as WARM_STORAGE_READ_COST
// Final: (5000 - COLD_SLOAD_COST) - WARM_STORAGE_READ_COST
@@ -94,7 +112,11 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
}
// EIP-2200 original clause:
//return params.SloadGasEIP2200, nil // dirty update (2.2)
- return cost + params.WarmStorageReadCostEIP2929, nil // dirty update (2.2)
+
+ // Warm slot read considered as computation (access lists).
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindComputation, params.WarmStorageReadCostEIP2929)
+ return multiGas, nil // dirty update (2.2)
}
}
@@ -103,14 +125,19 @@ func makeGasSStoreFunc(clearingRefund uint64) gasFunc {
// whose storage is being read) is not yet in accessed_storage_keys,
// charge 2100 gas and add the pair to accessed_storage_keys.
// If the pair is already in accessed_storage_keys, charge 100 gas.
-func gasSLoadEIP2929(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasSLoadEIP2929(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
loc := callContext.Stack.peek()
// If the caller cannot afford the cost, this change will be rolled back
// If he does afford it, we can skip checking the same thing later on, during execution
if _, slotMod := evm.IntraBlockState().AddSlotToAccessList(callContext.Address(), accounts.InternKey(loc.Bytes32())); slotMod {
- return params.ColdSloadCostEIP2929, nil
+ // Cold slot access: the cold-warm delta is charged as storage access, the warm read portion as computation.
+ return multigas.MultiGasFromPairs(
+ multigas.Pair{Kind: multigas.ResourceKindStorageAccess, Amount: params.ColdSloadCostEIP2929 - params.WarmStorageReadCostEIP2929},
+ multigas.Pair{Kind: multigas.ResourceKindComputation, Amount: params.WarmStorageReadCostEIP2929},
+ ), nil
}
- return params.WarmStorageReadCostEIP2929, nil
+ // Warm slot access considered as computation.
+ return multigas.ComputationGas(params.WarmStorageReadCostEIP2929), nil
}
// gasExtCodeCopyEIP2929 implements extcodecopy according to EIP-2929
@@ -118,24 +145,28 @@ func gasSLoadEIP2929(evm *EVM, callContext *CallContext, scopeGas uint64, memory
// > If the target is not in accessed_addresses,
// > charge COLD_ACCOUNT_ACCESS_COST gas, and add the address to accessed_addresses.
// > Otherwise, charge WARM_STORAGE_READ_COST gas.
-func gasExtCodeCopyEIP2929(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasExtCodeCopyEIP2929(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
// memory expansion first (dynamic part of pre-2929 implementation)
- gas, err := gasExtCodeCopy(evm, callContext, scopeGas, memorySize)
+ multiGas, err := gasExtCodeCopy(evm, callContext, scopeGas, memorySize)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
addr := accounts.InternAddress(callContext.Stack.peek().Bytes20())
// Check slot presence in the access list
- if evm.IntraBlockState().AddAddressToAccessList(addr) {
+ if !evm.IntraBlockState().AddressInAccessList(addr) {
+ evm.IntraBlockState().AddAddressToAccessList(addr)
+
var overflow bool
// We charge (cold-warm), since 'warm' is already charged as constantGas
- if gas, overflow = math.SafeAdd(gas, params.ColdAccountAccessCostEIP2929-params.WarmStorageReadCostEIP2929); overflow {
- return 0, ErrGasUintOverflow
+ // Charge cold → warm delta as storage-access gas.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindStorageAccess, params.ColdAccountAccessCostEIP2929-params.WarmStorageReadCostEIP2929); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+ return multiGas, nil
}
- return gas, nil
-}
+ return multiGas, nil
+}
// gasEip2929AccountCheck checks whether the first stack item (as address) is present in the access list.
// If it is, this method returns '0', otherwise 'cold-warm' gas, presuming that the opcode using it
@@ -144,19 +175,30 @@ func gasExtCodeCopyEIP2929(evm *EVM, callContext *CallContext, scopeGas uint64,
// - extcodehash,
// - extcodesize,
// - (ext) balance
-func gasEip2929AccountCheck(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+func gasEip2929AccountCheck(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
addr := accounts.InternAddress(callContext.Stack.peek().Bytes20())
// If the caller cannot afford the cost, this change will be rolled back
- if evm.IntraBlockState().AddAddressToAccessList(addr) {
+ if !evm.IntraBlockState().AddressInAccessList(addr) {
+ evm.IntraBlockState().AddAddressToAccessList(addr)
+
// The warm storage read cost is already charged as constantGas
- return params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929, nil
+ // charge cold -> warm delta as storage access
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ return multigas.StorageAccessGas(params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929), nil
}
- return 0, nil
+ return multigas.ZeroGas(), nil
}
-func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {
- return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
- addr := accounts.InternAddress(callContext.Stack.Back(1).Bytes20())
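+// addressPosition selects which stack slot holds the callee address; every current call site passes 1.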
+func makeCallVariantGasCallEIP2929(oldCalculator gasFunc, addressPosition int) gasFunc {
+ return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
+ var addr accounts.Address
+ if true { // MERGE_ARBITRUM
+ addr = accounts.InternAddress(callContext.Stack.Back(addressPosition).Bytes20())
+ } else {
+ addr = accounts.InternAddress(callContext.Stack.Back(1).Bytes20())
+ }
+
// The WarmStorageReadCostEIP2929 (100) is already deducted in the form of a constant cost, so
// the cost to charge for cold access, if any, is Cold - Warm
coldCost := params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929
@@ -166,37 +208,62 @@ func makeCallVariantGasCallEIP2929(oldCalculator gasFunc) gasFunc {
if addrMod {
// Charge the remaining difference here already, to correctly calculate available
// gas for call
- if _, ok := useGas(scopeGas, coldCost, evm.Config().Tracer, tracing.GasChangeCallStorageColdAccess); !ok {
- return 0, ErrOutOfGas
+ if true { // MERGE_ARBITRUM
+ if !callContext.Contract.UseMultiGas(multigas.StorageAccessGas(coldCost), evm.Config().Tracer, tracing.GasChangeCallStorageColdAccess) {
+ return multigas.ZeroGas(), ErrOutOfGas
+ }
+ } else {
+ if _, ok := useGas(scopeGas, coldCost, evm.Config().Tracer, tracing.GasChangeCallStorageColdAccess); !ok {
+ return multigas.ZeroGas(), ErrOutOfGas
+ }
}
- scopeGas -= coldCost
+ if false { // MERGE_ARBITRUM
+ scopeGas -= coldCost
+ }
}
+
+
// Now call the old calculator, which takes into account
// - create new account
// - transfer value
// - memory expansion
// - 63/64ths rule
- gas, err := oldCalculator(evm, callContext, scopeGas, memorySize)
+ multiGas, err := oldCalculator(evm, callContext, scopeGas, memorySize)
if warmAccess || err != nil {
- return gas, err
+ return multiGas, err
}
// In case of a cold access, we temporarily add the cold charge back, and also
// add it to the returned gas. By adding it to the return, it will be charged
// outside of this function, as part of the dynamic gas, and that will make it
// also become correctly reported to tracers.
- return gas + coldCost, nil
+ if true { // MERGE_ARBITRUM
+ callContext.Contract.Gas += coldCost
+ callContext.Contract.RetainedMultiGas.SaturatingIncrementInto(multigas.ResourceKindStorageAccess, coldCost)
+
+ // Cold slot access considered as storage access.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ var overflow bool
+ if multiGas, overflow = multiGas.SafeIncrement(multigas.ResourceKindStorageAccess, coldCost); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
+ }
+ return multiGas, nil
+ } else {
+ // original (pre-multigas): return gas + coldCost, nil
+ return multiGas, nil
+ }
}
}
var (
- gasCallEIP2929 = makeCallVariantGasCallEIP2929(gasCall)
- gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall)
- gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall)
- gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode)
+ gasCallEIP2929 = makeCallVariantGasCallEIP2929(gasCall, 1)
+ gasDelegateCallEIP2929 = makeCallVariantGasCallEIP2929(gasDelegateCall, 1)
+ gasStaticCallEIP2929 = makeCallVariantGasCallEIP2929(gasStaticCall, 1)
+ gasCallCodeEIP2929 = makeCallVariantGasCallEIP2929(gasCallCode, 1)
gasSelfdestructEIP2929 = makeSelfdestructGasFn(true)
- // gasSelfdestructEIP3529 implements the changes in EIP-2539 (no refunds)
+ // gasSelfdestructEIP3529 implements the changes in EIP-3529 (no refunds)
gasSelfdestructEIP3529 = makeSelfdestructGasFn(false)
// gasSStoreEIP2929 implements gas cost for SSTORE according to EIP-2929
@@ -220,35 +287,40 @@ var (
// makeSelfdestructGasFn can create the selfdestruct dynamic gas function for EIP-2929 and EIP-2539
func makeSelfdestructGasFn(refundsEnabled bool) gasFunc {
- gasFunc := func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+ gasFunc := func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
var (
- gas uint64
+ multiGas = multigas.ZeroGas()
address = accounts.InternAddress(callContext.Stack.peek().Bytes20())
)
- // If the caller cannot afford the cost, this change will be rolled back
- if evm.IntraBlockState().AddAddressToAccessList(address) {
- gas = params.ColdAccountAccessCostEIP2929
+ if !evm.IntraBlockState().AddressInAccessList(address) {
+ // If the caller cannot afford the cost, this change will be rolled back
+ evm.IntraBlockState().AddAddressToAccessList(address)
+ // Cold account access considered as storage access.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageAccess, params.ColdAccountAccessCostEIP2929)
}
// if empty and transfers value
empty, err := evm.IntraBlockState().Empty(address)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
balance, err := evm.IntraBlockState().GetBalance(callContext.Address())
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if empty && !balance.IsZero() {
- gas += params.CreateBySelfdestructGas
+ // New account creation considered as storage growth.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageGrowth, params.CreateBySelfdestructGas)
}
hasSelfdestructed, err := evm.IntraBlockState().HasSelfdestructed(callContext.Address())
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if refundsEnabled && !hasSelfdestructed {
evm.IntraBlockState().AddRefund(params.SelfdestructRefundGas)
}
- return gas, nil
+ return multiGas, nil
}
return gasFunc
}
@@ -261,8 +333,9 @@ var (
)
func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
- return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (uint64, error) {
+ return func(evm *EVM, callContext *CallContext, scopeGas uint64, memorySize uint64) (multigas.MultiGas, error) {
addr := accounts.InternAddress(callContext.Stack.Back(1).Bytes20())
+ multiGas := multigas.ZeroGas()
// Check slot presence in the access list
var dynCost uint64
if evm.intraBlockState.AddAddressToAccessList(addr) {
@@ -271,15 +344,25 @@ func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
dynCost = params.ColdAccountAccessCostEIP2929 - params.WarmStorageReadCostEIP2929
// Charge the remaining difference here already, to correctly calculate available
// gas for call
- if _, ok := useGas(scopeGas, dynCost, evm.Config().Tracer, tracing.GasChangeCallStorageColdAccess); !ok {
- return 0, ErrOutOfGas
+ if true { // MERGE_ARBITRUM
+ if !callContext.Contract.UseMultiGas(multigas.StorageAccessGas(dynCost), evm.Config().Tracer, tracing.GasChangeCallStorageColdAccess) {
+ return multigas.ZeroGas(), ErrOutOfGas
}
+ } else {
+ if _, ok := useGas(scopeGas, dynCost, evm.Config().Tracer, tracing.GasChangeCallStorageColdAccess); !ok {
+ return multigas.ZeroGas(), ErrOutOfGas
+ }
+ }
+ // Cold slot access considered as storage access.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageAccess, dynCost)
}
// Check if code is a delegation and if so, charge for resolution.
dd, ok, err := evm.intraBlockState.GetDelegatedDesignation(addr)
if err != nil {
- return 0, err
+ return multigas.ZeroGas(), err
}
if ok {
var ddCost uint64
@@ -289,25 +372,44 @@ func makeCallVariantGasCallEIP7702(oldCalculator gasFunc) gasFunc {
ddCost = params.WarmStorageReadCostEIP2929
}
- if _, ok := useGas(scopeGas, ddCost, evm.Config().Tracer, tracing.GasChangeDelegatedDesignation); !ok {
- return 0, ErrOutOfGas
+ if true { // MERGE_ARBITRUM
+ if !callContext.Contract.UseMultiGas(multigas.StorageAccessGas(ddCost), evm.Config().Tracer, tracing.GasChangeDelegatedDesignation) {
+ return multigas.ZeroGas(), ErrOutOfGas
+ }
+ } else {
+ if _, ok := useGas(scopeGas, ddCost, evm.Config().Tracer, tracing.GasChangeDelegatedDesignation); !ok {
+ return multigas.ZeroGas(), ErrOutOfGas
+ }
}
- dynCost += ddCost
+
+ // Target address resolution considered as storage access.
+ // See rationale in: https://github.com/OffchainLabs/nitro/blob/master/docs/decisions/0002-multi-dimensional-gas-metering.md
+ multiGas = multiGas.SaturatingIncrement(multigas.ResourceKindStorageAccess, ddCost)
}
+
// Now call the old calculator, which takes into account
// - create new account
// - transfer value
// - memory expansion
// - 63/64ths rule
- gas, err := oldCalculator(evm, callContext, scopeGas-dynCost, memorySize)
+ multiOld, err := oldCalculator(evm, callContext, scopeGas-dynCost, memorySize)
if dynCost == 0 || err != nil {
- return gas, err
+ return multiOld, err
+ }
+ if true { // MERGE_ARBITRUM
+ // In case of a cold access, we temporarily add the cold charge back, and also
+ // add it to the returned gas. By adding it to the return, it will be charged
+ // outside of this function, as part of the dynamic gas, and that will make it
+ // also become correctly reported to tracers.
+ contract.Gas += multiGas.SingleGas()
+ contract.RetainedMultiGas.SaturatingAddInto(multiGas)
}
var overflow bool
- if gas, overflow = math.SafeAdd(gas, dynCost); overflow {
- return 0, ErrGasUintOverflow
+ if multiGas, overflow = multiGas.SafeAdd(multiOld); overflow {
+ return multigas.ZeroGas(), ErrGasUintOverflow
}
- return gas, nil
+
+ return multiGas, nil
}
}
diff --git a/execution/vm/runtime/env.go b/execution/vm/runtime/env.go
index 376a52ee54c..62dc1711cbe 100644
--- a/execution/vm/runtime/env.go
+++ b/execution/vm/runtime/env.go
@@ -43,6 +43,5 @@ func NewEnv(cfg *Config) *vm.EVM {
GasLimit: cfg.GasLimit,
BaseFee: cfg.BaseFee,
}
-
return vm.NewEVM(blockContext, txContext, cfg.State, cfg.ChainConfig, cfg.EVMConfig)
}
diff --git a/execution/vm/runtime/runtime.go b/execution/vm/runtime/runtime.go
index 60e928d9a17..d6799ed9267 100644
--- a/execution/vm/runtime/runtime.go
+++ b/execution/vm/runtime/runtime.go
@@ -157,7 +157,7 @@ func Execute(code, input []byte, cfg *Config, tempdir string) ([]byte, *state.In
if cfg.EVMConfig.Tracer != nil && cfg.EVMConfig.Tracer.OnTxStart != nil {
cfg.EVMConfig.Tracer.OnTxStart(&tracing.VMContext{IntraBlockState: cfg.State}, nil, accounts.ZeroAddress)
}
- ret, _, err := vmenv.Call(
+ ret, _, _, err := vmenv.Call(
sender,
contractAsAddress,
input,
@@ -208,13 +208,14 @@ func Create(input []byte, cfg *Config, blockNr uint64) ([]byte, common.Address,
cfg.State.Prepare(rules, cfg.Origin, cfg.Coinbase, accounts.NilAddress, vm.ActivePrecompiles(rules), nil, nil)
// Call the code with the given configuration.
- code, address, leftOverGas, err := vmenv.Create(
+ code, address, leftOverGas, usedMultiGas, err := vmenv.Create(
sender,
input,
cfg.GasLimit,
cfg.Value,
false,
)
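+ // The multi-dimensional usage is not part of this helper's return signature, so it is discarded below.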
+ _ = usedMultiGas
return code, address.Value(), leftOverGas, err
}
@@ -241,7 +242,7 @@ func Call(address accounts.Address, input []byte, cfg *Config) ([]byte, uint64,
}
// Call the code with the given configuration.
- ret, leftOverGas, err := vmenv.Call(
+ ret, leftOverGas, _, err := vmenv.Call(
sender.Address(),
address,
input,
diff --git a/execution/vm/stack.go b/execution/vm/stack.go
index 0ecbfb97a43..b3ca4039287 100644
--- a/execution/vm/stack.go
+++ b/execution/vm/stack.go
@@ -57,7 +57,8 @@ func (st *Stack) pop() (ret uint256.Int) {
st.data = st.data[:len(st.data)-1]
return
}
-
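+// Push and Pop export the unexported push/pop helpers so callers outside the
+// vm package can manipulate the stack directly.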
+func (st *Stack) Push(d *uint256.Int) { st.push(d) }
+func (st *Stack) Pop() uint256.Int { return st.pop() }
func (st *Stack) Cap() int {
return cap(st.data)
}
@@ -136,3 +137,11 @@ func ReturnNormalStack(s *Stack) {
s.data = s.data[:0]
stackPool.Put(s)
}
+
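+// String renders the stack contents as a comma-separated list of hex words,
+// mainly useful for debugging.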
+func (st *Stack) String() string {
+ var s string
+ for _, di := range st.data {
+ s += di.Hex() + ", "
+ }
+ return s
+}
diff --git a/go.mod b/go.mod
index 9c336b49bbf..efc8b6e436f 100644
--- a/go.mod
+++ b/go.mod
@@ -6,6 +6,8 @@ replace github.com/holiman/bloomfilter/v2 => github.com/AskAlexSharov/bloomfilte
replace github.com/crate-crypto/go-eth-kzg => github.com/Giulio2002/zero-alloc-go-eth-kzg v0.0.0-20260105034637-43cb6f34f8e0
+replace github.com/erigontech/nitro-erigon => ../
+
require (
github.com/erigontech/erigon-snapshot v1.3.1-0.20260105114333-2f59a10db72b
github.com/erigontech/erigonwatch v0.0.0-20240718131902-b6576bde1116
diff --git a/go.work b/go.work
new file mode 100644
index 00000000000..27a424ae1c0
--- /dev/null
+++ b/go.work
@@ -0,0 +1,9 @@
+go 1.24.0
+
+toolchain go1.24.7
+
+use .
+
+//use ./erigon-lib
+
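+// The extra "use ../" entry assumes the parent directory holds the module that
+// go.mod's "replace github.com/erigontech/nitro-erigon => ../" directive points at.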
+use ../
diff --git a/go.work.sum b/go.work.sum
new file mode 100644
index 00000000000..c46b475b350
--- /dev/null
+++ b/go.work.sum
@@ -0,0 +1,1269 @@
+buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.6-20250425153114-8976f5be98c1.1 h1:YhMSc48s25kr7kv31Z8vf7sPUIq5YJva9z1mn/hAt0M=
+buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.6-20250425153114-8976f5be98c1.1/go.mod h1:avRlCjnFzl98VPaeCtJ24RrV/wwHFzB8sWXhj26+n/U=
+buf.build/go/protovalidate v0.12.0 h1:4GKJotbspQjRCcqZMGVSuC8SjwZ/FmgtSuKDpKUTZew=
+buf.build/go/protovalidate v0.12.0/go.mod h1:q3PFfbzI05LeqxSwq+begW2syjy2Z6hLxZSkP1OH/D0=
+cel.dev/expr v0.20.0 h1:OunBvVCfvpWlt4dN7zg3FM6TDkzOePe1+foGJ9AXeeI=
+cel.dev/expr v0.20.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cloud.google.com/go v0.75.0 h1:XgtDnVJRCPEUG21gjFiRPz4zI1Mjg16R+NYQjfmU4XY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go/accessapproval v1.7.7 h1:vO95gvBi7qUgfA9SflexQs9hB4U4tnri/GwADIrLQy8=
+cloud.google.com/go/accessapproval v1.7.7/go.mod h1:10ZDPYiTm8tgxuMPid8s2DL93BfCt6xBh/Vg0Xd8pU0=
+cloud.google.com/go/accesscontextmanager v1.8.7 h1:GgdNoDwZR5RIO3j8XwXqa6Gc6q5mP3KYMdFC7FEVyG4=
+cloud.google.com/go/accesscontextmanager v1.8.7/go.mod h1:jSvChL1NBQ+uLY9zUBdPy9VIlozPoHptdBnRYeWuQoM=
+cloud.google.com/go/aiplatform v1.68.0 h1:EPPqgHDJpBZKRvv+OsB3cr0jYz3EL2pZ+802rBPcG8U=
+cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME=
+cloud.google.com/go/analytics v0.23.2 h1:O0fj88npvQFxg8LfXo7fArcSrC/wtAstGuWQ7dCHWjg=
+cloud.google.com/go/analytics v0.23.2/go.mod h1:vtE3olAXZ6edJYk1UOndEs6EfaEc9T2B28Y4G5/a7Fo=
+cloud.google.com/go/apigateway v1.6.7 h1:DO5Vn3zmY1aDyfoqni8e8+x+lwrfLCoAAbEui9NB0y8=
+cloud.google.com/go/apigateway v1.6.7/go.mod h1:7wAMb/33Rzln+PrGK16GbGOfA1zAO5Pq6wp19jtIt7c=
+cloud.google.com/go/apigeeconnect v1.6.7 h1:z08Xuv7ZtaB2d4jsJi9/WhbnnI5s19wlLDZpssn3Fus=
+cloud.google.com/go/apigeeconnect v1.6.7/go.mod h1:hZxCKvAvDdKX8+eT0g5eEAbRSS9Gkzi+MPWbgAMAy5U=
+cloud.google.com/go/apigeeregistry v0.8.5 h1:o1C/+IvzwYeV1doum61XmJQ/Bwpk/4+2DT1JyVu2x64=
+cloud.google.com/go/apigeeregistry v0.8.5/go.mod h1:ZMg60hq2K35tlqZ1VVywb9yjFzk9AJ7zqxrysOxLi3o=
+cloud.google.com/go/apikeys v0.6.0 h1:B9CdHFZTFjVti89tmyXXrO+7vSNo2jvZuHG8zD5trdQ=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
+cloud.google.com/go/appengine v1.8.7 h1:qYrjEHEFY7+CL4QlHIHuwTgrTnZbSKzdPFqgjZDsQNo=
+cloud.google.com/go/appengine v1.8.7/go.mod h1:1Fwg2+QTgkmN6Y+ALGwV8INLbdkI7+vIvhcKPZCML0g=
+cloud.google.com/go/area120 v0.8.7 h1:sUrR96yokdL6tTTXK0X13V1TLMta8/1u328bRG5lWZc=
+cloud.google.com/go/area120 v0.8.7/go.mod h1:L/xTq4NLP9mmxiGdcsVz7y1JLc9DI8pfaXRXbnjkR6w=
+cloud.google.com/go/artifactregistry v1.14.9 h1:SSvoD0ofOydm5gA1++15pW9VPgQbk0OmNlcb7JczoO4=
+cloud.google.com/go/artifactregistry v1.14.9/go.mod h1:n2OsUqbYoUI2KxpzQZumm6TtBgtRf++QulEohdnlsvI=
+cloud.google.com/go/asset v1.19.1 h1:mCqyoaDjDzaW1RqmmQtCJuawb9nca5bEu7HvVcpZDwg=
+cloud.google.com/go/asset v1.19.1/go.mod h1:kGOS8DiCXv6wU/JWmHWCgaErtSZ6uN5noCy0YwVaGfs=
+cloud.google.com/go/assuredworkloads v1.11.7 h1:xieyFA+JKyTDkO/Z9UyVEpkHW8pDYykU51O4G0pvXEg=
+cloud.google.com/go/assuredworkloads v1.11.7/go.mod h1:CqXcRH9N0KCDtHhFisv7kk+cl//lyV+pYXGi1h8rCEU=
+cloud.google.com/go/automl v1.13.7 h1:w9AyogtMLXbcy5kzXPvk/Q3MGQkgJH7ZDB8fAUUxTt8=
+cloud.google.com/go/automl v1.13.7/go.mod h1:E+s0VOsYXUdXpq0y4gNZpi0A/s6y9+lAarmV5Eqlg40=
+cloud.google.com/go/baremetalsolution v1.2.6 h1:W4oSMS6vRCo9DLr1RPyDP8oeLverbvhJRzaZSsipft8=
+cloud.google.com/go/baremetalsolution v1.2.6/go.mod h1:KkS2BtYXC7YGbr42067nzFr+ABFMs6cxEcA1F+cedIw=
+cloud.google.com/go/batch v1.8.7 h1:zaQwOAd7TlE84pwPHavNMsnv5zRyRV8ym2DJ4iQ2cV0=
+cloud.google.com/go/batch v1.8.7/go.mod h1:O5/u2z8Wc7E90Bh4yQVLQIr800/0PM5Qzvjac3Jxt4k=
+cloud.google.com/go/beyondcorp v1.0.6 h1:KBcujO3QRvBIwzZLtvQEPB9SXdovHnMBx0V/uhucH9o=
+cloud.google.com/go/beyondcorp v1.0.6/go.mod h1:wRkenqrVRtnGFfnyvIg0zBFUdN2jIfeojFF9JJDwVIA=
+cloud.google.com/go/bigquery v1.8.0 h1:PQcPefKFdaIzjQFbiyOgAqyx8q5djaE7x9Sqe712DPA=
+cloud.google.com/go/bigquery v1.61.0 h1:w2Goy9n6gh91LVi6B2Sc+HpBl8WbWhIyzdvVvrAuEIw=
+cloud.google.com/go/bigquery v1.61.0/go.mod h1:PjZUje0IocbuTOdq4DBOJLNYB0WF3pAKBHzAYyxCwFo=
+cloud.google.com/go/billing v1.18.5 h1:GbOg1uGvoV8FXxMStFoNcq5z9AEUwCpKt/6GNcuDSZM=
+cloud.google.com/go/billing v1.18.5/go.mod h1:lHw7fxS6p7hLWEPzdIolMtOd0ahLwlokW06BzbleKP8=
+cloud.google.com/go/binaryauthorization v1.8.3 h1:RHnEM4HXbWShlGhPA0Jzj2YYETCHxmisNMU0OE2fXQM=
+cloud.google.com/go/binaryauthorization v1.8.3/go.mod h1:Cul4SsGlbzEsWPOz2sH8m+g2Xergb6ikspUyQ7iOThE=
+cloud.google.com/go/certificatemanager v1.8.1 h1:XURrQhj5COWAEvICivbGID/Hu67AvMYHAhMRIyc3Ux8=
+cloud.google.com/go/certificatemanager v1.8.1/go.mod h1:hDQzr50Vx2gDB+dOfmDSsQzJy/UPrYRdzBdJ5gAVFIc=
+cloud.google.com/go/channel v1.17.7 h1:PrplNaAS6Dn187e+OcGzyEKETX8iL3tCaDqcPPW7Zoo=
+cloud.google.com/go/channel v1.17.7/go.mod h1:b+FkgBrhMKM3GOqKUvqHFY/vwgp+rwsAuaMd54wCdN4=
+cloud.google.com/go/cloudbuild v1.16.1 h1:zkCG1dBezxRM3dtgQ9h1Y+IJ7V+lARWgp0l9k/SZsfU=
+cloud.google.com/go/cloudbuild v1.16.1/go.mod h1:c2KUANTtCBD8AsRavpPout6Vx8W+fsn5zTsWxCpWgq4=
+cloud.google.com/go/clouddms v1.7.6 h1:Q47KKoA0zsNcC9U5aCmop5TPPItVq4cx7Wwqgra+5PU=
+cloud.google.com/go/clouddms v1.7.6/go.mod h1:8HWZ2tznZ0mNAtTpfnRNT0QOThqn9MBUqTj0Lx8npIs=
+cloud.google.com/go/cloudtasks v1.12.8 h1:Y0HUuiCAVk9BojLItOycBl91tY25NXH8oFsyi1IC/U4=
+cloud.google.com/go/cloudtasks v1.12.8/go.mod h1:aX8qWCtmVf4H4SDYUbeZth9C0n9dBj4dwiTYi4Or/P4=
+cloud.google.com/go/compute v1.27.0 h1:EGawh2RUnfHT5g8f/FX3Ds6KZuIBC77hZoDrBvEZw94=
+cloud.google.com/go/compute v1.27.0/go.mod h1:LG5HwRmWFKM2C5XxHRiNzkLLXW48WwvyVC0mfWsYPOM=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
+cloud.google.com/go/contactcenterinsights v1.13.2 h1:46ertIh+cGkTg/lN7fN+TOx09SoM65dpdUp96vXBcMY=
+cloud.google.com/go/contactcenterinsights v1.13.2/go.mod h1:AfkSB8t7mt2sIY6WpfO61nD9J9fcidIchtxm9FqJVXk=
+cloud.google.com/go/container v1.37.0 h1:KZ5IbxnUNcTdBkMfZNum6z4tu61sFHf9Zk7Xl8RzWVI=
+cloud.google.com/go/container v1.37.0/go.mod h1:AFsgViXsfLvZHsgHrWQqPqfAPjCwXrZmLjKJ64uhLIw=
+cloud.google.com/go/containeranalysis v0.11.6 h1:mSrneOVadcpnDZHJebg+ts/10azGTUKOCSQET7KdT7g=
+cloud.google.com/go/containeranalysis v0.11.6/go.mod h1:YRf7nxcTcN63/Kz9f86efzvrV33g/UV8JDdudRbYEUI=
+cloud.google.com/go/datacatalog v1.20.1 h1:czcba5mxwRM5V//jSadyig0y+8aOHmN7gUl9GbHu59E=
+cloud.google.com/go/datacatalog v1.20.1/go.mod h1:Jzc2CoHudhuZhpv78UBAjMEg3w7I9jHA11SbRshWUjk=
+cloud.google.com/go/dataflow v0.9.7 h1:wKEakCbRevlwsWqTn34pWJUFmdbx0HKwpRH6HhU7NIs=
+cloud.google.com/go/dataflow v0.9.7/go.mod h1:3BjkOxANrm1G3+/EBnEsTEEgJu1f79mFqoOOZfz3v+E=
+cloud.google.com/go/dataform v0.9.4 h1:MiK1Us7YP9+sdNViUE4X2B2vLScrKcjOPw5b6uamZvE=
+cloud.google.com/go/dataform v0.9.4/go.mod h1:jjo4XY+56UrNE0wsEQsfAw4caUs4DLJVSyFBDelRDtQ=
+cloud.google.com/go/datafusion v1.7.7 h1:ViFnMnUK7LNcWvisZgihxXit76JxSHFeijYI5U/gjOE=
+cloud.google.com/go/datafusion v1.7.7/go.mod h1:qGTtQcUs8l51lFA9ywuxmZJhS4ozxsBSus6ItqCUWMU=
+cloud.google.com/go/datalabeling v0.8.7 h1:M6irSHns6VxMro+IbvDxDJLD6tkfjlW+mo2MPaM23KA=
+cloud.google.com/go/datalabeling v0.8.7/go.mod h1:/PPncW5gxrU15UzJEGQoOT3IobeudHGvoExrtZ8ZBwo=
+cloud.google.com/go/dataplex v1.16.1 h1:vuDBDNOBl75GFLUnFXG/Y10lUtVUx/58vbO+yRQmW+I=
+cloud.google.com/go/dataplex v1.16.1/go.mod h1:szV2OpxfbmRBcw1cYq2ln8QsLR3FJq+EwTTIo+0FnyE=
+cloud.google.com/go/dataproc v1.12.0 h1:W47qHL3W4BPkAIbk4SWmIERwsWBaNnWm0P2sdx3YgGU=
+cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataproc/v2 v2.4.2 h1:RNMG5ffWKdbWOkwvjC4GqxLaxEaWFpm2hQCF2WFW/vo=
+cloud.google.com/go/dataproc/v2 v2.4.2/go.mod h1:smGSj1LZP3wtnsM9eyRuDYftNAroAl6gvKp/Wk64XDE=
+cloud.google.com/go/dataqna v0.8.7 h1:qM60MGNTGsSJuzAziVJjtRA7pGby2dA8OuqdVRe/lYo=
+cloud.google.com/go/dataqna v0.8.7/go.mod h1:hvxGaSvINAVH5EJJsONIwT1y+B7OQogjHPjizOFoWOo=
+cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ=
+cloud.google.com/go/datastore v1.17.1 h1:6Me8ugrAOAxssGhSo8im0YSuy4YvYk4mbGvCadAH5aE=
+cloud.google.com/go/datastore v1.17.1/go.mod h1:mtzZ2HcVtz90OVrEXXGDc2pO4NM1kiBQy8YV4qGe0ZM=
+cloud.google.com/go/datastream v1.10.6 h1:FfNUy9j3aRQ99L4a5Rdm82RMuiw0BIe3lpPn2ykom8k=
+cloud.google.com/go/datastream v1.10.6/go.mod h1:lPeXWNbQ1rfRPjBFBLUdi+5r7XrniabdIiEaCaAU55o=
+cloud.google.com/go/deploy v1.19.0 h1:fzbObuGgoViO0ArFuOQIJ2yr5bH5YzbORVvMDBrDC5I=
+cloud.google.com/go/deploy v1.19.0/go.mod h1:BW9vAujmxi4b/+S7ViEuYR65GiEsqL6Mhf5S/9TeDRU=
+cloud.google.com/go/dialogflow v1.54.0 h1:DgCHYpasCaI3b03X6pxqGmEzX72DAHZhV0f/orrpxYM=
+cloud.google.com/go/dialogflow v1.54.0/go.mod h1:/YQLqB0bdDJl+zFKN+UNQsYUqLfWZb1HsJUQqMT7Q6k=
+cloud.google.com/go/dlp v1.14.0 h1:/GQVl5gOPR2dUemrR2YJxZG5D9MCE3AYgmDxjzP54jI=
+cloud.google.com/go/dlp v1.14.0/go.mod h1:4fvEu3EbLsHrgH3QFdFlTNIiCP5mHwdYhS/8KChDIC4=
+cloud.google.com/go/documentai v1.30.1 h1:501VbUcVCg1LaWIsnmNdTsrEsQNTPD086aqejBvvOCc=
+cloud.google.com/go/documentai v1.30.1/go.mod h1:RohRpAfvuv3uk3WQtXPpgQ3YABvzacWnasyJQb6AAPk=
+cloud.google.com/go/domains v0.9.7 h1:IixFIMRzUJWZUAOe8s/K2X4Bvtp0A3xjHLljfNC4aSo=
+cloud.google.com/go/domains v0.9.7/go.mod h1:u/yVf3BgfPJW3QDZl51qTJcDXo9PLqnEIxfGmGgbHEc=
+cloud.google.com/go/edgecontainer v1.2.1 h1:xa6MIQhGylE24QdWaxhfIfAJE3Pupcr+i77WEx3NJrg=
+cloud.google.com/go/edgecontainer v1.2.1/go.mod h1:OE2D0lbkmGDVYLCvpj8Y0M4a4K076QB7E2JupqOR/qU=
+cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.6.8 h1:p5Y7ZNVPiV9pEAHzvWiPcSiQRMQqcuHxOP0ZOP0vVww=
+cloud.google.com/go/essentialcontacts v1.6.8/go.mod h1:EHONVDSum2xxG2p+myyVda/FwwvGbY58ZYC4XqI/lDQ=
+cloud.google.com/go/eventarc v1.13.6 h1:we+qx5uCZ88aQzQS3MJXRvAh/ik+EmqVyjcW1oYFW44=
+cloud.google.com/go/eventarc v1.13.6/go.mod h1:QReOaYnDNdjwAQQWNC7nfr63WnaKFUw7MSdQ9PXJYj0=
+cloud.google.com/go/filestore v1.8.3 h1:CpRnsUpMU5gxUKyfh7TD0SM+E+7E4ORaDea2JctKfpY=
+cloud.google.com/go/filestore v1.8.3/go.mod h1:QTpkYpKBF6jlPRmJwhLqXfJQjVrQisplyb4e2CwfJWc=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/firestore v1.15.0 h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBpbFF8=
+cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk=
+cloud.google.com/go/functions v1.16.2 h1:83bd2lCgtu2nLbX2jrqsrQhIs7VuVA1N6Op5syeRVIg=
+cloud.google.com/go/functions v1.16.2/go.mod h1:+gMvV5E3nMb9EPqX6XwRb646jTyVz8q4yk3DD6xxHpg=
+cloud.google.com/go/gaming v1.9.0 h1:7vEhFnZmd931Mo7sZ6pJy7uQPDxF7m7v8xtBheG08tc=
+cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gkebackup v1.5.0 h1:wysUXEkggPwENZY3BXroOyWoyVfPypzaqNHgOZD9Kck=
+cloud.google.com/go/gkebackup v1.5.0/go.mod h1:eLaf/+n8jEmIvOvDriGjo99SN7wRvVadoqzbZu0WzEw=
+cloud.google.com/go/gkeconnect v0.8.7 h1:BfXsTXYs5xlicAlgbtlo8Cw+YdzU3PrlBg7dATJUwrk=
+cloud.google.com/go/gkeconnect v0.8.7/go.mod h1:iUH1jgQpTyNFMK5LgXEq2o0beIJ2p7KKUUFerkf/eGc=
+cloud.google.com/go/gkehub v0.14.7 h1:bHwcvgh8AmcYm6p6/ZrWW3a7J7sKBDtqtsyVXKssnPs=
+cloud.google.com/go/gkehub v0.14.7/go.mod h1:NLORJVTQeCdxyAjDgUwUp0A6BLEaNLq84mCiulsM4OE=
+cloud.google.com/go/gkemulticloud v1.2.0 h1:zaWBakKPT6mPHVn5iefuRqttjpbNsb8LlMw9KgfyfyU=
+cloud.google.com/go/gkemulticloud v1.2.0/go.mod h1:iN5wBxTLPR6VTBWpkUsOP2zuPOLqZ/KbgG1bZir1Cng=
+cloud.google.com/go/grafeas v0.3.5 h1:Z87HxC4vnjR1kWWtzP6BuQXa6xBmndRK/kaz4iu6oMA=
+cloud.google.com/go/grafeas v0.3.5/go.mod h1:y54iTBcI+lgUdI+kAPKb8jtPqeTkA2dsYzWSrQtpc5s=
+cloud.google.com/go/gsuiteaddons v1.6.7 h1:06Jg3JeLslEfBYX1sDqOPLnF7a3wmhNcDUXF/fVOb50=
+cloud.google.com/go/gsuiteaddons v1.6.7/go.mod h1:u+sGBvr07OKNnOnQiB/Co1q4U2cjo50ERQwvnlcpNis=
+cloud.google.com/go/iap v1.9.6 h1:rcuRS9XfOgr1v6TAoihVeSXntOnpVhFlVHtPfgOkLAo=
+cloud.google.com/go/iap v1.9.6/go.mod h1:YiK+tbhDszhaVifvzt2zTEF2ch9duHtp6xzxj9a0sQk=
+cloud.google.com/go/ids v1.4.7 h1:wtd+r415yrfZ8LsB6yH6WrOZ26tYt7w6wy3i5a4HQZ8=
+cloud.google.com/go/ids v1.4.7/go.mod h1:yUkDC71u73lJoTaoONy0dsA0T7foekvg6ZRg9IJL0AA=
+cloud.google.com/go/iot v1.7.7 h1:M9SKIj9eoxoXCzytkLZVAuf5wmoui1OeDqEjC97wRbY=
+cloud.google.com/go/iot v1.7.7/go.mod h1:tr0bCOSPXtsg64TwwZ/1x+ReTWKlQRVXbM+DnrE54yM=
+cloud.google.com/go/kms v1.18.0 h1:pqNdaVmZJFP+i8OVLocjfpdTWETTYa20FWOegSCdrRo=
+cloud.google.com/go/kms v1.18.0/go.mod h1:DyRBeWD/pYBMeyiaXFa/DGNyxMDL3TslIKb8o/JkLkw=
+cloud.google.com/go/language v1.12.5 h1:kOYJEcuZgyUX/i/4DFrfXPcrddm1XCQD2lDI5hIFmZQ=
+cloud.google.com/go/language v1.12.5/go.mod h1:w/6a7+Rhg6Bc2Uzw6thRdKKNjnOzfKTJuxzD0JZZ0nM=
+cloud.google.com/go/lifesciences v0.9.7 h1:qqEmApr5YFOQjkrU8Jy6o6QpkESqfGbfrE6bnUZZbV8=
+cloud.google.com/go/lifesciences v0.9.7/go.mod h1:FQ713PhjAOHqUVnuwsCe1KPi9oAdaTfh58h1xPiW13g=
+cloud.google.com/go/logging v1.10.0 h1:f+ZXMqyrSJ5vZ5pE/zr0xC8y/M9BLNzQeLBwfeZ+wY4=
+cloud.google.com/go/logging v1.10.0/go.mod h1:EHOwcxlltJrYGqMGfghSet736KR3hX1MAj614mrMk9I=
+cloud.google.com/go/managedidentities v1.6.7 h1:uWA9WQyfA0JdkeAFymWUsa3qE9tC33LUElla790Ou1A=
+cloud.google.com/go/managedidentities v1.6.7/go.mod h1:UzslJgHnc6luoyx2JV19cTCi2Fni/7UtlcLeSYRzTV8=
+cloud.google.com/go/maps v1.11.1 h1:2U1NB/GIoXhNNmYMlGiLM7juL7nxh51lSkNELbYddB8=
+cloud.google.com/go/maps v1.11.1/go.mod h1:XcSsd8lg4ZhLPCtJ2YHcu/xLVePBzZOlI7GmR2cRCws=
+cloud.google.com/go/mediatranslation v0.8.7 h1:izgww3TlyvWyDWdFKnrASpbh12IkAuw8o2ION8sAjX0=
+cloud.google.com/go/mediatranslation v0.8.7/go.mod h1:6eJbPj1QJwiCP8R4K413qMx6ZHZJUi9QFpApqY88xWU=
+cloud.google.com/go/memcache v1.10.7 h1:hE7f3ze3+eWh/EbYXEz7oXkm0LXcr7UCoLklwi7gsLU=
+cloud.google.com/go/memcache v1.10.7/go.mod h1:SrU6+QBhvXJV0TA59+B3oCHtLkPx37eqdKmRUlmSE1k=
+cloud.google.com/go/metastore v1.13.6 h1:otHcJkci5f/sNRedrSM7eM81QRnu0yZ3HvkvWGphABA=
+cloud.google.com/go/metastore v1.13.6/go.mod h1:OBCVMCP7X9vA4KKD+5J4Q3d+tiyKxalQZnksQMq5MKY=
+cloud.google.com/go/monitoring v1.19.0 h1:NCXf8hfQi+Kmr56QJezXRZ6GPb80ZI7El1XztyUuLQI=
+cloud.google.com/go/monitoring v1.19.0/go.mod h1:25IeMR5cQ5BoZ8j1eogHE5VPJLlReQ7zFp5OiLgiGZw=
+cloud.google.com/go/networkconnectivity v1.14.6 h1:jYpQ86mZ7OYZc7WadvCIlIaPXmXhr5nD7wgE/ekMVpM=
+cloud.google.com/go/networkconnectivity v1.14.6/go.mod h1:/azB7+oCSmyBs74Z26EogZ2N3UcXxdCHkCPcz8G32bU=
+cloud.google.com/go/networkmanagement v1.13.2 h1:Ex1/aYkA0areleSmOGXHvEFBGohteIYJr2SGPrjOUe0=
+cloud.google.com/go/networkmanagement v1.13.2/go.mod h1:24VrV/5HFIOXMEtVQEUoB4m/w8UWvUPAYjfnYZcBc4c=
+cloud.google.com/go/networksecurity v0.9.7 h1:aepEkfiwOvUL9eu3ginVZhTaXDRHncQKi9lTT1BycH0=
+cloud.google.com/go/networksecurity v0.9.7/go.mod h1:aB6UiPnh/l32+TRvgTeOxVRVAHAFFqvK+ll3idU5BoY=
+cloud.google.com/go/notebooks v1.11.5 h1:sFU1ETg1HfIN/Tev8gD0dleAITLv7cHp0JClwFmJ6bo=
+cloud.google.com/go/notebooks v1.11.5/go.mod h1:pz6P8l2TvhWqAW3sysIsS0g2IUJKOzEklsjWJfi8sd4=
+cloud.google.com/go/optimization v1.6.5 h1:FPfowA/LEckKTQT0A4NJMI2bSou999c2ZyFX1zGiYxY=
+cloud.google.com/go/optimization v1.6.5/go.mod h1:eiJjNge1NqqLYyY75AtIGeQWKO0cvzD1ct/moCFaP2Q=
+cloud.google.com/go/orchestration v1.9.2 h1:C2WL4ZnclXsh4XickGhKYKlPjqVZj35y1sbRjdsZ3g4=
+cloud.google.com/go/orchestration v1.9.2/go.mod h1:8bGNigqCQb/O1kK7PeStSNlyi58rQvZqDiuXT9KAcbg=
+cloud.google.com/go/orgpolicy v1.12.3 h1:fGftW2bPi8vTjQm57xlwtLBZQcrgC+c3HMFBzJ+KWPc=
+cloud.google.com/go/orgpolicy v1.12.3/go.mod h1:6BOgIgFjWfJzTsVcib/4QNHOAeOjCdaBj69aJVs//MA=
+cloud.google.com/go/osconfig v1.12.7 h1:HXsXGFaFaLTklwKgSob/GSE+c3verYDQDgreFaosxyc=
+cloud.google.com/go/osconfig v1.12.7/go.mod h1:ID7Lbqr0fiihKMwAOoPomWRqsZYKWxfiuafNZ9j1Y1M=
+cloud.google.com/go/oslogin v1.13.3 h1:7AgOWH1oMPrB1AVU0/f47ADdOt+XfdBY7QRb8tcMUp8=
+cloud.google.com/go/oslogin v1.13.3/go.mod h1:WW7Rs1OJQ1iSUckZDilvNBSNPE8on740zF+4ZDR4o8U=
+cloud.google.com/go/phishingprotection v0.8.7 h1:CbCjfR/pgDHyRMu94o9nuGwaONEcarWnUfSGGw+I2ZI=
+cloud.google.com/go/phishingprotection v0.8.7/go.mod h1:FtYaOyGc/HQQU7wY4sfwYZBFDKAL+YtVBjUj8E3A3/I=
+cloud.google.com/go/policytroubleshooter v1.10.5 h1:LGt85MZUKlq9oqsbBL9+M6jAyeuR1TtCx6k5HfAQxTY=
+cloud.google.com/go/policytroubleshooter v1.10.5/go.mod h1:bpOf94YxjWUqsVKokzPBibMSAx937Jp2UNGVoMAtGYI=
+cloud.google.com/go/privatecatalog v0.9.7 h1:wGZKKJhYyuf4gcAEywQqQ6F19yxhBJGnzgyxOTbJjBw=
+cloud.google.com/go/privatecatalog v0.9.7/go.mod h1:NWLa8MCL6NkRSt8jhL8Goy2A/oHkvkeAxiA0gv0rIXI=
+cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU=
+cloud.google.com/go/pubsub v1.39.0 h1:qt1+S6H+wwW8Q/YvDwM8lJnq+iIFgFEgaD/7h3lMsAI=
+cloud.google.com/go/pubsub v1.39.0/go.mod h1:FrEnrSGU6L0Kh3iBaAbIUM8KMR7LqyEkMboVxGXCT+s=
+cloud.google.com/go/pubsublite v1.8.2 h1:jLQozsEVr+c6tOU13vDugtnaBSUy/PD5zK6mhm+uF1Y=
+cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI=
+cloud.google.com/go/recaptchaenterprise/v2 v2.13.0 h1:+QG02kE63W13vXI+rwAxFF3EhGX6K7gXwFz9OKwKcHw=
+cloud.google.com/go/recaptchaenterprise/v2 v2.13.0/go.mod h1:jNYyn2ScR4DTg+VNhjhv/vJQdaU8qz+NpmpIzEE7HFQ=
+cloud.google.com/go/recommendationengine v0.8.7 h1:N6n/TEr0FQzeP4ZtvF5daMszOhdZI94uMiPiAi9kFMo=
+cloud.google.com/go/recommendationengine v0.8.7/go.mod h1:YsUIbweUcpm46OzpVEsV5/z+kjuV6GzMxl7OAKIGgKE=
+cloud.google.com/go/recommender v1.12.3 h1:v9x75vXP5wMXw3QG3xmgjVHLlqYufuLn/ht3oNWCA3w=
+cloud.google.com/go/recommender v1.12.3/go.mod h1:OgN0MjV7/6FZUUPgF2QPQtYErtZdZc4u+5onvurcGEI=
+cloud.google.com/go/redis v1.16.0 h1:1veL/h/x5bgzG2CLK2cdG3plWdgO0p1qoHgwFBqG7+c=
+cloud.google.com/go/redis v1.16.0/go.mod h1:NLzG3Ur8ykVIZk+i5ienRnycsvWzQ0uCLcil6Htc544=
+cloud.google.com/go/resourcemanager v1.9.7 h1:SdvD0PaPX60+yeKoSe16mawFpM0EPuiPPihTIVlhRsY=
+cloud.google.com/go/resourcemanager v1.9.7/go.mod h1:cQH6lJwESufxEu6KepsoNAsjrUtYYNXRwxm4QFE5g8A=
+cloud.google.com/go/resourcesettings v1.7.0 h1:yEuByg5XBHhTG9wPEU7GiEtC9Orp1wSEyiiX4IPqoSY=
+cloud.google.com/go/resourcesettings v1.7.0/go.mod h1:pFzZYOQMyf1hco9pbNWGEms6N/2E7nwh0oVU1Tz+4qA=
+cloud.google.com/go/retail v1.17.0 h1:YTKwc6K02xpa/SYkPpxY7QEmsd3deP/+ceMTuAQ1RVg=
+cloud.google.com/go/retail v1.17.0/go.mod h1:GZ7+J084vyvCxO1sjdBft0DPZTCA/lMJ46JKWxWeb6w=
+cloud.google.com/go/run v1.3.7 h1:E4Z5e681Qh7UJrJRMCgYhp+3tkcoXiaKGh3UZmUPaAQ=
+cloud.google.com/go/run v1.3.7/go.mod h1:iEUflDx4Js+wK0NzF5o7hE9Dj7QqJKnRj0/b6rhVq20=
+cloud.google.com/go/scheduler v1.10.8 h1:Jn/unfNUgRiNJRc1nrApzimKiVj91UYlLT8mMfpUu48=
+cloud.google.com/go/scheduler v1.10.8/go.mod h1:0YXHjROF1f5qTMvGTm4o7GH1PGAcmu/H/7J7cHOiHl0=
+cloud.google.com/go/secretmanager v1.13.1 h1:TTGo2Vz7ZxYn2QbmuFP7Zo4lDm5VsbzBjDReo3SA5h4=
+cloud.google.com/go/secretmanager v1.13.1/go.mod h1:y9Ioh7EHp1aqEKGYXk3BOC+vkhlHm9ujL7bURT4oI/4=
+cloud.google.com/go/security v1.17.0 h1:u4RCnEQPvlrrnFRFinU0T3WsjtrsQErkWBfqTM5oUQI=
+cloud.google.com/go/security v1.17.0/go.mod h1:eSuFs0SlBv1gWg7gHIoF0hYOvcSwJCek/GFXtgO6aA0=
+cloud.google.com/go/securitycenter v1.30.0 h1:Y8C0I/mzLbaxAl5cw3EaLox0Rvpy+VUwEuCGWIQDMU8=
+cloud.google.com/go/securitycenter v1.30.0/go.mod h1:/tmosjS/dfTnzJxOzZhTXdX3MXWsCmPWfcYOgkJmaJk=
+cloud.google.com/go/servicecontrol v1.11.1 h1:d0uV7Qegtfaa7Z2ClDzr9HJmnbJW7jn0WhZ7wOX6hLE=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
+cloud.google.com/go/servicedirectory v1.11.7 h1:c3OAhTcZ8LbIiKps5T3p6i0QcPI8/aWYwOfoZobICKo=
+cloud.google.com/go/servicedirectory v1.11.7/go.mod h1:fiO/tM0jBpVhpCAe7Yp5HmEsmxSUcOoc4vPrO02v68I=
+cloud.google.com/go/servicemanagement v1.8.0 h1:fopAQI/IAzlxnVeiKn/8WiV6zKndjFkvi+gzu+NjywY=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
+cloud.google.com/go/serviceusage v1.6.0 h1:rXyq+0+RSIm3HFypctp7WoXxIA563rn206CfMWdqXX4=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
+cloud.google.com/go/shell v1.7.7 h1:HxCzcUxSsCh6FJWkmbOUrGI1sKe4E1Yy4vaykn4RhJ4=
+cloud.google.com/go/shell v1.7.7/go.mod h1:7OYaMm3TFMSZBh8+QYw6Qef+fdklp7CjjpxYAoJpZbQ=
+cloud.google.com/go/spanner v1.63.0 h1:P6+BY70Wtol4MtryBgnXZVTZfsdySEvWfz0EpyLwHi4=
+cloud.google.com/go/spanner v1.63.0/go.mod h1:iqDx7urZpgD7RekZ+CFvBRH6kVTW1ZSEb2HMDKOp5Cc=
+cloud.google.com/go/speech v1.23.1 h1:TcWEAOLQH1Lb2fhHS6/GjvAh+ue0dt4xUDHXHG6vF04=
+cloud.google.com/go/speech v1.23.1/go.mod h1:UNgzNxhNBuo/OxpF1rMhA/U2rdai7ILL6PBXFs70wq0=
+cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU=
+cloud.google.com/go/storagetransfer v1.10.6 h1:CXmoNEvz7y2NtHFZuH3Z8ASN43rxRINWa2Q/IlBzM2k=
+cloud.google.com/go/storagetransfer v1.10.6/go.mod h1:3sAgY1bx1TpIzfSzdvNGHrGYldeCTyGI/Rzk6Lc6A7w=
+cloud.google.com/go/talent v1.6.8 h1:RoyEtftfJrbwJcu63zuWE4IjC76xMyVsJBhmleIp3bE=
+cloud.google.com/go/talent v1.6.8/go.mod h1:kqPAJvhxmhoUTuqxjjk2KqA8zUEeTDmH+qKztVubGlQ=
+cloud.google.com/go/texttospeech v1.7.7 h1:qR6Mu+EM2OfaZR1/Rl8BDBTVfi2X5OtwKKvJRSQyG+o=
+cloud.google.com/go/texttospeech v1.7.7/go.mod h1:XO4Wr2VzWHjzQpMe3gS58Oj68nmtXMyuuH+4t0wy9eA=
+cloud.google.com/go/tpu v1.6.7 h1:ngQokxUB1z2gvHn3vAf04m7SFnNYMiQIIpny81fCGAs=
+cloud.google.com/go/tpu v1.6.7/go.mod h1:o8qxg7/Jgt7TCgZc3jNkd4kTsDwuYD3c4JTMqXZ36hU=
+cloud.google.com/go/trace v1.10.7 h1:gK8z2BIJQ3KIYGddw9RJLne5Fx0FEXkrEQzPaeEYVvk=
+cloud.google.com/go/trace v1.10.7/go.mod h1:qk3eiKmZX0ar2dzIJN/3QhY2PIFh1eqcIdaN5uEjQPM=
+cloud.google.com/go/translate v1.10.3 h1:g+B29z4gtRGsiKDoTF+bNeH25bLRokAaElygX2FcZkE=
+cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs=
+cloud.google.com/go/video v1.21.0 h1:ue/1C8TF8H2TMzKMBdNnFxT7QaeWMtqfDr9TSQGgUhA=
+cloud.google.com/go/video v1.21.0/go.mod h1:Kqh97xHXZ/bIClgDHf5zkKvU3cvYnLyRefmC8yCBqKI=
+cloud.google.com/go/videointelligence v1.11.7 h1:SKBkFTuOclESLjQL1LwraqVFm2fL5oL9tbzKITU+FOY=
+cloud.google.com/go/videointelligence v1.11.7/go.mod h1:iMCXbfjurmBVgKuyLedTzv90kcnppOJ6ttb0+rLDID0=
+cloud.google.com/go/vision/v2 v2.8.2 h1:j9RxG8DcyJO/D7/ps2pOey8VZys+TMqF79bWAhuM7QU=
+cloud.google.com/go/vision/v2 v2.8.2/go.mod h1:BHZA1LC7dcHjSr9U9OVhxMtLKd5l2jKPzLRALEJvuaw=
+cloud.google.com/go/vmmigration v1.7.7 h1:bf2qKqEN7iqT62IptQ/FDadoDLJI9sthyrW3PVaH8bY=
+cloud.google.com/go/vmmigration v1.7.7/go.mod h1:qYIK5caZY3IDMXQK+A09dy81QU8qBW0/JDTc39OaKRw=
+cloud.google.com/go/vmwareengine v1.1.3 h1:x4KwHB4JlBEzMaITVhrbbpHrU+2I5LrlvHGEEluT0vc=
+cloud.google.com/go/vmwareengine v1.1.3/go.mod h1:UoyF6LTdrIJRvDN8uUB8d0yimP5A5Ehkr1SRzL1APZw=
+cloud.google.com/go/vpcaccess v1.7.7 h1:F5woMLufKnshmDvPVxCzoC+Di12RYXQ1W8kNmpBT8z0=
+cloud.google.com/go/vpcaccess v1.7.7/go.mod h1:EzfSlgkoAnFWEMznZW0dVNvdjFjEW97vFlKk4VNBhwY=
+cloud.google.com/go/webrisk v1.9.7 h1:EWTSVagWWeQjVAsebiF/wJMwC5bq6Zz3LqOmD9Uid4s=
+cloud.google.com/go/webrisk v1.9.7/go.mod h1:7FkQtqcKLeNwXCdhthdXHIQNcFWPF/OubrlyRcLHNuQ=
+cloud.google.com/go/websecurityscanner v1.6.7 h1:R5OW5SNRqD0DSEmyWLUMNYAXWYnz/NLSXBawVFrc9a0=
+cloud.google.com/go/websecurityscanner v1.6.7/go.mod h1:EpiW84G5KXxsjtFKK7fSMQNt8JcuLA8tQp7j0cyV458=
+cloud.google.com/go/workflows v1.12.6 h1:2bE69mh68law1UZWPjgmvOQsjsGSppRudABAXwNAy58=
+cloud.google.com/go/workflows v1.12.6/go.mod h1:oDbEHKa4otYg4abwdw2Z094jB0TLLiFGAPA78EDAKag=
+codeberg.org/go-fonts/liberation v0.5.0 h1:SsKoMO1v1OZmzkG2DY+7ZkCL9U+rrWI09niOLfQ5Bo0=
+codeberg.org/go-fonts/liberation v0.5.0/go.mod h1:zS/2e1354/mJ4pGzIIaEtm/59VFCFnYC7YV6YdGl5GU=
+codeberg.org/go-latex/latex v0.1.0 h1:hoGO86rIbWVyjtlDLzCqZPjNykpWQ9YuTZqAzPcfL3c=
+codeberg.org/go-latex/latex v0.1.0/go.mod h1:LA0q/AyWIYrqVd+A9Upkgsb+IqPcmSTKc9Dny04MHMw=
+codeberg.org/go-pdf/fpdf v0.10.0 h1:u+w669foDDx5Ds43mpiiayp40Ov6sZalgcPMDBcZRd4=
+codeberg.org/go-pdf/fpdf v0.10.0/go.mod h1:Y0DGRAdZ0OmnZPvjbMp/1bYxmIPxm0ws4tfoPOc4LjU=
+crawshaw.io/iox v0.0.0-20181124134642-c51c3df30797 h1:yDf7ARQc637HoxDho7xjqdvO5ZA2Yb+xzv/fOnnvZzw=
+crawshaw.io/sqlite v0.3.2 h1:N6IzTjkiw9FItHAa0jp+ZKC6tuLzXqAYIv+ccIWos1I=
+crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c h1:wvzox0eLO6CKQAMcOqz7oH3UFqMpMmK7kwmwV+22HIs=
+crawshaw.io/sqlite v0.3.3-0.20220618202545-d1964889ea3c/go.mod h1:igAO5JulrQ1DbdZdtVq48mnZUBAPOeFzer7VhDWNtW4=
+dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3 h1:hJiie5Bf3QucGRa4ymsAUOxyhYwGEz1xrsVk0P8erlw=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=
+dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0 h1:SPOUaucgtVls75mg+X7CXigS71EnsfVUK/2CgVrwqgw=
+dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412 h1:GvWw74lx5noHocd+f6HBMXK6DuggBB1dhVkuGZbv7qM=
+dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c h1:ivON6cwHK1OH26MZyWDCnbTRZZf0IhNsENoNAKFS1g4=
+git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:OR8VhtwhcAI3U48/rzBsVOuHi0zDPzYI1xASVcdSgR8=
+git.sr.ht/~sbinet/gg v0.6.0 h1:RIzgkizAk+9r7uPzf/VfbJHBMKUr0F5hRFxTUGMnt38=
+git.sr.ht/~sbinet/gg v0.6.0/go.mod h1:uucygbfC9wVPQIfrmwM2et0imr8L7KQWywX0xpFMm94=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=
+github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8=
+github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
+github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
+github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
+github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0 h1:oVLqHXhnYtUwM89y9T1fXGaK9wTkXHgNp8/ZNMQzUxE=
+github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0 h1:f2Qw/Ehhimh5uO1fayV0QIW7DShEQqhtUfhYc+cBPlw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.26.0/go.mod h1:2bIszWvQRlJVmJLiuLhukLImRjKPcYdzzsx6darK02A=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 h1:UQUsRi8WTzhZntp5313l+CHIAT95ojUI2lpP/ExlZa4=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0/go.mod h1:Cz6ft6Dkn3Et6l2v2a9/RpN7epQ1GtDlO6lj8bEcOvw=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=
+github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8 h1:n3F+mWm+b4D7uNbx1syN/uQTVDwt2sWfk23Mhzwzec4=
+github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
+github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
+github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/RoaringBitmap/roaring v1.2.3/go.mod h1:plvDsJQpxOC5bw8LRteu/MLWHsHez/3y6cubLI4/1yE=
+github.com/Shopify/sarama v1.19.0 h1:9oksLxC6uxVPHPVYUmq6xhr1BOF/hHobWH2UzO67z1s=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw=
+github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM=
+github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
+github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alexflint/go-arg v1.4.2/go.mod h1:9iRbDxne7LcR/GSvEr7ma++GLpdIU1zrghf2y2768kM=
+github.com/alexflint/go-arg v1.4.3 h1:9rwwEBpMXfKQKceuZfYcwuc/7YY7tWJbFsgG5cAU/uo=
+github.com/alexflint/go-arg v1.4.3/go.mod h1:3PZ/wp/8HuqRZMUUgu7I+e1qcpUbvmS258mRXkFH4IA=
+github.com/alexflint/go-scalar v1.0.0/go.mod h1:GpHzbCOZXEKMEcygYQ5n/aa4Aq84zbxjy3MxYW0gjYw=
+github.com/alexflint/go-scalar v1.1.0 h1:aaAouLLzI9TChcPXotr6gUhq+Scr8rl0P9P4PnltbhM=
+github.com/alexflint/go-scalar v1.1.0/go.mod h1:LoFvNMqS1CPrMVltza4LvnGKhaSpc3oyLEBUZVhhS2o=
+github.com/anacrolix/args v0.5.1-0.20220509024600-c3b77d0b61ac h1:XWoepbk3zgOQ8jMO3vpOnohd6MfENPbFZPivB2L7myc=
+github.com/anacrolix/args v0.5.1-0.20220509024600-c3b77d0b61ac/go.mod h1:Fj/N2PehEwTBE5t/V/9xgTcxDkuYQ+5IBoFw/8gkldI=
+github.com/anacrolix/backtrace v0.0.0-20221205112523-22a61db8f82e h1:A0Ty9UeyBDIo29ZMnk0AvPqWDIa4HVvCaJqWNlCrMXA=
+github.com/anacrolix/backtrace v0.0.0-20221205112523-22a61db8f82e/go.mod h1:4YFqy+788tLJWtin2jNliYVJi+8aDejG9zcu/2/pONw=
+github.com/anacrolix/bargle v0.0.0-20221014000746-4f2739072e9d h1:ypNOsIwvdumNRlqWj/hsnLs5TyQWQOylwi+T9Qs454A=
+github.com/anacrolix/bargle v0.0.0-20221014000746-4f2739072e9d/go.mod h1:9xUiZbkh+94FbiIAL1HXpAIBa832f3Mp07rRPl5c5RQ=
+github.com/anacrolix/bargle v1.0.0 h1:jc4EKULEJx4GV10rpERpQp1jsiJWWOEGplsqssc4sAQ=
+github.com/anacrolix/bargle v1.0.0/go.mod h1:9xUiZbkh+94FbiIAL1HXpAIBa832f3Mp07rRPl5c5RQ=
+github.com/anacrolix/bargle/v2 v2.0.0-20240909020204-5265698a6040 h1:gneDM+Y60h9YY5xyG1buJss/OWjBZa+eYFO/Go3e7ro=
+github.com/anacrolix/bargle/v2 v2.0.0-20240909020204-5265698a6040/go.mod h1:rKvwnOHgcXKPJTINj5RmkifgpxgEGC9bkJiv5kM4ctM=
+github.com/anacrolix/bargle/v2 v2.0.0 h1:jeKsKOCiAWKMi0HJdlrux/+V12I7PVX5dny/HxQ3xZw=
+github.com/anacrolix/bargle/v2 v2.0.0/go.mod h1:rKvwnOHgcXKPJTINj5RmkifgpxgEGC9bkJiv5kM4ctM=
+github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k=
+github.com/anacrolix/envpprof v1.2.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4=
+github.com/anacrolix/envpprof v1.3.0/go.mod h1:7QIG4CaX1uexQ3tqd5+BRa/9e2D02Wcertl6Yh0jCB0=
+github.com/anacrolix/fuse v0.3.2-0.20250603105216-aeb550c91d7a h1:bP+SxvpLAWXgpRvDKmB+d8n4XEWYHH5czGlcZ5Kw66Y=
+github.com/anacrolix/fuse v0.3.2-0.20250603105216-aeb550c91d7a/go.mod h1:vN3X/6E+uHNjg5F8Oy9FD9I+pYxeDWeB8mNjIoxL5ds=
+github.com/anacrolix/fuse v0.3.2 h1:ablJbmHt2BeYGnNrlfXkAcKN96mMMeXZN/ZAqo1AY/o=
+github.com/anacrolix/fuse v0.3.2/go.mod h1:vN3X/6E+uHNjg5F8Oy9FD9I+pYxeDWeB8mNjIoxL5ds=
+github.com/anacrolix/generics v0.0.0-20230816105729-c755655aee45/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8=
+github.com/anacrolix/gostdapp v0.1.0 h1:sZC+gSLhA7Hdalak5rPCkhO0YSEl0tt/lsovxh6qka4=
+github.com/anacrolix/gostdapp v0.1.0/go.mod h1:2pstbgWcpBCY3rFUldM0NbDCrP86vWsh61wj8yY517E=
+github.com/anacrolix/gostdapp v0.2.0 h1:UNuF8oKKFIa2tzcXLTiVStCYSUfRith81EskV05gIfk=
+github.com/anacrolix/gostdapp v0.2.0/go.mod h1:2pstbgWcpBCY3rFUldM0NbDCrP86vWsh61wj8yY517E=
+github.com/anacrolix/gostdapp v0.2.1-0.20251125125325-fbac3f25c013 h1:fdjruiqT1gnhzeX9kzumnGaluIlRDq6dZXZDaUUiK1k=
+github.com/anacrolix/gostdapp v0.2.1-0.20251125125325-fbac3f25c013/go.mod h1:HamXfNHg2qYDW7Trq9N8zPqhQGFocF/TLB1NngI6mAM=
+github.com/anacrolix/log v0.13.2-0.20220711050817-613cb738ef30/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68=
+github.com/anacrolix/log v0.14.1/go.mod h1:1OmJESOtxQGNMlUO5rcv96Vpp9mfMqXXbe2RdinFLdY=
+github.com/anacrolix/log v0.15.2/go.mod h1:m0poRtlr41mriZlXBQ9SOVZ8yZBkLjOkDhd5Li5pITA=
+github.com/anacrolix/missinggo/v2 v2.7.1/go.mod h1:2IZIvmRTizALNYFYXsPR7ofXPzJgyBpKZ4kMqMEICkI=
+github.com/anacrolix/missinggo/v2 v2.7.3/go.mod h1:mIEtp9pgaXqt8VQ3NQxFOod/eQ1H0D1XsZzKUQfwtac=
+github.com/anacrolix/multiless v0.3.1-0.20221221005021-2d12701f83f7/go.mod h1:zJv1JF9AqdZiHwxqPgjuOZDGWER6nyE48WBCi/OOrMM=
+github.com/anacrolix/possum/go v0.3.2 h1:w3YUJ0g687leLaOBcXleB/dVjdIfWX3KyozfHpEv/Fk=
+github.com/anacrolix/possum/go v0.3.2/go.mod h1:whUIKRXLj4sSSUmJWxmnASflkUh4/DUzscn5nC6A340=
+github.com/anacrolix/possum/go v0.4.1-0.20250821022006-9d91a37b5d3d h1:pfv3nv6j7Cu1taUUECn77jzHAP3g7cq8fKLJY8lcddI=
+github.com/anacrolix/possum/go v0.4.1-0.20250821022006-9d91a37b5d3d/go.mod h1:LMkSvp9JAi1eKzmrDgJ6iDcWGalpb8Ddnsd9Ovy+ey8=
+github.com/anacrolix/publicip v0.2.0 h1:n/BmRxXRlOT/wQFd6Xhu57r9uTU+Xvb9MyEkLooh3TU=
+github.com/anacrolix/publicip v0.2.0/go.mod h1:67G1lVkLo8UjdEcJkwScWVTvlJ35OCDsRJoWXl/wi4g=
+github.com/anacrolix/squirrel v0.6.4 h1:K6ABRMCms0xwpEIdY3kAaDBUqiUeUYCKLKI0yHTr9IQ=
+github.com/anacrolix/squirrel v0.6.4/go.mod h1:0kFVjOLMOKVOet6ja2ac1vTOrqVbLj2zy2Fjp7+dkE8=
+github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g=
+github.com/anacrolix/tagflag v1.3.0 h1:5NI+9CniDnEH0BWA4UcQbERyFPjKJqZnVkItGVIDy/s=
+github.com/anacrolix/tagflag v1.3.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8=
+github.com/anacrolix/torrent v1.48.1-0.20230103142631-c20f73d53e9f/go.mod h1:PwdFzmApEr96LcqogJhuw41XOdd1oHGkp+qE9hhXyDc=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/v15 v15.0.2 h1:60IliRbiyTWCWjERBCkO1W4Qun9svcYoZrSLcyOsMLE=
+github.com/apache/arrow/go/v15 v15.0.2/go.mod h1:DGXsR3ajT524njufqf95822i+KTh+yea1jass9YXgjA=
+github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
+github.com/aws/aws-sdk-go-v2/service/appconfig v1.4.2 h1:5fez51yE//mtmaEkh9JTAcLl4xg60Ha86pE+FIqinGc=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bits-and-blooms/bitset v1.20.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
+github.com/bketelsen/crypt v0.0.4 h1:w/jqZtC9YD4DS/Vp9GhWfWcCpuAL58oTnLoI8vE9YHU=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625 h1:ckJgFhFWywOx+YLEMIJsTb+NV6NexWICk5+AMSuz3ss=
+github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23 h1:D21IyuvjDCshj1/qq+pCNd3VZOAEI9jy6Bi131YlXgI=
+github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY=
+github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
+github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
+github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charmbracelet/harmonica v0.2.0 h1:8NxJWRWg/bzKqqEaaeFNipOu77YR5t8aSwG4pgaUBiQ=
+github.com/charmbracelet/harmonica v0.2.0/go.mod h1:KSri/1RMQOZLbw7AHqgcBycp8pgJnQMYYT8QZRqZ1Ao=
+github.com/chelnak/ysmrr v0.2.1 h1:9xLbVcrgnvEFovFAPnDiTCtxHiuLmz03xCg5OUgdOfc=
+github.com/chelnak/ysmrr v0.2.1/go.mod h1:9TEgLy2xDMGN62zJm9XZrEWY/fHoGoBslSVEkEpRCXk=
+github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89 h1:aPflPkRFkVwbW6dmcVqfgwp1i+UWGFH6VgR1Jim5Ygc=
+github.com/chromedp/chromedp v0.9.2 h1:dKtNz4kApb06KuSXoTQIyUC2TrA0fhGDwNZf3bcgfKw=
+github.com/chromedp/sysutil v1.0.0 h1:+ZxhTpfpZlmchB58ih/LBHX52ky7w2VhQVKQMucy3Ic=
+github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
+github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI=
+github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403 h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk=
+github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
+github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=
+github.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=
+github.com/consensys/bavard v0.1.27/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/bavard v0.1.31-0.20250406004941-2db259e4b582 h1:dTlIwEdFQmldzFf5F6bbTcYWhvnAgZai2g8eq3Wwxqg=
+github.com/consensys/bavard v0.1.31-0.20250406004941-2db259e4b582/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/bavard v0.2.1 h1:i2/ZeLXpp7eblPWzUIWf+dtfBocKQIxuiqy9XZlNSfQ=
+github.com/consensys/bavard v0.2.1/go.mod h1:k/zVjHHC4B+PQy1Pg7fgvG3ALicQw540Crag8qx+dZs=
+github.com/consensys/gnark-crypto v0.16.0/go.mod h1:Ke3j06ndtPTVvo++PhGNgvm+lgpLvzbcE2MqljY7diU=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d h1:t5Wuyh53qYyg9eqn4BbnlIT+vmhyww0TatL+zT3uWgI=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w=
+github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=
+github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE=
+github.com/dgraph-io/ristretto v0.0.2 h1:a5WaUrDa0qm0YrAAS1tUykT5El3kt62KNZZeMxQn3po=
+github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
+github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7 h1:tYwu/z8Y0NkkzGEh3z21mSWggMg4LwLRFucLS7TjARg=
+github.com/dop251/goja_nodejs v0.0.0-20211022123610-8dd9abb0616d h1:W1n4DvpzZGOISgp7wWNtraLcHtnmnTwBlJidqtMIuwQ=
+github.com/dvyukov/go-fuzz v0.0.0-20220726122315-1d375ef9f9f6 h1:sE4tvxWw01v7K3MAHwKF2UF3xQbgy23PRURntuV1CkU=
+github.com/dvyukov/go-fuzz v0.0.0-20220726122315-1d375ef9f9f6/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
+github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw=
+github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
+github.com/ebitengine/purego v0.9.0/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
+github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
+github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
+github.com/elliotchance/orderedmap v1.4.0 h1:wZtfeEONCbx6in1CZyE6bELEt/vFayMvsxqI5SgsR+A=
+github.com/elliotchance/orderedmap v1.4.0/go.mod h1:wsDwEaX5jEoyhbs7x93zk2H/qv0zwuhg4inXhDkYqys=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
+github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM=
+github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=
+github.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
+github.com/erigontech/erigon-snapshot v1.3.1-0.20250718024755-5b6d5407844d/go.mod h1:ooHlCl+eEYzebiPu+FP6Q6SpPUeMADn8Jxabv3IKb9M=
+github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
+github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/gliderlabs/ssh v0.1.1 h1:j3L6gSLQalDETeEg/Jg0mGY0/y/N6zI2xX1978P0Uqw=
+github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a h1:FQqoVvjbiUioBBFUL5up+h+GdCa/AnJsL/1bIs/veSI=
+github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31 h1:gclg6gY70GLy3PbkQ1AERPfmLMMagS60DKF78eWwLn8=
+github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=
+github.com/go-jose/go-jose/v4 v4.0.4 h1:VsjPI33J0SB9vQM6PLmNjoHqMQNGPiZ0rHL7Ni7Q6/E=
+github.com/go-jose/go-jose/v4 v4.0.4/go.mod h1:NKb5HO1EZccyMpiZNbdUw/14tiXNyUJh188dfnMCAfc=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
+github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
+github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
+github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
+github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
+github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw=
+github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
+github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
+github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvIJ/QDllP44g=
+github.com/go-llsqlite/crawshaw v0.5.6-0.20250312230104-194977a03421/go.mod h1:/YJdV7uBQaYDE0fwe4z3wwJIZBJxdYzd38ICggWqtaE=
+github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
+github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198 h1:FSii2UQeSLngl3jFoR4tUKZLprO7qUlh/TKKticc0BM=
+github.com/goccmack/gocc v0.0.0-20230228185258-2292f9e40198/go.mod h1:DTh/Y2+NbnOVVoypCCQrovMPDKUGp4yZpSbWg5D0XIM=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
+github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=
+github.com/golang/mock v1.4.4 h1:l75CXGRSwbaYNpl/Z2X1XIIAMSCquvXgpVZDhwEIJsc=
+github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/flatbuffers v23.5.26+incompatible h1:M9dgRyhJemaM4Sw8+66GHBu8ioaQmyPLg1b8VwK5WJg=
+github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
+github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9 h1:OF1IPgv+F4NmqmJ98KTjdN97Vs1JxDPB3vbmYzV2dpk=
+github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
+github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
+github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
+github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
+github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
+github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4=
+github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99 h1:twflg0XRTjwKpxb/jFExr4HGq6on2dEOmnL6FV+fgPw=
+github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
+github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
+github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
+github.com/hashicorp/consul/api v1.1.0 h1:BNQPM9ytxj6jbjjdRPioQ94T6YXriSopn0i8COv6SRA=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1 h1:LnuDWGNsoajlhGyHJvuWW6FVqRl8JOTPqS6CPTsYjhY=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
+github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
+github.com/hashicorp/go-plugin v1.0.1 h1:4OtAfUGbnKC6yS48p0CtMX2oFYtzFZVv6rok3cRWgnE=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
+github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0=
+github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0 h1:WhIgCr5a7AaVH6jPUwjtRuuE7/RDufnUvzIr48smyxs=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/vault/api v1.0.4 h1:j08Or/wryXT4AcHj1oCbMd7IijXcKzYUGw59LGu9onU=
+github.com/hashicorp/vault/sdk v0.1.13 h1:mOEPeOhT7jl0J4AMl1E705+BcmeRs1VmKNb9F0sMLy8=
+github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
+github.com/honeycombio/honeycomb-opentelemetry-go v0.3.0 h1:3qotL5cFNAiuLk/YZsUGNmz9ywnXqGP9hGFQoNo5PdA=
+github.com/honeycombio/honeycomb-opentelemetry-go v0.3.0/go.mod h1:qzzIv/RAGWhyRgyRwwRaxmn5tZMkc/bbTX3zit4sBGI=
+github.com/honeycombio/honeycomb-opentelemetry-go v0.11.0 h1:x0DndAGP+m1rk9JrlPLnrnAjJotoq/EbWtg9agwnl7I=
+github.com/honeycombio/honeycomb-opentelemetry-go v0.11.0/go.mod h1:2DZt7DdTnnd1o7biwC9Ab0Z3OAGXId56gYVPR7LsLQs=
+github.com/honeycombio/opentelemetry-go-contrib/launcher v0.0.0-20221031150637-a3c60ed98d54 h1:CFyJMKF0jR2dv+3Cpj/GuRa5XBXKnJqiqmWMYifTzok=
+github.com/honeycombio/opentelemetry-go-contrib/launcher v0.0.0-20221031150637-a3c60ed98d54/go.mod h1:30UdGSqrIP+QzOGVyFiK6konkG1bQzs342GvLicmmnY=
+github.com/honeycombio/otel-config-go v1.17.0 h1:3/zig0L3IGnfgiCrEfAwBsM0rF57+TKTyJ/a8yqW2eM=
+github.com/honeycombio/otel-config-go v1.17.0/go.mod h1:g2mMdfih4sYKfXBtz2mNGvo3HiQYqX4Up4pdA8JOF2s=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI=
+github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465 h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE=
+github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk=
+github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8=
+github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro=
+github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek=
+github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo=
+github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q=
+github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o=
+github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4=
+github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1 h1:ujPKutqRlJtcfWk6toYVYagwra7HQHbXOaS171b4Tg8=
+github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/juju/clock v0.0.0-20180524022203-d293bb356ca4 h1:v4AMWbdtZyIX8Ohv+FEpSwaCtho9uTtGbwjZab+rDuw=
+github.com/juju/retry v0.0.0-20160928201858-1998d01ba1c3 h1:56R9RckAEUeaptI0yGE8tzNAs3dD6Wf7giI6D51Czx8=
+github.com/juju/utils v0.0.0-20180808125547-9dfc6dbfb02b h1:nuDUxG8lPFiE1nEw7cJpUBBciNI4hoBOPV+tFn/QZ2w=
+github.com/juju/version v0.0.0-20161031051906-1f41e27e54f2 h1:loQDi5MyxxNm7Q42mBGuPD6X+F6zw8j5S9yexLgn/BE=
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kevinmbeaulieu/eq-go v1.0.0 h1:AQgYHURDOmnVJ62jnEk0W/7yFKEn+Lv8RHN6t7mB0Zo=
+github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
+github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/pty v1.1.3 h1:/Um6a/ZmD5tF7peoOJ5oN5KMQ0DrGVQSXLNwyckutPk=
+github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
+github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80 h1:6Yzfa6GP0rIo/kULo2bwGEkFvCePZ3qHDDTC3/J9Swo=
+github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q=
+github.com/libp2p/zeroconf/v2 v2.2.0/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs=
+github.com/lispad/go-generics-tools v1.1.0 h1:mbSgcxdFVmpoyso1X/MJHXbSbSL3dD+qhRryyxk+/XY=
+github.com/lispad/go-generics-tools v1.1.0/go.mod h1:2csd1EJljo/gy5qG4khXol7ivCPptNjG5Uv2X8MgK84=
+github.com/logrusorgru/aurora/v4 v4.0.0 h1:sRjfPpun/63iADiSvGGjgA1cAYegEWMPCJdUpJYn9JA=
+github.com/logrusorgru/aurora/v4 v4.0.0/go.mod h1:lP0iIa2nrnT/qoFXcOZSrZQpJ1o6n2CUf/hyHi2Q4ZQ=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4 h1:sIXJOMrYnQZJu7OB7ANSF4MYri2fTEGIsRLz6LwI4xE=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe h1:W/GaMY0y69G4cFlmsC6B9sbuo2fP8OFP1ABjt4kPz+w=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/matryer/moq v0.4.0 h1:HsZIdEsj8+9nE940WW7FFxMgrgSxGfMkNXhVTHUhfMU=
+github.com/matryer/moq v0.4.0/go.mod h1:kUfalaLk7TcyXhrhonBYQ2Ewun63+/xGbZ7/MzzzC4Y=
+github.com/matryer/moq v0.5.2 h1:b2bsanSaO6IdraaIvPBzHnqcrkkQmk1/310HdT2nNQs=
+github.com/matryer/moq v0.5.2/go.mod h1:W/k5PLfou4f+bzke9VPXTbfJljxoeR1tLHigsmbshmU=
+github.com/mattn/getwild v0.0.1 h1:+Nlzxt7fonj2MtO9y/rg5hxOnM3H6tuTqeD38W25jfo=
+github.com/mattn/getwild v0.0.1/go.mod h1:AG+GKQydHp7iLJn+VV+D7y8LeYs5bQ0Xz4fmKd5o1Sg=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/microcosm-cc/bluemonday v1.0.1 h1:SIYunPjnlXcW+gVfvm0IlSeR5U3WZUOLfVmqg85Go44=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
+github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
+github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
+github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY=
+github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU=
+github.com/mmcloughlin/profile v0.1.1 h1:jhDmAqPyebOsVDOCICJoINoLb/AnLBaUw58nFzxWS2w=
+github.com/mmcloughlin/profile v0.1.1/go.mod h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU=
+github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM=
+github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86 h1:D6paGObi5Wud7xg83MaEFyjxQB1W5bz5d0IFppr+ymk=
+github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab h1:eFXv9Nu1lGbrNbj619aWwZfVF5HBrm9Plte8aNptuTI=
+github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c h1:bY6ktFuJkt+ZXkX0RChQch2FtHpWQLVS8Qo1YasiIVk=
+github.com/neelance/sourcemap v0.0.0-20200213170602-2833bce08e4c/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/npillmayer/nestext v0.1.3 h1:2dkbzJ5xMcyJW5b8wwrX+nnRNvf/Nn1KwGhIauGyE2E=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
+github.com/onsi/ginkgo/v2 v2.19.0/go.mod h1:rlwLi9PilAFJ8jCg9UE1QP6VBpd6/xj3SRC0d6TU0To=
+github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/openzipkin/zipkin-go v0.1.6 h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo=
+github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde h1:x0TT0RDC7UhAVbbWWBzr41ElhJx5tXPWkIHA2HWPRuw=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
+github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
+github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ=
+github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pion/transport v0.13.1 h1:/UH5yLeQtwm2VZIPjxwnNFxjS4DFhyLfS4GlfuKUzfA=
+github.com/pion/transport v0.13.1/go.mod h1:EBxbqzyv+ZrmDb82XswEE0BjfQFtuw1Nu6sjnjWCsGg=
+github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o=
+github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
+github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA=
+github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
+github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.35.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481 h1:jMxcLa+VjJKhpCwbLUXAD15wJ+hhvXMLujCl3MkXpfM=
+github.com/robertkrimen/godocdown v0.0.0-20130622164427-0bfa04905481/go.mod h1:C9WhFzY47SzYBIvzFqSvHIR6ROgDo4TtdTuRaOMjF/s=
+github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
+github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
+github.com/sahilm/fuzzy v0.1.1 h1:ceu5RHF8DGgoi+/dR5PsECjCDH1BE3Fnmpo7aVXOdRA=
+github.com/sahilm/fuzzy v0.1.1/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y=
+github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/sethvargo/go-envconfig v0.8.2 h1:DDUVuG21RMgeB/bn4leclUI/837y6cQCD4w8hb5797k=
+github.com/sethvargo/go-envconfig v0.8.2/go.mod h1:Iz1Gy1Sf3T64TQlJSvee81qDhf7YIlt8GMUX6yyNFs0=
+github.com/sethvargo/go-envconfig v1.3.0 h1:gJs+Fuv8+f05omTpwWIu6KmuseFAXKrIaOZSh8RMt0U=
+github.com/sethvargo/go-envconfig v1.3.0/go.mod h1:JLd0KFWQYzyENqnEPWWZ49i4vzZo/6nRidxI8YvGiHw=
+github.com/shirou/gopsutil/v3 v3.22.9 h1:yibtJhIVEMcdw+tCTbOPiF1VcsuDeTE4utJ8Dm4c5eA=
+github.com/shirou/gopsutil/v3 v3.22.9/go.mod h1:bBYl1kjgEJpWpxeHmLI+dVHWtyAwfcmSBLDsp2TNT8A=
+github.com/shirou/gopsutil/v3 v3.24.3 h1:eoUGJSmdfLzJ3mxIhmOAhgKEKgQkeOwKpz1NbhVnuPE=
+github.com/shirou/gopsutil/v3 v3.24.3/go.mod h1:JpND7O217xa72ewWz9zN2eIIkPWsDN/3pl0H8Qt0uwg=
+github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4 h1:Fth6mevc5rX7glNLpbAMJnqKlfIkcTjZCSHEeqvKbcI=
+github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48 h1:vabduItPAIz9px5iryD5peyx7O3Ya8TBThapgXim98o=
+github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470 h1:qb9IthCFBmROJ6YBS31BEMeSYjOscSiG+EO+JVNTz64=
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM=
+github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636 h1:aSISeOcal5irEhJd1M+IrApc0PdcN7e7Aj4yuEnOrfQ=
+github.com/shurcooL/go v0.0.0-20200502201357-93f07166e636/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc=
+github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d h1:Yoy/IzG4lULT6qZg62sVC+qyBL8DQkmD2zv6i7OImrc=
+github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c h1:UOk+nlt1BJtTcH15CT7iNO7YVWTfTv/DNwEAQHLIaDQ=
+github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b h1:vYEG87HxbU6dXj5npkeulCS96Dtz5xg3jcfCgpcvbIw=
+github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20 h1:7pDq9pAMCQgRohFmd25X8hIH8VxmT3TaDm+r9LHxgBk=
+github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9 h1:MPblCbqA5+z6XARjScMfz1TqtJC7TuTRj0U9VqIBs6k=
+github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50 h1:crYRwvwjdVh1biHzzciFHe8DrZcYrVcZFlJtykhRctg=
+github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc h1:eHRtZoIi6n9Wo1uR+RU44C247msLWwyA89hVKwRLkMk=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
+github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9 h1:fxoFD0in0/CBzXoyNhMTjvBZYW6ilSnTw7N7y/8vkmM=
+github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191 h1:T4wuULTrzCKMFlg3HmKHgXAF8oStFb/+lOIupLV2v+o=
+github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241 h1:Y+TeIabU8sJD10Qwd/zMty2/LEaT9GNDaA6nyZf+jgo=
+github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122 h1:TQVQrsyNaimGwF7bIhzoVC9QkKm4KsWd8cECGzFx8gI=
+github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2 h1:bu666BQci+y4S0tVRVjsHUeRon6vUXmsGBwdowgMrg4=
+github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82 h1:LneqU9PHDsg/AkPDU3AkqMxnMYL+imaqkpflHu73us8=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537 h1:YGaxtkYjb8mnTvtufv2LKLwCQu2/C7qFB7UtrOlTWOY=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
+github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133 h1:JtcyT0rk/9PKOdnKQzuDR+FSjh7SGtJwpgVpfZBRKlQ=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY=
+github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec=
+github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac h1:wbW+Bybf9pXxnCFAOWZTqkRjAc7rAIwo2e1ArUhiHxg=
+github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
+github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
+github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d h1:yKm7XZV6j9Ev6lojP2XaIshpT4ymkqhMeSghO5Ps00E=
+github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e h1:qpG93cPwA5f7s/ZPBJnGOYQNK/vKsaDaseuKT5Asee8=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
+github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE=
+github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g=
+github.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=
+github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
+github.com/stephens2424/writerset v1.0.2 h1:znRLgU6g8RS5euYRcy004XeE4W+Tu44kALzy7ghPif8=
+github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07 h1:UyzmZLoiDWMRywV4DUYb9Fbt8uiOSooupjTq10vpvnU=
+github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
+github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
+github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
+github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
+github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo=
+github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
+github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
+github.com/viant/assertly v0.4.8 h1:5x1GzBaRteIwTr5RAGFVG14uNeRFxVNbXPWrK2qAgpc=
+github.com/viant/toolbox v0.24.0 h1:6TteTDQ68CjgcCe8wH3D3ZhUQQOJXMTbj/D9rkk2a1k=
+github.com/willf/bitset v1.1.10 h1:NotGKqX0KwQ72NUzqrjZq5ipPNDQex9lo3WpaS8L2sc=
+github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
+github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM=
+github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
+github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0=
+github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
+go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI=
+go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI=
+go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0 h1:KG6fOUk3EwSH1dEpsAbsLKFbn3cFwN9xDu8plGu55zI=
+go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0/go.mod h1:bSd579exEkh/P5msRcom8YzVB6NsUxYKyV+D/FYOY7Y=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0 h1:F7q2tNlCaHY9nMKHR6XH9/qkp8FktLnIcy6jJNyOCQw=
+go.opentelemetry.io/contrib/detectors/gcp v1.36.0/go.mod h1:IbBN8uAIIx734PTonTPxAxnjc2pQTxWNkwfstZ+6H2k=
+go.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs=
+go.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=
+go.opentelemetry.io/contrib/instrumentation/host v0.36.4 h1:2D0q/69KewnkCkOI9I9uXgi1XQXvwQIfMebMcPft0no=
+go.opentelemetry.io/contrib/instrumentation/host v0.36.4/go.mod h1:IQdse+GFHec/g2M4wtj6cE4uA5PJGQjjXP/602LjHBQ=
+go.opentelemetry.io/contrib/instrumentation/host v0.63.0 h1:zsaUrWypCf0NtYSUby+/BS6QqhXVNxMQD5w4dLczKCQ=
+go.opentelemetry.io/contrib/instrumentation/host v0.63.0/go.mod h1:Ru+kuFO+ToZqBKwI59rCStOhW6LWrbGisYrFaX61bJk=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.36.4 h1:7AY5NdRzyU5s1ek3E4VK3FBnPtQ6La1i7sIn9hNgjsk=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.36.4/go.mod h1:yFSLOnffweT7Es+IzY1DF5KP0xa2Wl15SJfKqAyDXq8=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0 h1:PeBoRj6af6xMI7qCupwFvTbbnd49V7n5YpG6pg8iDYQ=
+go.opentelemetry.io/contrib/instrumentation/runtime v0.63.0/go.mod h1:ingqBCtMCe8I4vpz/UVzCW6sxoqgZB37nao91mLQ3Bw=
+go.opentelemetry.io/contrib/processors/baggage/baggagetrace v0.1.0 h1:Q9kJxbstjL5DwlvCyQnTYbMvOftoVANnQcSzqYeA0AQ=
+go.opentelemetry.io/contrib/processors/baggage/baggagetrace v0.1.0/go.mod h1:2rK1JRWN+pfF/E9OhEjUczWwbX7oELQdsKyUgOTEliw=
+go.opentelemetry.io/contrib/processors/baggagecopy v0.11.0 h1:kCgcpaw83eiQq3q9kC0mlSF+2/GFj979aphWGlHmxRw=
+go.opentelemetry.io/contrib/processors/baggagecopy v0.11.0/go.mod h1:HA84H6DSS0J6sbXzDj8bjmrooSK1UhPZvh3Dijltw5A=
+go.opentelemetry.io/contrib/propagators/b3 v1.11.1 h1:icQ6ttRV+r/2fnU46BIo/g/mPu6Rs5Ug8Rtohe3KqzI=
+go.opentelemetry.io/contrib/propagators/b3 v1.11.1/go.mod h1:ECIveyMXgnl4gorxFcA7RYjJY/Ql9n20ubhbfDc3QfA=
+go.opentelemetry.io/contrib/propagators/b3 v1.38.0 h1:uHsCCOSKl0kLrV2dLkFK+8Ywk9iKa/fptkytc6aFFEo=
+go.opentelemetry.io/contrib/propagators/b3 v1.38.0/go.mod h1:wMRSZJZcY8ya9mApLLhwIMjqmApy2o/Ml+62lhvxyHU=
+go.opentelemetry.io/contrib/propagators/ot v1.11.1 h1:iezQwYW2sAaXwbXXA6Zg+PLjNnzc+M4hLKvOR6Q/CvI=
+go.opentelemetry.io/contrib/propagators/ot v1.11.1/go.mod h1:oBced35DewKV7xvvIWC/oCaCFvthvTa6zjyvP2JhPAY=
+go.opentelemetry.io/contrib/propagators/ot v1.38.0 h1:k4gSyyohaDXI8F9BDXYC3uO2vr5sRNeQFMsN9Zn0EoI=
+go.opentelemetry.io/contrib/propagators/ot v1.38.0/go.mod h1:2hDsuiHRO39SRUMhYGqmj64z/IuMRoxE4bBSFR82Lo8=
+go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1 h1:X2GndnMCsUPh6CiY2a+frAbNsXaPLbB0soHRYhAZ5Ig=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.1/go.mod h1:i8vjiSzbiUC7wOQplijSXMYUpNM93DtlS5CbUT+C6oQ=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.33.0 h1:OT/UjHcjog4A1s1UMCtyehIKS+vpjM5Du0r7KGsH6TE=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.33.0/go.mod h1:0XctNDHEWmiSDIU8NPbJElrK05gBJFcYlGP4FMGo4g4=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.33.0 h1:1SVtGtRsNyGgv1fRfNXfh+sJowIwzF0gkf+61lvTgdg=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.33.0/go.mod h1:ryB27ubOBXsiqfh6MwtSdx5knzbSZtjvPnMMmt3AykQ=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0 h1:vl9obrcoWVKp/lwl8tRE33853I8Xru9HFbw/skNeLs8=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.38.0/go.mod h1:GAXRxmLJcVM3u22IjTg74zWBrRCKq8BnOqUVLodpcpw=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.33.0 h1:NoG4v01cdLZfOeNGBQmSe4f4SeP+fx8I/0qzRgTKsGI=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v0.33.0/go.mod h1:6anbDXBcTp3Qit87pfFmT0paxTJ8sWRccTNYVywN/H8=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0 h1:Oe2z/BCg5q7k4iXC3cqJxKYg0ieRiOqF0cecFYdPTwk=
+go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.38.0/go.mod h1:ZQM5lAJpOsKnYagGg/zV2krVqTtaVdYdDkhMoX6Oalg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1 h1:MEQNafcNCB0uQIti/oHgU7CZpUMYQ7qigBwMVKycHvc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.1/go.mod h1:19O5I2U5iys38SsmT2uDJja/300woyzE1KPIQxEUBUc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1 h1:LYyG/f1W/jzAix16jbksJfMQFpOH/Ma6T639pVPMgfI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.1/go.mod h1:QrRRQiY3kzAoYPNLP0W/Ikg0gR6V3LMc+ODSxr7yyvg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.1 h1:tFl63cpAAcD9TOU6U8kZU7KyXuSRYAZlbx1C61aaB74=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.1/go.mod h1:X620Jww3RajCJXw/unA+8IRTgxkdS7pi+ZwK9b7KUJk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.11.1 h1:3Yvzs7lgOw8MmbxmLRsQGwYdCubFmUHSooKaEhQunFQ=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.11.1/go.mod h1:pyHDt0YlyuENkD2VwHsiRDf+5DfI3EH7pfhUYW6sQUE=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 h1:kJxSDN4SgWWTjG/hPp3O7LCGLcHXFlvS2/FFOrwL+SE=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0/go.mod h1:mgIOzS7iZeKJdeB8/NYHrJ48fdGc71Llo5bJ1J4DWUE=
+go.opentelemetry.io/otel/log v0.14.0 h1:2rzJ+pOAZ8qmZ3DDHg73NEKzSZkhkGIua9gXtxNGgrM=
+go.opentelemetry.io/otel/log v0.14.0/go.mod h1:5jRG92fEAgx0SU/vFPxmJvhIuDU9E1SUnEQrMlJpOno=
+go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.11.1/go.mod h1:/l3FE4SupHJ12TduVjUkZtlfFqDCQJlOlithYrdktys=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/sdk/log v0.14.0 h1:JU/U3O7N6fsAXj0+CXz21Czg532dW2V4gG1HE/e8Zrg=
+go.opentelemetry.io/otel/sdk/log v0.14.0/go.mod h1:imQvII+0ZylXfKU7/wtOND8Hn4OpT3YUoIgqJVksUkM=
+go.opentelemetry.io/otel/sdk/metric v0.33.0/go.mod h1:xdypMeA21JBOvjjzDUtD0kzIcHO/SPez+a8HOzJPGp0=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
+go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
+go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
+go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
+go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
+go4.org v0.0.0-20180809161055-417644f6feb5 h1:+hE86LblG4AyDgwMCLTE6FOlM9+qjHSYS+rKqxUVdsM=
+golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d h1:E2M5QgjZ/Jg+ObCQAudsXxuTsLj7Nl5RV/lZcQZmKSo=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
+golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ=
+golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
+golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
+golang.org/x/exp v0.0.0-20220518171630-0b5c67f07fdf/go.mod h1:yh0Ynu2b5ZUe3MQfp2nM0ecK7wsgouWTDN0FNeJuIys=
+golang.org/x/exp v0.0.0-20221026004748-78e5e7837ae6/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
+golang.org/x/exp v0.0.0-20221217163422-3c43f8badb15/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
+golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
+golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak=
+golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4=
+golang.org/x/image v0.25.0 h1:Y6uW6rH1y5y/LK1J8BPWZtr6yZ7hrsy6hFrXjgsc2fQ=
+golang.org/x/image v0.25.0/go.mod h1:tCAmOEGthTtkalusGp1g3xa2gke8J6c2N565dTyl9Rs=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 h1:2M3HP5CCK1Si9FQhwnzYhXdG6DXeebvUHFpre8QvbyI=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
+golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
+golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/net v0.46.1-0.20251013234738-63d1a5100f82/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
+golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
+golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852 h1:xYq6+9AtI+xP3M4r0N1hCkHrInHDBohhquRgx9Kk6gI=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
+golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=
+golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
+golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ=
+golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
+golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
+golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
+golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
+golang.org/x/tools/go/expect v0.1.1-deprecated h1:jpBZDwmgPhXsKZC6WhL20P4b/wmnpsEAGHaNy0n/rJM=
+golang.org/x/tools/go/expect v0.1.1-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
+golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
+golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
+gonum.org/v1/plot v0.15.2 h1:Tlfh/jBk2tqjLZ4/P8ZIwGrLEWQSPDLRm/SNWKNXiGI=
+gonum.org/v1/plot v0.15.2/go.mod h1:DX+x+DWso3LTha+AdkJEv5Txvi+Tql3KAGkehP0/Ubg=
+google.golang.org/api v0.40.0 h1:uWrpz12dpVPn7cojP82mk02XDgTJLDPc2KbVTxrWb4A=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
+google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20240617180043-68d350f18fd4/go.mod h1:px9SlOOZBg1wM1zdnr8jEL4CNGUBZ+ZKYtNPApNQc4c=
+google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
+google.golang.org/genproto/googleapis/api v0.0.0-20251124214823-79d6a2a48846 h1:ZdyUkS9po3H7G0tuh955QVyyotWvOD4W0aEapeGeUYk=
+google.golang.org/genproto/googleapis/api v0.0.0-20251124214823-79d6a2a48846/go.mod h1:Fk4kyraUvqD7i5H6S43sj2W98fbZa75lpZz/eUyhfO0=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240624140628-dc46fd24d27d h1:UJNyZnYNR8oYgVe9BhTdmgmEMiCOaOrAEAnqhIIT5j4=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:/oe3+SiHAwz6s+M25PyTygWm3lnrhmGqIuIfkoUocqk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240624140628-dc46fd24d27d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250212204824-5a70512c5d8b/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251111163417-95abcf5c77ba/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
+google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
+google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA=
+google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
+google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
+google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20 h1:MLBCGN1O7GzIx+cBiwfYPwtmZ41U3Mn/cotLJciaArI=
+google.golang.org/grpc/examples v0.0.0-20230224211313-3775f633ce20/go.mod h1:Nr5H8+MlGWr5+xX/STzdoEqJrO+YteqFbMyCsrb6mH0=
+google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6 h1:ExN12ndbJ608cboPYflpTny6mXSzPrDLh0iTaVrRrds=
+google.golang.org/grpc/examples v0.0.0-20250407062114-b368379ef8f6/go.mod h1:6ytKWczdvnpnO+m+JiG9NjEDzR1FJfsnmJdG7B8QVZ8=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
+gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+grpc.go4.org v0.0.0-20170609214715-11d0a25b4919 h1:tmXTu+dfa+d9Evp8NpJdgOy6+rt8/x4yG7qPBrtNfLY=
+honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
+lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA=
+lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
+lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
+modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/cc/v3 v3.38.1/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
+modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
+modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
+modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI=
+modernc.org/ccgo/v3 v3.0.0-20220910160915-348f15de615a/go.mod h1:8p47QxPkdugex9J4n9P2tLZ9bK01yngIVp00g4nomW0=
+modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g=
+modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
+modernc.org/ccgo/v3 v3.17.0 h1:o3OmOqx4/OFnl4Vm3G8Bgmqxnvxnh0nbxeT5p/dWChA=
+modernc.org/ccgo/v3 v3.17.0/go.mod h1:Sg3fwVpmLvCUTaqEUjiBDAvshIaKDB0RXaf+zgqFu8I=
+modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
+modernc.org/ccorpus2 v1.5.2 h1:Ui+4tc58mf/W+2arcYCJR903y3zl3ecsI7Fpaaqozyw=
+modernc.org/ccorpus2 v1.5.2/go.mod h1:Wifvo4Q/qS/h1aRoC2TffcHsnxwTikmi1AuLANuucJQ=
+modernc.org/fileutil v1.3.3/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc=
+modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/lex v1.1.1 h1:prSCNTLw1R4rn7M/RzwsuMtAuOytfyR3cnyM07P+Pas=
+modernc.org/lex v1.1.1/go.mod h1:6r8o8DLJkAnOsQaGi8fMoi+Vt6LTbDaCrkUK729D8xM=
+modernc.org/lexer v1.0.4 h1:hU7xVbZsqwPphyzChc7nMSGrsuaD2PDNOmzrzkS5AlE=
+modernc.org/lexer v1.0.4/go.mod h1:tOajb8S4sdfOYitzCgXDFmbVJ/LE0v1fNJ7annTw36U=
+modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA=
+modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0=
+modernc.org/libc v1.19.0/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0=
+modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0=
+modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
+modernc.org/libc v1.21.5/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
+modernc.org/libc v1.22.3/go.mod h1:MQrloYP209xa2zHome2a8HLiLm6k0UT8CoHpV74tOFw=
+modernc.org/libc v1.65.10/go.mod h1:StFvYpx7i/mXtBAfVOjaU0PWZOvIRoZSgXhrwXzr8Po=
+modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/scannertest v1.0.2 h1:JPtfxcVdbRvzmRf2YUvsDibJsQRw8vKA/3jb31y7cy0=
+modernc.org/scannertest v1.0.2/go.mod h1:RzTm5RwglF/6shsKoEivo8N91nQIoWtcWI7ns+zPyGA=
+modernc.org/sqlite v1.20.0/go.mod h1:EsYz8rfOvLCiYTy5ZFsOYzoCcRMu98YYkwAcCw5YIYw=
+modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
+modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE=
+modernc.org/tcl v1.15.1 h1:mOQwiEK4p7HruMZcwKTZPw/aqtGM4aY00uzWhlKKYws=
+modernc.org/tcl v1.15.1/go.mod h1:aEjeGJX2gz1oWKOLDVZ2tnEWLUrIn8H+GFu+akoDhqs=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE=
+modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ=
+rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
+rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
+rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
+rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
+rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sourcegraph.com/sourcegraph/go-diff v0.5.0 h1:eTiIR0CoWjGzJcnQ3OkhIl/b9GJovq4lSAVRt0ZFEG8=
+sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c=
+zombiezen.com/go/sqlite v0.12.0/go.mod h1:RKdRR9xoQDSnB47yy7G4PtrjGZJtupb/SyEbJZLaRes=
diff --git a/interfaces.go b/interfaces.go
index c77c58e0918..124542822a8 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -108,6 +108,26 @@ type SyncProgress struct {
HighestBlock uint64 // Highest alleged block number in the chain
PulledStates uint64 // Number of state trie entries already downloaded
KnownStates uint64 // Total number of state trie entries known about
+
+ // Arbitrum "snap sync" fields.
+ SyncedAccounts uint64 // Number of accounts downloaded
+ SyncedAccountBytes uint64 // Number of account trie bytes persisted to disk
+ SyncedBytecodes uint64 // Number of bytecodes downloaded
+ SyncedBytecodeBytes uint64 // Number of bytecode bytes downloaded
+ SyncedStorage uint64 // Number of storage slots downloaded
+ SyncedStorageBytes uint64 // Number of storage trie bytes persisted to disk
+
+ HealedTrienodes uint64 // Number of state trie nodes downloaded
+ HealedTrienodeBytes uint64 // Number of state trie bytes persisted to disk
+ HealedBytecodes uint64 // Number of bytecodes downloaded
+ HealedBytecodeBytes uint64 // Number of bytecode bytes persisted to disk
+
+ HealingTrienodes uint64 // Number of state trie nodes pending
+ HealingBytecode uint64 // Number of bytecodes pending
+
+ // "transaction indexing" fields
+ TxIndexFinishedBlocks uint64 // Number of blocks whose transactions are already indexed
+ TxIndexRemainingBlocks uint64 // Number of blocks whose transactions are not indexed yet
}
// ChainSyncReader wraps access to the node's current sync status. If there's no
@@ -131,6 +151,8 @@ type CallMsg struct {
AccessList types.AccessList // EIP-2930 access list
BlobHashes []common.Hash // EIP-4844 versioned blob hashes
Authorizations []types.Authorization // EIP-3074 authorizations
+
+ SkipL1Charging bool // Arbitrum: skip charging the L1 data fee when simulating this call
}
// A ContractCaller provides contract calls, essentially transactions that are executed by
diff --git a/node/cli/default_flags.go b/node/cli/default_flags.go
index d9646b80a4b..08ce6108e10 100644
--- a/node/cli/default_flags.go
+++ b/node/cli/default_flags.go
@@ -40,6 +40,8 @@ var DefaultFlags = []cli.Flag{
&utils.TxPoolTraceSendersFlag,
&utils.TxPoolCommitEveryFlag,
&PruneDistanceFlag,
+ &L2RPCAddrFlag,
+ &L2RPCReceiptAddrFlag,
&PruneBlocksDistanceFlag,
&PruneModeFlag,
&utils.KeepExecutionProofsFlag,
diff --git a/node/cli/flags.go b/node/cli/flags.go
index e13306154ba..62b3ac96d7e 100644
--- a/node/cli/flags.go
+++ b/node/cli/flags.go
@@ -74,6 +74,17 @@ var (
Value: kv.ReadersLimit - 128,
}
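+ // A usage sketch for the two flags below (hypothetical endpoints):
+ //
+ //	erigon --l2rpc=http://nitro-node:8547 --l2rpc.receipt=http://nitro-archive:8547
+ //
+ // When --l2rpc.receipt is omitted, receipts are fetched from the --l2rpc endpoint as well.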
+ L2RPCAddrFlag = cli.StringFlag{
+ Name: "l2rpc",
+ Usage: "address of arbitrum L2 rpc server to get blocks and transactions from",
+ Value: "",
+ }
+ L2RPCReceiptAddrFlag = cli.StringFlag{
+ Name: "l2rpc.receipt",
+ Usage: "address of arbitrum L2 rpc server to fetch receipts from (if different from l2rpc)",
+ Value: "",
+ }
+
PruneModeFlag = cli.StringFlag{
Name: "prune.mode",
Usage: `Choose a pruning preset to run onto. Available values: "full", "archive", "minimal", "blocks".
@@ -240,6 +251,15 @@ func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config, logger log.
}
_ = chainId
+ cfg.L2RPCAddr = ctx.String(L2RPCAddrFlag.Name)
+ if cfg.L2RPCAddr != "" {
+  log.Info("[Arbitrum] Using L2 RPC server to fetch blocks", "address", cfg.L2RPCAddr)
+ }
+
+ cfg.L2RPCReceiptAddr = ctx.String(L2RPCReceiptAddrFlag.Name)
+ if cfg.L2RPCReceiptAddr == "" {
+  cfg.L2RPCReceiptAddr = cfg.L2RPCAddr
+ }
+ if cfg.L2RPCReceiptAddr != "" {
+  log.Info("[Arbitrum] Using L2 RPC server to fetch receipts", "address", cfg.L2RPCReceiptAddr)
+ }
+
blockDistance := ctx.Uint64(PruneBlocksDistanceFlag.Name)
distance := ctx.Uint64(PruneDistanceFlag.Name)
@@ -347,6 +367,25 @@ func ApplyFlagsForEthConfigCobra(f *pflag.FlagSet, cfg *ethconfig.Config) {
cfg.Prune = mode
+ if flg := f.Lookup(L2RPCAddrFlag.Name); flg != nil {
+ cfg.L2RPCAddr = flg.Value.String()
+ } else {
+ cfg.L2RPCAddr = *f.String(L2RPCAddrFlag.Name, L2RPCAddrFlag.DefaultText, "")
+ }
+ if cfg.L2RPCAddr != "" {
+ log.Info("[Arbitrum] Using L2 RPC server to fetch blocks", "address", cfg.L2RPCAddr)
+ }
+
+ if flg := f.Lookup(L2RPCReceiptAddrFlag.Name); flg != nil {
+ cfg.L2RPCReceiptAddr = flg.Value.String()
+ } else {
+ cfg.L2RPCReceiptAddr = *f.String(L2RPCReceiptAddrFlag.Name, L2RPCReceiptAddrFlag.DefaultText, "")
+ }
+ if cfg.L2RPCReceiptAddr == "" {
+ cfg.L2RPCReceiptAddr = cfg.L2RPCAddr
+ }
+ log.Info("[Arbitrum] Using L2 RPC server to fetch receipts", "address", cfg.L2RPCReceiptAddr)
+
if v := f.String(BatchSizeFlag.Name, BatchSizeFlag.Value, BatchSizeFlag.Usage); v != nil {
err := cfg.BatchSize.UnmarshalText([]byte(*v))
if err != nil {
diff --git a/node/debug/loudpanic.go b/node/debug/loudpanic.go
index 56b76c4686b..1d5a5c0a5b6 100644
--- a/node/debug/loudpanic.go
+++ b/node/debug/loudpanic.go
@@ -19,7 +19,9 @@
package debug
-import "runtime/debug"
+import (
+ "runtime/debug"
+)
// LoudPanic panics in a way that gets all goroutine stacks printed on stderr.
func LoudPanic(x any) {
diff --git a/node/eth/backend.go b/node/eth/backend.go
index 1051d38a083..3275af2d740 100644
--- a/node/eth/backend.go
+++ b/node/eth/backend.go
@@ -36,6 +36,8 @@ import (
"sync/atomic"
"time"
+ "github.com/erigontech/erigon/arb/ethdb/wasmdb"
+
"github.com/erigontech/mdbx-go/mdbx"
lru "github.com/hashicorp/golang-lru/arc/v2"
"golang.org/x/sync/errgroup"
@@ -135,6 +137,7 @@ import (
"github.com/erigontech/erigon/txnprovider/txpool"
"github.com/erigontech/erigon/txnprovider/txpool/txpoolcfg"
+ _ "github.com/erigontech/erigon/arb/chain" // Register Arbitrum chains
_ "github.com/erigontech/erigon/polygon/chain" // Register Polygon chains
)
@@ -414,7 +417,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
backend.chainDB = temporalDb
// Can happen in some configurations
- if err := backend.setUpSnapDownloader(ctx, stack.Config(), config.Downloader); err != nil {
+ if err := backend.setUpSnapDownloader(ctx, stack.Config(), config.Downloader, chainConfig); err != nil {
return nil, err
}
@@ -903,6 +906,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
config.Sync,
stageloop.SilkwormForExecutionStage(backend.silkworm, config),
config.ExperimentalBAL,
+ wasmdb.OpenArbitrumWasmDB(ctx, dirs.ArbitrumWasm),
),
stagedsync.StageSendersCfg(backend.chainDB, chainConfig, config.Sync, false, dirs.Tmp, config.Prune, blockReader, backend.sentriesClient.Hd),
stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt, param.PayloadId, txnProvider, blockReader),
@@ -1142,6 +1146,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig
}
// start HTTP API
+ stack.Config().Http.IsArbitrum = chainConfig.IsArbitrum()
httpRpcCfg := stack.Config().Http
if config.Ethstats != "" {
var headCh chan [][]byte
@@ -1200,6 +1205,14 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig
return nil
}
+func (s *Ethereum) Engine() consensus.Engine {
+ return s.engine
+}
+
+//func (e *Ethereum) BlockChain() core.BlockChain {
+// panic("implment blockchain return")
+//}
+
func (s *Ethereum) APIs() []rpc.API {
return s.apiList
}
diff --git a/node/ethconfig/config.go b/node/ethconfig/config.go
index 1c93543ba3d..214a18b1599 100644
--- a/node/ethconfig/config.go
+++ b/node/ethconfig/config.go
@@ -107,6 +107,9 @@ var Defaults = Config{
GPO: FullNodeGPO,
RPCTxFeeCap: 1, // 1 ether
+ ArbRPCEVMTimeout: 5 * time.Second,
+ L2RPCAddr: "", // arbitrum only field, url to connect to L2 RPC server
+
ImportMode: false,
Snapshot: BlocksFreezing{
KeepBlocks: false,
@@ -230,7 +233,8 @@ type Config struct {
// RPCTxFeeCap is the global transaction fee(price * gaslimit) cap for
// send-transction variants. The unit is ether.
- RPCTxFeeCap float64 `toml:",omitempty"`
+ RPCTxFeeCap float64 `toml:",omitempty"`
+ ArbRPCEVMTimeout time.Duration `toml:",omitempty"`
StateStream bool
@@ -275,6 +279,9 @@ type Config struct {
// fork choice update timeout
FcuTimeout time.Duration
+
+ L2RPCAddr string
+ L2RPCReceiptAddr string
}
type Sync struct {
diff --git a/node/gdbme/gdbme_darwin.go b/node/gdbme/gdbme_darwin.go
index 15b8347ab48..c741fdcf4b7 100644
--- a/node/gdbme/gdbme_darwin.go
+++ b/node/gdbme/gdbme_darwin.go
@@ -10,6 +10,8 @@ import (
"strings"
"syscall"
+ "github.com/erigontech/erigon/common/dir"
+
"github.com/erigontech/erigon/cmd/utils"
"github.com/erigontech/erigon/common/dir"
)
diff --git a/node/node.go b/node/node.go
index e9ecdf3fb9c..9f17114795c 100644
--- a/node/node.go
+++ b/node/node.go
@@ -300,6 +300,8 @@ func OpenDatabase(ctx context.Context, config *nodecfg.Config, label kv.Label, n
name = "txpool"
case dbcfg.PolygonBridgeDB:
name = "polygon-bridge"
+ case dbcfg.ArbitrumDB:
+ name = "arbitrum"
case dbcfg.ConsensusDB:
if len(name) == 0 {
return nil, errors.New("expected a consensus name")
diff --git a/node/rulesconfig/config.go b/node/rulesconfig/config.go
index 0025dd212b5..554ee77857d 100644
--- a/node/rulesconfig/config.go
+++ b/node/rulesconfig/config.go
@@ -49,6 +49,10 @@ func CreateRulesEngine(ctx context.Context, nodeConfig *nodecfg.Config, chainCon
) rules.Engine {
var eng rules.Engine
+ if chainConfig.IsArbitrum() {
+ return arbos.Engine{}
+ }
+
switch consensusCfg := config.(type) {
case *ethashcfg.Config:
switch consensusCfg.PowMode {
diff --git a/p2p/sentry/sentry_multi_client/sentry_multi_client_test.go b/p2p/sentry/sentry_multi_client/sentry_multi_client_test.go
index 25a675511f5..e8d1b9143b3 100644
--- a/p2p/sentry/sentry_multi_client/sentry_multi_client_test.go
+++ b/p2p/sentry/sentry_multi_client/sentry_multi_client_test.go
@@ -16,6 +16,7 @@ import (
proto_sentry "github.com/erigontech/erigon/node/gointerfaces/sentryproto"
proto_types "github.com/erigontech/erigon/node/gointerfaces/typesproto"
"github.com/erigontech/erigon/p2p/protocols/eth"
+ "github.com/erigontech/erigon/turbo/services"
)
type receiptRLP69 struct {
@@ -127,6 +128,78 @@ func TestMultiClient_GetReceipts69(t *testing.T) {
}
}
+func TestMultiClient_AnnounceBlockRangeLoop(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ testMinimumBlockHeight := uint64(100)
+ testLatestBlockHeight := uint64(200)
+ testBestHash := common.HexToHash("0xabc")
+
+ var sentMessage *proto_sentry.OutboundMessageData
+ mockSentry := &mockSentryClient{
+ sendMessageToAllFunc: func(ctx context.Context, req *proto_sentry.OutboundMessageData, opts ...grpc.CallOption) (*proto_sentry.SentPeers, error) {
+ sentMessage = req
+ return &proto_sentry.SentPeers{}, nil
+ },
+ handShakeFunc: func(ctx context.Context, req *emptypb.Empty, opts ...grpc.CallOption) (*proto_sentry.HandShakeReply, error) {
+ return &proto_sentry.HandShakeReply{
+ Protocol: proto_sentry.Protocol_ETH69,
+ }, nil
+ },
+ }
+
+ mockStatus := &mockStatusDataProvider{
+ getStatusDataFunc: func(ctx context.Context) (*proto_sentry.StatusData, error) {
+ return &proto_sentry.StatusData{
+ MinimumBlockHeight: testMinimumBlockHeight,
+ MaxBlockHeight: testLatestBlockHeight,
+ BestHash: gointerfaces.ConvertHashToH256(testBestHash),
+ }, nil
+ },
+ }
+
+ mockBlockReader := &mockFullBlockReader{
+ readyFunc: func(ctx context.Context) <-chan error {
+ ch := make(chan error, 1)
+ ch <- nil // Signal that the block reader is ready
+ return ch
+ },
+ }
+
+ cs := &MultiClient{
+ sentries: []proto_sentry.SentryClient{mockSentry},
+ statusDataProvider: mockStatus,
+ blockReader: mockBlockReader,
+ logger: log.New(),
+ }
+
+ cs.doAnnounceBlockRange(ctx)
+
+ if sentMessage == nil {
+ t.Fatal("No message was sent")
+ }
+ if sentMessage.Id != proto_sentry.MessageId_BLOCK_RANGE_UPDATE_69 {
+ t.Errorf("Expected message ID %v, got %v", proto_sentry.MessageId_BLOCK_RANGE_UPDATE_69, sentMessage.Id)
+ }
+
+ var response eth.BlockRangeUpdatePacket
+ if err := rlp.DecodeBytes(sentMessage.Data, &response); err != nil {
+ t.Fatalf("Failed to decode response: %v", err)
+ }
+
+ if response.Earliest != testMinimumBlockHeight {
+ t.Errorf("Expected earliest block height %d, got %d", testMinimumBlockHeight, response.Earliest)
+ }
+ if response.Latest != testLatestBlockHeight {
+ t.Errorf("Expected latest block height %d, got %d", testLatestBlockHeight, response.Latest)
+ }
+ if response.LatestHash != testBestHash {
+ t.Errorf("Expected latest hash %s, got %s", testBestHash.Hex(), response.LatestHash.Hex())
+ }
+}
+
+// Mock implementations
type mockSentryClient struct {
proto_sentry.SentryClient
sendMessageByIdFunc func(ctx context.Context, req *proto_sentry.SendMessageByIdRequest, opts ...grpc.CallOption) (*proto_sentry.SentPeers, error)
diff --git a/polygon/bridge/snapshot_integrity.go b/polygon/bridge/snapshot_integrity.go
index de8fe6465f9..0b96a92c49c 100644
--- a/polygon/bridge/snapshot_integrity.go
+++ b/polygon/bridge/snapshot_integrity.go
@@ -9,6 +9,7 @@ import (
"github.com/erigontech/erigon/common/log/v3"
"github.com/erigontech/erigon/db/kv"
"github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/stagedsync/stages"
"github.com/erigontech/erigon/polygon/bor/borcfg"
polychain "github.com/erigontech/erigon/polygon/chain"
"github.com/erigontech/erigon/polygon/heimdall"
@@ -84,6 +85,20 @@ func ValidateBorEvents(ctx context.Context, db kv.TemporalRoDB, blockReader bloc
if db != nil {
err = db.View(ctx, func(tx kv.Tx) error {
+ if false {
+ lastEventId, err := NewSnapshotStore(NewTxStore(tx), snapshots, nil).LastEventId(ctx)
+ if err != nil {
+ return err
+ }
+
+ bodyProgress, err := stages.GetStageProgress(tx, stages.Bodies)
+ if err != nil {
+ return err
+ }
+
+ log.Info("[integrity] LAST Event", "event", lastEventId, "body-progress", bodyProgress)
+ }
+
return nil
})
diff --git a/polygon/bridge/snapshot_store_test.go b/polygon/bridge/snapshot_store_test.go
index 1c6cf1096d6..6de882ec554 100644
--- a/polygon/bridge/snapshot_store_test.go
+++ b/polygon/bridge/snapshot_store_test.go
@@ -18,6 +18,7 @@ import (
"github.com/erigontech/erigon/db/snaptype"
"github.com/erigontech/erigon/db/snaptype2"
"github.com/erigontech/erigon/db/version"
+ "github.com/erigontech/erigon/eth/ethconfig"
"github.com/erigontech/erigon/execution/chain/networkname"
"github.com/erigontech/erigon/node/ethconfig"
"github.com/erigontech/erigon/polygon/heimdall"
diff --git a/polygon/heimdall/snapshot_store_test.go b/polygon/heimdall/snapshot_store_test.go
index 20d3df2ffa7..02c8690f721 100644
--- a/polygon/heimdall/snapshot_store_test.go
+++ b/polygon/heimdall/snapshot_store_test.go
@@ -19,6 +19,7 @@ import (
"github.com/erigontech/erigon/db/snaptype"
"github.com/erigontech/erigon/db/snaptype2"
"github.com/erigontech/erigon/db/version"
+ "github.com/erigontech/erigon/eth/ethconfig"
"github.com/erigontech/erigon/execution/chain/networkname"
"github.com/erigontech/erigon/node/ethconfig"
)
diff --git a/rpc/ethapi/api.go b/rpc/ethapi/api.go
index 5c2c9146de2..3d2fc276941 100644
--- a/rpc/ethapi/api.go
+++ b/rpc/ethapi/api.go
@@ -56,9 +56,11 @@ type CallArgs struct {
ChainID *hexutil.Big `json:"chainId,omitempty"`
BlobVersionedHashes []common.Hash `json:"blobVersionedHashes,omitempty"`
AuthorizationList []types.JsonAuthorization `json:"authorizationList"`
+
+ SkipL1Charging *bool `json:"skipL1Charging"` // Arbitrum
}
-func (args *CallArgs) FromOrEmpty() accounts.Address {
+func (args *CallArgs) FromOrEmpty() common.Address {
return args.from()
}
@@ -317,6 +319,23 @@ func (args *CallArgs) ToTransaction(globalGasCap uint64, baseFee *uint256.Int) (
return tx, nil
}
+// Arbitrum
+// L2OnlyGasCap raises the vanilla gas cap by the tx's L1 data costs expressed in L2 gas. The resulting cap,
+// once data payments are made, equals the original vanilla cap for the remaining, L2-specific work the tx does.
+func (args *CallArgs) L2OnlyGasCap(gasCap uint64, header *types.Header) (uint64, error) {
+ msg, err := args.ToMessage(gasCap, nil)
+ if err != nil {
+ return 0, err
+ }
+ InterceptRPCGasCap(&gasCap, msg, header)
+ return gasCap, nil
+}
+
+// Allows ArbOS to update the gas cap so that it ignores the message's specific L1 poster costs.
+var InterceptRPCGasCap = func(gascap *uint64, msg *types.Message, header *types.Header) {}
+
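+// A minimal wiring sketch (an assumption, not part of this change): an Arbitrum-specific
+// package would be expected to replace the no-op hook above, roughly:
+//
+//	InterceptRPCGasCap = func(gasCap *uint64, msg *types.Message, header *types.Header) {
+//		// raise *gasCap by the poster's L1 data cost converted to L2 gas
+//	}
+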
+// End arbitrum
+
// Account indicates the overriding fields of account during the execution of
// a message call.
// Note, state and stateDiff can't be specified at the same time. If state is
@@ -475,11 +494,11 @@ func RPCMarshalHeader(head *types.Header) map[string]any {
// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are
// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain
// transaction hashes.
-func RPCMarshalBlockDeprecated(block *types.Block, inclTx bool, fullTx bool) (map[string]any, error) {
- return RPCMarshalBlockExDeprecated(block, inclTx, fullTx, nil, common.Hash{})
+func RPCMarshalBlockDeprecated(block *types.Block, inclTx bool, fullTx bool, isArbitrumNitro bool) (map[string]any, error) {
+ return RPCMarshalBlockExDeprecated(block, inclTx, fullTx, nil, common.Hash{}, isArbitrumNitro)
}
-func RPCMarshalBlockExDeprecated(block *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxHash common.Hash) (map[string]any, error) {
+func RPCMarshalBlockExDeprecated(block *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxHash common.Hash, isArbitrumNitro bool) (map[string]any, error) {
fields := RPCMarshalHeader(block.Header())
fields["size"] = hexutil.Uint64(block.Size())
if _, ok := fields["transactions"]; !ok {
@@ -524,10 +543,19 @@ func RPCMarshalBlockExDeprecated(block *types.Block, inclTx bool, fullTx bool, b
if block.Withdrawals() != nil {
fields["withdrawals"] = block.Withdrawals()
}
-
+ if isArbitrumNitro {
+ fillArbitrumHeaderInfo(block.Header(), fields)
+ }
return fields, nil
}
+func fillArbitrumHeaderInfo(header *types.Header, fields map[string]any) {
+ info := types.DeserializeHeaderExtraInformation(header)
+ fields["l1BlockNumber"] = hexutil.Uint64(info.L1BlockNumber)
+ fields["sendRoot"] = info.SendRoot
+ fields["sendCount"] = hexutil.Uint64(info.SendCount)
+}
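+
+// With the fields above, a Nitro block returned over RPC carries three extra members, for
+// example (illustrative values): "l1BlockNumber": "0x12d687", "sendRoot": "0x...", "sendCount": "0x2a".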
+
// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction
type RPCTransaction struct {
BlockHash *common.Hash `json:"blockHash"`
@@ -553,6 +581,20 @@ type RPCTransaction struct {
YParity *hexutil.Big `json:"yParity,omitempty"`
R *hexutil.Big `json:"r"`
S *hexutil.Big `json:"s"`
+
+ // Arbitrum fields:
+ RequestId *common.Hash `json:"requestId,omitempty"` // Contract SubmitRetryable Deposit
+ TicketId *common.Hash `json:"ticketId,omitempty"` // Retry
+ MaxRefund *hexutil.Big `json:"maxRefund,omitempty"` // Retry
+ SubmissionFeeRefund *hexutil.Big `json:"submissionFeeRefund,omitempty"` // Retry
+ RefundTo *common.Address `json:"refundTo,omitempty"` // SubmitRetryable Retry
+ L1BaseFee *hexutil.Big `json:"l1BaseFee,omitempty"` // SubmitRetryable
+ DepositValue *hexutil.Big `json:"depositValue,omitempty"` // SubmitRetryable
+ RetryTo *common.Address `json:"retryTo,omitempty"` // SubmitRetryable
+ RetryValue *hexutil.Big `json:"retryValue,omitempty"` // SubmitRetryable
+ RetryData *hexutil.Bytes `json:"retryData,omitempty"` // SubmitRetryable
+ Beneficiary *common.Address `json:"beneficiary,omitempty"` // SubmitRetryable
+ MaxSubmissionFee *hexutil.Big `json:"maxSubmissionFee,omitempty"` // SubmitRetryable
}
// NewRPCTransaction returns a transaction that will serialize to the RPC
@@ -619,7 +661,41 @@ func NewRPCTransaction(txn types.Transaction, blockHash common.Hash, blockNumber
}
}
- signer := types.LatestSignerForChainID(chainId.ToBig())
+ // Arbitrum transaction types
+ switch tx := txn.(type) {
+ case *types.ArbitrumInternalTx:
+ result.GasPrice = (*hexutil.Big)(tx.GetPrice().ToBig())
+ case *types.ArbitrumDepositTx:
+ result.GasPrice = (*hexutil.Big)(tx.GetPrice().ToBig())
+ result.RequestId = &tx.L1RequestId
+ case *types.ArbitrumContractTx:
+ result.GasPrice = (*hexutil.Big)(tx.GasFeeCap)
+ result.RequestId = &tx.RequestId
+ result.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap)
+ case *types.ArbitrumRetryTx:
+ result.GasPrice = (*hexutil.Big)(tx.GasFeeCap)
+ result.TicketId = &tx.TicketId
+ result.RefundTo = &tx.RefundTo
+ result.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap)
+ result.MaxRefund = (*hexutil.Big)(tx.MaxRefund)
+ result.SubmissionFeeRefund = (*hexutil.Big)(tx.SubmissionFeeRefund)
+ case *types.ArbitrumSubmitRetryableTx:
+ result.GasPrice = (*hexutil.Big)(tx.GasFeeCap)
+ result.RequestId = &tx.RequestId
+ result.L1BaseFee = (*hexutil.Big)(tx.L1BaseFee)
+ result.DepositValue = (*hexutil.Big)(tx.DepositValue)
+ result.RetryTo = tx.RetryTo
+ result.RetryValue = (*hexutil.Big)(tx.RetryValue)
+ result.RetryData = (*hexutil.Bytes)(&tx.RetryData)
+ result.Beneficiary = &tx.Beneficiary
+ result.RefundTo = &tx.FeeRefundAddr
+ result.MaxSubmissionFee = (*hexutil.Big)(tx.MaxSubmissionFee)
+ result.MaxFeePerGas = (*hexutil.Big)(tx.GasFeeCap)
+ case *types.ArbitrumUnsignedTx:
+ result.GasPrice = (*hexutil.Big)(tx.GasFeeCap)
+ }
+
+ signer := types.NewArbitrumSigner(*types.LatestSignerForChainID(chainId.ToBig()))
from, err := txn.Sender(*signer)
if err != nil {
log.Warn("sender recovery", "err", err)
@@ -677,3 +753,143 @@ func NewRPCBorTransaction(opaqueTxn types.Transaction, txHash common.Hash, block
func newRPCTransactionFromBlockAndTxGivenIndex(b *types.Block, txn types.Transaction, index uint64) *RPCTransaction {
return NewRPCTransaction(txn, b.Hash(), b.NumberU64(), index, b.BaseFee())
}
+
+// SendTxArgs represents the arguments to submit a new transaction into the transaction pool.
+type SendTxArgs struct {
+ From common.Address `json:"from"`
+ To *common.Address `json:"to"`
+ Gas *hexutil.Uint64 `json:"gas"`
+ GasPrice *hexutil.Big `json:"gasPrice"`
+ MaxPriorityFeePerGas *hexutil.Big `json:"tip"`
+ MaxFeePerGas *hexutil.Big `json:"feeCap"`
+ Value *hexutil.Big `json:"value"`
+ Nonce *hexutil.Uint64 `json:"nonce"`
+ // We accept "data" and "input" for backwards-compatibility reasons. "input" is the
+ // newer name and should be preferred by clients.
+ Data *hexutil.Bytes `json:"data"`
+ Input *hexutil.Bytes `json:"input"`
+
+ // For non-legacy transactions
+ AccessList *types.AccessList `json:"accessList,omitempty"`
+ ChainID *hexutil.Big `json:"chainId,omitempty"`
+
+ SkipL1Charging *bool `json:"skipL1Charging"` // Arbitrum
+}
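+
+// A hypothetical eth_call-style request body using the Arbitrum-only override (addresses and
+// calldata are placeholders; field names follow the json tags above):
+//
+//	{"from": "0x...", "to": "0x...", "data": "0x...", "skipL1Charging": true}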
+
+func (args *SendTxArgs) ToTransaction() types.Transaction {
+ return args.toTransaction()
+}
+
+// toTransaction converts the arguments to a transaction.
+// This assumes that setDefaults has been called.
+func (args *SendTxArgs) toTransaction() types.Transaction {
+ var input []byte
+ if args.Input != nil {
+ input = *args.Input
+ } else if args.Data != nil {
+ input = *args.Data
+ }
+
+ var tx types.Transaction
+ gasPrice, _ := uint256.FromBig((*big.Int)(args.GasPrice))
+ value, _ := uint256.FromBig((*big.Int)(args.Value))
+ if args.AccessList == nil {
+ tx = &types.LegacyTx{
+ CommonTx: types.CommonTx{
+ To: args.To,
+ Nonce: uint64(*args.Nonce),
+ GasLimit: uint64(*args.Gas),
+ Value: value,
+ Data: input,
+ },
+ GasPrice: gasPrice,
+ }
+ } else {
+ chainId, _ := uint256.FromBig((*big.Int)(args.ChainID))
+ if args.MaxFeePerGas == nil {
+ tx = &types.AccessListTx{
+ LegacyTx: types.LegacyTx{
+ CommonTx: types.CommonTx{
+ To: args.To,
+ Nonce: uint64(*args.Nonce),
+ GasLimit: uint64(*args.Gas),
+ Value: value,
+ Data: input,
+ },
+ GasPrice: gasPrice,
+ },
+ ChainID: chainId,
+ AccessList: *args.AccessList,
+ }
+ } else {
+ tip, _ := uint256.FromBig((*big.Int)(args.MaxPriorityFeePerGas))
+ feeCap, _ := uint256.FromBig((*big.Int)(args.MaxFeePerGas))
+ tx = &types.DynamicFeeTransaction{
+ CommonTx: types.CommonTx{
+ To: args.To,
+ Nonce: uint64(*args.Nonce),
+ GasLimit: uint64(*args.Gas),
+ Value: value,
+ Data: input,
+ },
+ TipCap: tip,
+ FeeCap: feeCap,
+ // MaxFeePerGas: feeCap,
+ // MaxPriorityFeePerGas: tip,
+ ChainID: chainId,
+ AccessList: *args.AccessList,
+ }
+ }
+ }
+ return tx
+}
+
+// // DoEstimateGas returns the lowest possible gas limit that allows the transaction to run
+// // successfully at block `blockNrOrHash`. It returns error if the transaction would revert, or if
+// // there are unexpected failures. The gas limit is capped by both `args.Gas` (if non-nil &
+// // non-zero) and `gasCap` (if non-zero).
+// func DoEstimateGas(ctx context.Context, b Backend, args SendTxArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *StateOverrides, gasCap uint64) (hexutil.Uint64, error) {
+// // Retrieve the base state and mutate it with any overrides
+// state, header, err := b.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash)
+// if state == nil || err != nil {
+// return 0, err
+// }
+// if err = overrides.Apply(state); err != nil {
+// return 0, err
+// }
+// header = updateHeaderForPendingBlocks(blockNrOrHash, header)
+
+// // Construct the gas estimator option from the user input
+// opts := &gasestimator.Options{
+// Config: b.ChainConfig(),
+// Chain: NewChainContext(ctx, b),
+// Header: header,
+// State: state,
+// Backend: b,
+// ErrorRatio: gasestimator.EstimateGasErrorRatio,
+// RunScheduledTxes: runScheduledTxes,
+// }
+// // Run the gas estimation and wrap any revertals into a custom return
+// // Arbitrum: this also appropriately recursively calls another args.ToMessage with increased gasCap by posterCostInL2Gas amount
+// call, err := args.ToMessage(gasCap, header, state, types.MessageGasEstimationMode)
+// if err != nil {
+// return 0, err
+// }
+
+// // Arbitrum: raise the gas cap to ignore L1 costs so that it's compute-only
+// {
+// gasCap, err = args.L2OnlyGasCap(gasCap, header, state, types.MessageGasEstimationMode)
+// if err != nil {
+// return 0, err
+// }
+// }
+
+// estimate, revert, err := gasestimator.Estimate(ctx, call, opts, gasCap)
+// if err != nil {
+// if len(revert) > 0 {
+// return 0, newRevertError(revert)
+// }
+// return 0, err
+// }
+// return hexutil.Uint64(estimate), nil
+// }
diff --git a/rpc/filters/arbitrum.go b/rpc/filters/arbitrum.go
new file mode 100644
index 00000000000..a19c5136e02
--- /dev/null
+++ b/rpc/filters/arbitrum.go
@@ -0,0 +1,45 @@
+package filters
+
+import (
+ "sync/atomic"
+ "time"
+
+ "github.com/erigontech/erigon/arb/lru"
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/execution/types"
+)
+
+// Config represents the configuration of the filter system.
+type Config struct {
+ LogCacheSize int // maximum number of cached blocks (default: 32)
+ Timeout time.Duration // how long filters stay active (default: 5min)
+}
+
+func (cfg Config) withDefaults() Config {
+ if cfg.Timeout == 0 {
+ cfg.Timeout = 5 * time.Minute
+ }
+ if cfg.LogCacheSize == 0 {
+ cfg.LogCacheSize = 32
+ }
+ return cfg
+}
+
+// FilterSystem holds resources shared by all filters.
+type FilterSystem struct {
+ backend Backend
+ logsCache *lru.Cache[common.Hash, *logCacheElem]
+ cfg *Config
+}
+type Backend struct{}
+
+// NewFilterSystem creates a filter system.
+func NewFilterSystem(backend Backend, config Config) *FilterSystem {
+ config = config.withDefaults()
+ return &FilterSystem{backend: backend, logsCache: lru.NewCache[common.Hash, *logCacheElem](config.LogCacheSize), cfg: &config}
+}
+
+type logCacheElem struct {
+ logs []*types.Log
+ body atomic.Value
+}
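+
+// A usage sketch with the current placeholder Backend (an assumption; the concrete backend is
+// expected to be filled in separately):
+//
+//	fs := NewFilterSystem(Backend{}, Config{})
+//	_ = fs // a zero-value Config falls back to a 32-block log cache and a 5 minute filter timeout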
diff --git a/rpc/gasprice/feehistory.go b/rpc/gasprice/feehistory.go
index 4dfb6fdd5f2..5fb9205ce21 100644
--- a/rpc/gasprice/feehistory.go
+++ b/rpc/gasprice/feehistory.go
@@ -117,8 +117,9 @@ func (oracle *Oracle) processBlock(bf *blockFees, percentiles []float64) {
}
bf.gasUsedRatio = float64(bf.header.GasUsed) / float64(bf.header.GasLimit)
- if blobGasUsed := bf.header.BlobGasUsed; blobGasUsed != nil && chainconfig.GetMaxBlobGasPerBlock(bf.header.Time) != 0 {
- bf.blobGasUsedRatio = float64(*blobGasUsed) / float64(chainconfig.GetMaxBlobGasPerBlock(bf.header.Time))
+ arbOsVersion := types.GetArbOSVersion(bf.header, chainconfig)
+ if blobGasUsed := bf.header.BlobGasUsed; blobGasUsed != nil && chainconfig.GetMaxBlobGasPerBlock(bf.header.Time, arbOsVersion) != 0 {
+ bf.blobGasUsedRatio = float64(*blobGasUsed) / float64(chainconfig.GetMaxBlobGasPerBlock(bf.header.Time, arbOsVersion))
}
if len(percentiles) == 0 {
diff --git a/rpc/gasprice/gasprice.go b/rpc/gasprice/gasprice.go
index d8031dffe6c..c35aff7610d 100644
--- a/rpc/gasprice/gasprice.go
+++ b/rpc/gasprice/gasprice.go
@@ -299,3 +299,8 @@ func setBorDefaultGpoIgnorePrice(chainConfig *chain.Config, gasPriceConfig gaspr
gasPriceConfig.IgnorePrice = gaspricecfg.BorDefaultGpoIgnorePrice
}
}
+
+// Arbitrum
+// EstimateGasErrorRatio is the amount of overestimation eth_estimateGas is
+// allowed to produce in order to speed up calculations.
+const EstimateGasErrorRatio = 0.015
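+
+// With a ratio of 0.015 the estimator's binary search may stop once the gap between the highest
+// failing and lowest passing gas limit is within roughly 1.5%, trading a slight overestimate for
+// fewer EVM executions (same constant as upstream go-ethereum's gas estimator).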
diff --git a/rpc/http.go b/rpc/http.go
index e62d49ad059..745a7ee9307 100644
--- a/rpc/http.go
+++ b/rpc/http.go
@@ -36,6 +36,7 @@ import (
"time"
"github.com/golang-jwt/jwt/v4"
+ "golang.org/x/time/rate"
"github.com/erigontech/erigon/common"
"github.com/erigontech/erigon/common/dbg"
@@ -59,6 +60,13 @@ type httpConn struct {
closeCh chan any
mu sync.Mutex // protects headers
headers http.Header
+ limiter *rate.Limiter
+}
+
+// SetLimit caps outgoing requests at r per second with a maximum burst of b.
+func (hc *httpConn) SetLimit(r rate.Limit, b int) {
+ hc.limiter.SetLimit(r)
+ hc.limiter.SetBurst(b)
}
// httpConn implements ServerCodec, but it is treated specially by Client
@@ -108,7 +116,8 @@ func DialHTTPWithClient(endpoint string, client *http.Client, logger log.Logger)
client: client,
headers: headers,
url: endpoint,
- closeCh: make(chan any),
+ closeCh: make(chan interface{}),
+ limiter: rate.NewLimiter(rate.Inf, 100), // no limit by default
}
return hc, nil
}, logger)
@@ -148,7 +157,14 @@ func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*jsonr
return nil
}
-func (hc *httpConn) doRequest(ctx context.Context, msg any) ([]byte, error) {
+// SetRequestLimit rate-limits outgoing requests to r per second with a maximum burst of b.
+// It only applies to HTTP-backed clients, since it configures the underlying httpConn limiter.
+func (c *Client) SetRequestLimit(r rate.Limit, b int) {
+ hc := c.writeConn.(*httpConn)
+ hc.SetLimit(r, b)
+}
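+
+// Example (hypothetical values; logger is assumed to be a log.Logger already in scope): cap an
+// HTTP-backed client at ~10 requests/second with bursts of up to 20:
+//
+//	client, _ := DialHTTPWithClient("http://localhost:8547", http.DefaultClient, logger)
+//	client.SetRequestLimit(rate.Limit(10), 20)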
+
+func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) ([]byte, error) {
body, err := json.Marshal(msg)
if err != nil {
return nil, err
@@ -164,6 +180,9 @@ func (hc *httpConn) doRequest(ctx context.Context, msg any) ([]byte, error) {
req.Header = hc.headers.Clone()
hc.mu.Unlock()
+ if err = hc.limiter.Wait(ctx); err != nil {
+ return nil, err
+ }
// do request
resp, err := hc.client.Do(req)
if err != nil {
diff --git a/rpc/jsonrpc/daemon.go b/rpc/jsonrpc/daemon.go
index 31799fba65f..0d6719d006c 100644
--- a/rpc/jsonrpc/daemon.go
+++ b/rpc/jsonrpc/daemon.go
@@ -80,12 +80,17 @@ func APIList(db kv.TemporalRoDB, eth rpchelper.ApiBackend, txPool txpoolproto.Tx
for _, enabledAPI := range cfg.API {
switch enabledAPI {
case "eth":
- list = append(list, rpc.API{
+ rapi := rpc.API{
Namespace: "eth",
Public: true,
- Service: EthAPI(ethImpl),
Version: "1.0",
- })
+ }
+ if cfg.IsArbitrum {
+ rapi.Service = EthAPI(&ArbAPIImpl{APIImpl: ethImpl})
+ } else {
+ rapi.Service = EthAPI(ethImpl)
+ }
+ list = append(list, rapi)
case "debug":
list = append(list, rpc.API{
Namespace: "debug",
@@ -94,12 +99,18 @@ func APIList(db kv.TemporalRoDB, eth rpchelper.ApiBackend, txPool txpoolproto.Tx
Version: "1.0",
})
case "net":
- list = append(list, rpc.API{
+ rapi := rpc.API{
Namespace: "net",
Public: true,
- Service: NetAPI(netImpl),
Version: "1.0",
- })
+ }
+ if cfg.IsArbitrum {
+ rapi.Service = NetAPIArb(NewNetAPIArbImpl(eth))
+ } else {
+ rapi.Service = NetAPI(netImpl)
+ }
+
+ list = append(list, rapi)
case "txpool":
list = append(list, rpc.API{
Namespace: "txpool",
diff --git a/rpc/jsonrpc/debug_api.go b/rpc/jsonrpc/debug_api.go
index 60f5e75940a..3a6e568828c 100644
--- a/rpc/jsonrpc/debug_api.go
+++ b/rpc/jsonrpc/debug_api.go
@@ -508,6 +508,10 @@ func (api *DebugAPIImpl) GetBadBlocks(ctx context.Context) ([]map[string]any, er
// Return empty array if no bad blocks found to align with other clients and spec
return []map[string]any{}, err
}
+ chainConfig, err := api.chainConfig(ctx, tx)
+ if err != nil {
+ return nil, err
+ }
results := make([]map[string]any, 0, len(blocks))
for _, block := range blocks {
@@ -518,7 +522,7 @@ func (api *DebugAPIImpl) GetBadBlocks(ctx context.Context) ([]map[string]any, er
blockRlp = fmt.Sprintf("%#x", rlpBytes)
}
- blockJson, err := ethapi.RPCMarshalBlock(block, true, true, nil)
+ blockJson, err := ethapi.RPCMarshalBlock(block, true, true, nil, chainConfig.IsArbitrumNitro(block.Number()))
if err != nil {
log.Error("Failed to marshal block", "err", err)
blockJson = map[string]any{}
diff --git a/rpc/jsonrpc/erigon_block.go b/rpc/jsonrpc/erigon_block.go
index fe3a2af28ed..8fde98a0479 100644
--- a/rpc/jsonrpc/erigon_block.go
+++ b/rpc/jsonrpc/erigon_block.go
@@ -111,9 +111,12 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti
}
firstHeaderTime := firstHeader.Time
-
+ chainConfig, err := api.chainConfig(ctx, tx)
+ if err != nil {
+ return nil, err
+ }
if currentHeaderTime <= uintTimestamp {
- blockResponse, err := buildBlockResponse(ctx, api._blockReader, tx, highestNumber, fullTx)
+ blockResponse, err := buildBlockResponse(ctx, api._blockReader, tx, highestNumber, fullTx, chainConfig)
if err != nil {
return nil, err
}
@@ -122,7 +125,7 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti
}
if firstHeaderTime >= uintTimestamp {
- blockResponse, err := buildBlockResponse(ctx, api._blockReader, tx, 0, fullTx)
+ blockResponse, err := buildBlockResponse(ctx, api._blockReader, tx, 0, fullTx, chainConfig)
if err != nil {
return nil, err
}
@@ -166,7 +169,7 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti
resultingHeader = beforeHeader
}
- response, err := buildBlockResponse(ctx, api._blockReader, tx, uint64(blockNum), fullTx)
+ response, err := buildBlockResponse(ctx, api._blockReader, tx, uint64(blockNum), fullTx, chainConfig)
if err != nil {
return nil, err
}
@@ -174,7 +177,7 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti
return response, nil
}
-func buildBlockResponse(ctx context.Context, br services.FullBlockReader, db kv.Tx, blockNum uint64, fullTx bool) (map[string]any, error) {
+func buildBlockResponse(ctx context.Context, br services.FullBlockReader, db kv.Tx, blockNum uint64, fullTx bool, chainConfig *chain.Config) (map[string]any, error) {
header, err := br.HeaderByNumber(ctx, db, blockNum)
if err != nil {
return nil, err
@@ -193,7 +196,7 @@ func buildBlockResponse(ctx context.Context, br services.FullBlockReader, db kv.
additionalFields := make(map[string]any)
- response, err := ethapi.RPCMarshalBlockEx(block, true, fullTx, nil, common.Hash{}, additionalFields)
+ response, err := ethapi.RPCMarshalBlockEx(block, true, fullTx, nil, common.Hash{}, additionalFields, chainConfig.IsArbitrumNitro(block.Number()))
if err == nil && rpc.BlockNumber(block.NumberU64()) == rpc.PendingBlockNumber {
// Pending blocks need to nil out a few fields
diff --git a/rpc/jsonrpc/eth_api_arb.go b/rpc/jsonrpc/eth_api_arb.go
new file mode 100644
index 00000000000..258d808660b
--- /dev/null
+++ b/rpc/jsonrpc/eth_api_arb.go
@@ -0,0 +1,42 @@
+package jsonrpc
+
+import (
+ "context"
+ "errors"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/execution/types"
+)
+
+type ArbAPIImpl struct {
+ *APIImpl
+}
+
+func (api *ArbAPIImpl) Coinbase(ctx context.Context) (common.Address, error) {
+ return common.Address{}, errors.New("the method eth_coinbase does not exist/is not available")
+}
+
+func (api *ArbAPIImpl) Mining(ctx context.Context) (bool, error) {
+ return false, errors.New("the method eth_mining does not exist/is not available")
+}
+
+func (api *ArbAPIImpl) Hashrate(ctx context.Context) (uint64, error) {
+ return 0, errors.New("the method eth_hashrate does not exist/is not available")
+}
+
+func (api *ArbAPIImpl) GetWork(ctx context.Context) ([4]string, error) {
+ return [4]string{}, errors.New("the method eth_getWork does not exist/is not available")
+}
+
+func (api *ArbAPIImpl) SubmitWork(ctx context.Context, nonce types.BlockNonce, powHash, digest common.Hash) (bool, error) {
+ return false, errors.New("the method eth_submitWork does not exist/is not available")
+}
+
+func (api *ArbAPIImpl) SubmitHashrate(ctx context.Context, hashRate hexutil.Uint64, id common.Hash) (bool, error) {
+ return false, errors.New("the method eth_submitHashrate does not exist/is not available")
+}
+
+func (api *ArbAPIImpl) ProtocolVersion(_ context.Context) (hexutil.Uint, error) {
+ return 0, errors.New("the method eth_protocolVersion does not exist/is not available")
+}
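+
+// These stubs deliberately reject the mining/PoW namespace: Arbitrum blocks are produced by the
+// sequencer, so eth_coinbase, eth_mining, eth_hashrate, eth_getWork, eth_submitWork,
+// eth_submitHashrate and eth_protocolVersion have no meaning here.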
diff --git a/rpc/jsonrpc/eth_block.go b/rpc/jsonrpc/eth_block.go
index d12d83df02f..83f910f3231 100644
--- a/rpc/jsonrpc/eth_block.go
+++ b/rpc/jsonrpc/eth_block.go
@@ -114,6 +114,8 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat
blockNumber := stateBlockNumber + 1
+ // TODO arbitrum
+ // timestamp := parent.Time + clparams.MainnetBeaconConfig.SecondsPerSlot
timestamp := parent.Time + chainConfig.SecondsPerSlot()
coinbase := parent.Coinbase
@@ -245,7 +247,9 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber
}
}
- response, err := ethapi.RPCMarshalBlockEx(b, true, fullTx, borTx, borTxHash, additionalFields)
+ response, err := ethapi.RPCMarshalBlockEx(b, true, fullTx, borTx, borTxHash, additionalFields, chainConfig.IsArbitrumNitro(b.Number()))
+ api.checkAndFillArbClassicL1BlockNumber(ctx, b, response, chainConfig, tx)
+
if err == nil && number == rpc.PendingBlockNumber {
// Pending blocks need to nil out a few fields
for _, field := range []string{"hash", "nonce", "miner"} {
@@ -304,7 +308,9 @@ func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNu
}
}
- response, err := ethapi.RPCMarshalBlockEx(block, true, fullTx, borTx, borTxHash, additionalFields)
+ response, err := ethapi.RPCMarshalBlockEx(block, true, fullTx, borTx, borTxHash, additionalFields, chainConfig.IsArbitrumNitro(block.Number()))
+ api.checkAndFillArbClassicL1BlockNumber(ctx, block, response, chainConfig, tx)
+
if err == nil && int64(number) == rpc.PendingBlockNumber.Int64() {
// Pending blocks need to nil out a few fields
for _, field := range []string{"hash", "nonce", "miner"} {
@@ -315,6 +321,50 @@ func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNu
return response, err
}
+func (api *APIImpl) checkAndFillArbClassicL1BlockNumber(ctx context.Context, block *types.Block, response map[string]any, chainConfig *chain.Config, tx kv.Tx) {
+ if chainConfig.IsArbitrum() && !chainConfig.IsArbitrumNitro(block.Number()) {
+ l1BlockNumber, err := api.fillArbClassicL1BlockNumber(ctx, block, tx)
+ if err != nil {
+ log.Error("error trying to fill legacy l1BlockNumber", "err", err)
+ } else {
+ response["l1BlockNumber"] = l1BlockNumber
+ }
+ }
+}
+
+// The L1 block number is recorded differently in Arbitrum Classic than in Nitro.
+// In Classic it is stored in the first transaction of the block; empty or filler blocks (for example during a reorg) carry none,
+// so we traverse the chain backwards until a block with transactions is found.
+// https://github.com/OffchainLabs/go-ethereum/blob/25fc5f0842584e72455e4d60a61f035623b1aba0/internal/ethapi/api.go#L1098-L1125
+func (api *APIImpl) fillArbClassicL1BlockNumber(ctx context.Context, block *types.Block, tx kv.Tx) (hexutil.Uint64, error) {
+ startBlockNum := block.Number().Int64()
+ blockNum := startBlockNum
+ i := int64(0)
+ for {
+ transactions := block.Transactions()
+ if len(transactions) > 0 {
+ legacyTx, ok := transactions[0].(*types.ArbitrumLegacyTxData)
+ if !ok {
+ return 0, fmt.Errorf("couldn't read legacy transaction from block %d", blockNum)
+ }
+ return hexutil.Uint64(legacyTx.L1BlockNumber), nil
+ }
+ if blockNum == 0 {
+ return 0, nil
+ }
+ i++
+ blockNum = startBlockNum - i
+ if i > 50 {
+ return 0, fmt.Errorf("couldn't find block with transactions. Reached %d", blockNum)
+ }
+ var err error
+ block, err = api.blockByNumber(ctx, rpc.BlockNumber(blockNum), tx)
+ if err != nil {
+ return 0, err
+ }
+ }
+}
+
// GetBlockTransactionCountByNumber implements eth_getBlockTransactionCountByNumber. Returns the number of transactions in a block given the block's block number.
func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*hexutil.Uint, error) {
tx, err := api.db.BeginTemporalRo(ctx)
diff --git a/rpc/jsonrpc/eth_call.go b/rpc/jsonrpc/eth_call.go
index 3bc176e8171..30c209a1e47 100644
--- a/rpc/jsonrpc/eth_call.go
+++ b/rpc/jsonrpc/eth_call.go
@@ -24,6 +24,9 @@ import (
"math/big"
"unsafe"
+ "github.com/erigontech/nitro-erigon/arbos"
+ "github.com/erigontech/nitro-erigon/arbos/arbosState"
+ "github.com/erigontech/nitro-erigon/arbos/l1pricing"
"github.com/holiman/uint256"
"google.golang.org/grpc"
@@ -184,7 +187,6 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs
}
blockNum := *(header.Number)
-
stateReader, err := rpchelper.CreateStateReaderFromBlockNumber(ctx, dbtx, blockNum.Uint64(), isLatest, 0, api.stateCache, api._txNumReader)
if err != nil {
return 0, err
@@ -199,6 +201,42 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs
if args.From == nil {
args.From = new(common.Address)
}
+ stateDb := state.New(stateReader)
+
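+ // On Arbitrum, raise the gas cap by the L1 poster's data cost (converted to L2 gas) so the
+ // binary search below only has to account for L2 execution; see L2OnlyGasCap in rpc/ethapi.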
+ if chainConfig.IsArbitrum() {
+ arbState := state.NewArbitrum(stateDb)
+ arbosVersion := arbosState.ArbOSVersion(arbState)
+ if arbosVersion == 0 {
+ // ArbOS hasn't been installed, so use the vanilla gas cap
+ return 0, nil
+ }
+ state, err := arbosState.OpenSystemArbosState(arbState, nil, true)
+ if err != nil {
+ return 0, err
+ }
+ if header.BaseFee.Sign() == 0 {
+ // if gas is free or there's no reimbursable poster, the user won't pay for L1 data costs
+ return 0, nil
+ }
+
+ brotliCompressionLevel, err := state.BrotliCompressionLevel()
+ if err != nil {
+ return 0, err
+ }
+
+ var baseFee *uint256.Int = nil
+ if header.BaseFee != nil {
+ baseFee, _ = uint256.FromBig(header.BaseFee)
+ }
+ msg, err := args.ToMessage(api.GasCap, baseFee)
+ if err != nil {
+ return 0, err
+ }
+ posterCost, _ := state.L1PricingState().PosterDataCost(msg, l1pricing.BatchPosterAddress, brotliCompressionLevel)
+ // Use estimate mode because this is used to raise the gas cap, so we don't want to underestimate.
+ postingGas := arbos.GetPosterGas(state, header.BaseFee, types.NewMessageGasEstimationContext(), posterCost)
+ api.GasCap += postingGas
+ }
// Determine the highest gas limit can be used during the estimation.
if args.Gas != nil && uint64(*args.Gas) >= params.TxGas {
@@ -207,7 +245,8 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs
// Retrieve the block to act as the gas ceiling
hi = header.GasLimit
}
- if hi > params.MaxTxnGasLimit && chainConfig.IsOsaka(header.Time) {
+ var arbosVersion uint64
+ if hi > params.MaxTxnGasLimit && chainConfig.IsOsaka(header.Time, arbosVersion) {
// Cap the maximum gas allowance according to EIP-7825 if Osaka
hi = params.MaxTxnGasLimit
}
@@ -224,8 +263,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi2.CallArgs
}
// Recap the highest gas limit with account's available balance.
if feeCap.Sign() != 0 {
- state := state.New(stateReader)
- if state == nil {
+ if stateDb == nil {
return 0, errors.New("can't get the current state")
}
@@ -889,9 +927,15 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi2.CallArgs,
args.From = &common.Address{}
}
+ //var arbosFormatVersion uint64
+ //if chainConfig.IsArbitrum() {
+ // arbosFormatVersion = types.DeserializeHeaderExtraInformation(header).ArbOSFormatVersion
+ //}
+
// Retrieve the precompiles since they don't need to be added to the access list
blockCtx := transactions.NewEVMBlockContext(engine, header, bNrOrHash.RequireCanonical, tx, api._blockReader, chainConfig)
- precompiles := vm.ActivePrecompiles(blockCtx.Rules(chainConfig))
+ // TODO arbitrum
+ precompiles := vm.ActivePrecompiles(blockCtx.Rules(chainConfig)) // blockNumber, header.Time, arbosFormatVersion))
excl := make(map[common.Address]struct{})
// Add 'from', 'to', precompiles to the exclusion list
excl[*args.From] = struct{}{}
diff --git a/rpc/jsonrpc/eth_call_test.go b/rpc/jsonrpc/eth_call_test.go
index 0de2703c14c..261b5d735be 100644
--- a/rpc/jsonrpc/eth_call_test.go
+++ b/rpc/jsonrpc/eth_call_test.go
@@ -268,9 +268,11 @@ func TestGetBlockByTimestampLatestTime(t *testing.T) {
defer tx.Rollback()
api := NewErigonAPI(newBaseApiForTest(m), m.DB, nil)
+ chainConfig, err := api.chainConfig(ctx, tx)
+ assert.NoError(t, err)
latestBlock, err := m.BlockReader.CurrentBlock(tx)
require.NoError(t, err)
- response, err := ethapi.RPCMarshalBlockDeprecated(latestBlock, true, false)
+ response, err := ethapi.RPCMarshalBlockDeprecated(latestBlock, true, false, chainConfig.IsArbitrumNitro(latestBlock.Number()))
if err != nil {
t.Error("couldn't get the rpc marshal block")
@@ -308,7 +310,9 @@ func TestGetBlockByTimestampOldestTime(t *testing.T) {
t.Error("couldn't retrieve oldest block")
}
- response, err := ethapi.RPCMarshalBlockDeprecated(oldestBlock, true, false)
+ chainConfig, err := api.chainConfig(ctx, tx)
+ assert.NoError(t, err)
+ response, err := ethapi.RPCMarshalBlockDeprecated(oldestBlock, true, false, chainConfig.IsArbitrumNitro(oldestBlock.Number()))
if err != nil {
t.Error("couldn't get the rpc marshal block")
@@ -344,7 +348,9 @@ func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) {
latestBlock, err := m.BlockReader.CurrentBlock(tx)
require.NoError(t, err)
- response, err := ethapi.RPCMarshalBlockDeprecated(latestBlock, true, false)
+ chainConfig, err := api.chainConfig(ctx, tx)
+ assert.NoError(t, err)
+ response, err := ethapi.RPCMarshalBlockDeprecated(latestBlock, true, false, chainConfig.IsArbitrumNitro(latestBlock.Number()))
if err != nil {
t.Error("couldn't get the rpc marshal block")
@@ -392,7 +398,9 @@ func TestGetBlockByTimeMiddle(t *testing.T) {
t.Error("couldn't retrieve middle block")
}
- response, err := ethapi.RPCMarshalBlockDeprecated(middleBlock, true, false)
+ chainConfig, err := api.chainConfig(ctx, tx)
+ assert.NoError(t, err)
+ response, err := ethapi.RPCMarshalBlockDeprecated(middleBlock, true, false, chainConfig.IsArbitrumNitro(middleBlock.Number()))
if err != nil {
t.Error("couldn't get the rpc marshal block")
@@ -433,7 +441,9 @@ func TestGetBlockByTimestamp(t *testing.T) {
if pickedBlock == nil {
t.Error("couldn't retrieve picked block")
}
- response, err := ethapi.RPCMarshalBlockDeprecated(pickedBlock, true, false)
+ chainConfig, err := api.chainConfig(ctx, tx)
+ assert.NoError(t, err)
+ response, err := ethapi.RPCMarshalBlockDeprecated(pickedBlock, true, false, chainConfig.IsArbitrumNitro(pickedBlock.Number()))
if err != nil {
t.Error("couldn't get the rpc marshal block")
diff --git a/rpc/jsonrpc/eth_simulation.go b/rpc/jsonrpc/eth_simulation.go
index 2f9758a5950..7f99617b3db 100644
--- a/rpc/jsonrpc/eth_simulation.go
+++ b/rpc/jsonrpc/eth_simulation.go
@@ -285,11 +285,11 @@ func (s *simulator) makeHeaders(blocks []SimulatedBlock) ([]*types.Header, error
overrides := block.BlockOverrides
var withdrawalsHash *common.Hash
- if s.chainConfig.IsShanghai((uint64)(*overrides.Timestamp)) {
+ if s.chainConfig.IsShanghai((uint64)(*overrides.Timestamp), 0) {
withdrawalsHash = &empty.WithdrawalsHash
}
var parentBeaconRoot *common.Hash
- if s.chainConfig.IsCancun((uint64)(*overrides.Timestamp)) {
+ if s.chainConfig.IsCancun((uint64)(*overrides.Timestamp), 0) {
parentBeaconRoot = &common.Hash{}
if overrides.BeaconRoot != nil {
parentBeaconRoot = overrides.BeaconRoot
@@ -395,9 +395,9 @@ func (s *simulator) simulateBlock(
}
}
}
- if s.chainConfig.IsCancun(header.Time) {
+ if s.chainConfig.IsCancun(header.Time, 0) {
var excess uint64
- if s.chainConfig.IsCancun(parent.Time) {
+ if s.chainConfig.IsCancun(parent.Time, 0) {
excess = misc.CalcExcessBlobGas(s.chainConfig, parent, header.Time)
}
header.ExcessBlobGas = &excess
@@ -416,6 +416,8 @@ func (s *simulator) simulateBlock(
if err != nil {
return nil, nil, err
}
+ txnIndex := len(bsc.Calls)
+ txNum := minTxNum + 1 + uint64(txnIndex)
sharedDomains.SetBlockNum(blockNumber)
sharedDomains.SetTxNum(minTxNum)
@@ -496,7 +498,7 @@ func (s *simulator) simulateBlock(
}
}
header.GasUsed = cumulativeGasUsed
- if s.chainConfig.IsCancun(header.Time) {
+ if s.chainConfig.IsCancun(header.Time, 0) {
header.BlobGasUsed = &cumulativeBlobGasUsed
}
@@ -539,6 +541,22 @@ func (s *simulator) simulateBlock(
// We cannot compute the state root for historical state w/o commitment history, so we just use the zero hash (default value).
}
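+ // Assemble the simulated block through the consensus engine so that the header fields derived from the transactions, receipts and withdrawals are built consistently.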
+ var withdrawals types.Withdrawals
+ if s.chainConfig.IsShanghai(header.Time, 0) {
+ withdrawals = types.Withdrawals{}
+ }
+ engine, ok := s.engine.(consensus.Engine)
+ if !ok {
+ return nil, nil, errors.New("consensus engine reader does not support full consensus.Engine")
+ }
+ systemCall := func(contract common.Address, data []byte) ([]byte, error) {
+ return core.SysCallContract(contract, data, s.chainConfig, intraBlockState, header, engine, false /* constCall */, vmConfig)
+ }
+ block, _, err := engine.FinalizeAndAssemble(s.chainConfig, header, intraBlockState, txnList, nil,
+ receiptList, withdrawals, nil, systemCall, nil, s.logger)
+ if err != nil {
+ return nil, nil, err
+ }
// Marshal the block in RPC format including the call results in a custom field.
additionalFields := make(map[string]any)
blockResult, err := ethapi.RPCMarshalBlock(block, true, s.fullTransactions, additionalFields)
diff --git a/rpc/jsonrpc/eth_system.go b/rpc/jsonrpc/eth_system.go
index c6c7d6126b2..d7ae1dace80 100644
--- a/rpc/jsonrpc/eth_system.go
+++ b/rpc/jsonrpc/eth_system.go
@@ -331,12 +331,13 @@ func (api *APIImpl) Config(ctx context.Context, blockTimeOverride *hexutil.Uint6
func fillForkConfig(chainConfig *chain.Config, forkId [4]byte, activationTime uint64) *EthHardForkConfig {
forkConfig := EthHardForkConfig{}
forkConfig.ActivationTime = activationTime
- forkConfig.BlobSchedule = chainConfig.GetBlobConfig(activationTime)
+ forkConfig.BlobSchedule = chainConfig.GetBlobConfig(activationTime, 0 /* currentArbosVer */)
forkConfig.ChainId = hexutil.Uint(chainConfig.ChainID.Uint64())
forkConfig.ForkId = forkId[:]
blockContext := evmtypes.BlockContext{
BlockNumber: math.MaxUint64,
Time: activationTime,
+ ArbOSVersion: 0,
}
precompiles := vm.Precompiles(blockContext.Rules(chainConfig))
forkConfig.Precompiles = make(map[string]common.Address, len(precompiles))
diff --git a/rpc/jsonrpc/eth_uncles.go b/rpc/jsonrpc/eth_uncles.go
index d451cb9ee47..f24e6ad6691 100644
--- a/rpc/jsonrpc/eth_uncles.go
+++ b/rpc/jsonrpc/eth_uncles.go
@@ -48,6 +48,7 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp
if err != nil {
return nil, err
}
+ // For Nitro this should return nil here, as Arbitrum chains do not have uncles
if block == nil {
return nil, nil // not error, see https://github.com/erigontech/erigon/issues/1645
}
@@ -58,8 +59,12 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp
log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
return nil, nil
}
+ chainConfig, err := api.chainConfig(ctx, tx)
+ if err != nil {
+ return nil, err
+ }
uncle := types.NewBlockWithHeader(uncles[index])
- return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields)
+ return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields, chainConfig.IsArbitrumNitro(uncle.Number()))
}
// GetUncleByBlockHashAndIndex implements eth_getUncleByBlockHashAndIndex. Returns information about an uncle given a block's hash and the index of the uncle.
@@ -74,6 +79,7 @@ func (api *APIImpl) GetUncleByBlockHashAndIndex(ctx context.Context, hash common
if err != nil {
return nil, err
}
+ // For Nitro this should return nil here, as Arbitrum chains do not have uncles
if block == nil {
return nil, nil // not error, see https://github.com/erigontech/erigon/issues/1645
}
@@ -84,9 +90,14 @@ func (api *APIImpl) GetUncleByBlockHashAndIndex(ctx context.Context, hash common
log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index)
return nil, nil
}
+
+ chainConfig, err := api.chainConfig(ctx, tx)
+ if err != nil {
+ return nil, err
+ }
uncle := types.NewBlockWithHeader(uncles[index])
- return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields)
+ return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields, chainConfig.IsArbitrumNitro(uncle.Number()))
}
// GetUncleCountByBlockNumber implements eth_getUncleCountByBlockNumber. Returns the number of uncles in the block, if any.
diff --git a/rpc/jsonrpc/net_api_arb.go b/rpc/jsonrpc/net_api_arb.go
new file mode 100644
index 00000000000..0de2c28a1bf
--- /dev/null
+++ b/rpc/jsonrpc/net_api_arb.go
@@ -0,0 +1,57 @@
+// Copyright 2024 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package jsonrpc
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ "github.com/erigontech/erigon/rpc/rpchelper"
+)
+
+// NetAPIArb is the interface for the net_ RPC commands.
+type NetAPIArb interface {
+ Version(_ context.Context) (string, error)
+}
+
+// NetAPIArbImpl is a data structure to store things needed for net_ commands.
+type NetAPIArbImpl struct {
+ ethBackend rpchelper.ApiBackend
+}
+
+// NewNetAPIArbImpl returns a NetAPIArbImpl instance.
+func NewNetAPIArbImpl(eth rpchelper.ApiBackend) *NetAPIArbImpl {
+ return &NetAPIArbImpl{
+ ethBackend: eth,
+ }
+}
+
+// Version implements net_version. Returns the current network ID.
+func (api *NetAPIArbImpl) Version(ctx context.Context) (string, error) {
+ if api.ethBackend == nil {
+ // We're running in --datadir mode or otherwise cannot get the backend
+ return "", fmt.Errorf(NotAvailableChainData, "net_version")
+ }
+
+ res, err := api.ethBackend.NetVersion(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ return strconv.FormatUint(res, 10), nil
+}
diff --git a/rpc/jsonrpc/otterscan_api.go b/rpc/jsonrpc/otterscan_api.go
index 0b8a40465ac..7f96741e7a9 100644
--- a/rpc/jsonrpc/otterscan_api.go
+++ b/rpc/jsonrpc/otterscan_api.go
@@ -282,8 +282,9 @@ func (api *OtterscanAPIImpl) traceBlocks(ctx context.Context, addr common.Addres
return results[:totalBlocksTraced], hasMore, nil
}
-func delegateGetBlockByNumber(tx kv.Tx, b *types.Block, number rpc.BlockNumber, inclTx bool) (map[string]any, error) {
- response, err := ethapi.RPCMarshalBlock(b, inclTx, inclTx, nil)
+func delegateGetBlockByNumber(tx kv.Tx, b *types.Block, number rpc.BlockNumber, inclTx bool, chainConfig *chain.Config) (map[string]any, error) {
+ additionalFields := make(map[string]any)
+ response, err := ethapi.RPCMarshalBlock(b, inclTx, inclTx, additionalFields, chainConfig.IsArbitrumNitro(b.Number()))
if !inclTx {
delete(response, "transactions") // workaround for https://github.com/erigontech/erigon/issues/4989#issuecomment-1218415666
}
@@ -406,7 +407,7 @@ func (api *OtterscanAPIImpl) GetBlockTransactions(ctx context.Context, number rp
return nil, err
}
- getBlockRes, err := delegateGetBlockByNumber(tx, b, number, true)
+ getBlockRes, err := delegateGetBlockByNumber(tx, b, number, true, chainConfig)
if err != nil {
return nil, err
}
diff --git a/rpc/jsonrpc/otterscan_block_details.go b/rpc/jsonrpc/otterscan_block_details.go
index fbb7e0e6c6b..29ac8ee72b7 100644
--- a/rpc/jsonrpc/otterscan_block_details.go
+++ b/rpc/jsonrpc/otterscan_block_details.go
@@ -78,7 +78,7 @@ func (api *OtterscanAPIImpl) getBlockDetailsImpl(ctx context.Context, tx kv.Temp
return nil, err
}
- getBlockRes, err := delegateGetBlockByNumber(tx, b, number, false)
+ getBlockRes, err := delegateGetBlockByNumber(tx, b, number, false, chainConfig)
if err != nil {
return nil, err
}
diff --git a/rpc/jsonrpc/receipts/receipts_generator.go b/rpc/jsonrpc/receipts/receipts_generator.go
index 464847ef8b9..3dcabfbdfa4 100644
--- a/rpc/jsonrpc/receipts/receipts_generator.go
+++ b/rpc/jsonrpc/receipts/receipts_generator.go
@@ -123,7 +123,8 @@ func (g *Generator) PrepareEnv(ctx context.Context, header *types.Header, cfg *c
gasUsed := new(uint64)
usedBlobGas := new(uint64)
- gp := new(protocol.GasPool).AddGas(header.GasLimit).AddBlobGas(cfg.GetMaxBlobGasPerBlock(header.Time))
+ arbOsVersion := types.GetArbOSVersion(header, cfg)
+ gp := new(protocol.GasPool).AddGas(header.GasLimit).AddBlobGas(cfg.GetMaxBlobGasPerBlock(header.Time, arbOsVersion))
noopWriter := state.NewNoopWriter()
@@ -231,6 +232,7 @@ func (g *Generator) GetReceipt(ctx context.Context, cfg *chain.Config, tx kv.Tem
return nil, err
}
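+ // Declare evm up-front so it is visible to both the account-abstraction and regular transaction paths below.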
+ var evm *vm.EVM
if txn.Type() == types.AccountAbstractionTxType {
genEnv, err = g.PrepareEnv(ctx, header, cfg, tx, index)
if err != nil {
@@ -335,7 +337,22 @@ func (g *Generator) GetReceipt(ctx context.Context, cfg *chain.Config, tx kv.Tem
evm.Cancel()
}()
- receipt, _, err = protocol.ApplyTransactionWithEVM(cfg, g.engine, genEnv.gp, genEnv.ibs, stateWriter, genEnv.header, txn, genEnv.gasUsed, genEnv.usedBlobGas, vm.Config{}, evm)
+
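+ // On Arbitrum chains, install the ArbOS tx processing hook (only once per EVM, guarded by the CompareAndSwap) and route the txn through the Arbitrum apply path.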
+ if cfg.IsArbitrum() {
+ var msg *types.Message
+ msg, err = txn.AsMessage(*types.MakeSigner(cfg, blockNum, header.Time), header.BaseFee, evm.ChainRules())
+ if err != nil {
+ return nil, err
+ }
+ if evm.ProcessingHookSet.CompareAndSwap(false, true) {
+ evm.ProcessingHook = arbos.NewTxProcessorIBS(evm, state.NewArbitrum(genEnv.ibs), msg)
+ } else {
+ evm.ProcessingHook.SetMessage(msg, state.NewArbitrum(genEnv.ibs))
+ }
+ receipt, _, err = core.ApplyArbTransactionVmenv(cfg, g.engine, genEnv.gp, genEnv.ibs, genEnv.noopWriter, genEnv.header, txn, genEnv.gasUsed, genEnv.usedBlobGas, vm.Config{}, evm)
+ } else {
+ receipt, _, err = protocol.ApplyTransactionWithEVM(cfg, g.engine, genEnv.gp, genEnv.ibs, stateWriter, genEnv.header, txn, genEnv.gasUsed, genEnv.usedBlobGas, vm.Config{}, evm)
+ }
if err != nil {
return nil, fmt.Errorf("ReceiptGen.GetReceipt: bn=%d, txnIdx=%d, %w", blockNum, index, err)
}
@@ -359,6 +376,10 @@ func (g *Generator) GetReceipt(ctx context.Context, cfg *chain.Config, tx kv.Tem
return nil, fmt.Errorf("execution aborted (timeout = %v)", g.evmTimeout)
}
+ if evm.Cancelled() {
+ return nil, fmt.Errorf("execution aborted (timeout = %v)", g.evmTimeout)
+ }
+
if rawtemporaldb.ReceiptStoresFirstLogIdx(tx) {
firstLogIndex = logIdxAfterTx
} else {
@@ -432,7 +453,10 @@ func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Te
return nil, err
}
//genEnv.ibs.SetTrace(true)
- vmCfg := vm.Config{}
+
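+ // Use a small dedicated jump-dest analysis cache for this receipts run.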
+ vmCfg := vm.Config{
+ JumpDestCache: vm.NewJumpDestCache(16),
+ }
ctx, cancel := context.WithTimeout(ctx, g.evmTimeout)
defer cancel()
@@ -479,7 +503,23 @@ func (g *Generator) GetReceipts(ctx context.Context, cfg *chain.Config, tx kv.Te
}()
genEnv.ibs.SetTxContext(blockNum, i)
- receipt, _, err := protocol.ApplyTransactionWithEVM(cfg, g.engine, genEnv.gp, genEnv.ibs, stateWriter, genEnv.header, txn, genEnv.gasUsed, genEnv.usedBlobGas, vmCfg, evm)
+
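+ // As in GetReceipt above: on Arbitrum, set up the ArbOS processing hook and use the Arbitrum apply path; otherwise fall back to the regular path.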
+ var receipt *types.Receipt
+ if cfg.IsArbitrum() {
+ var msg *types.Message
+ msg, err = txn.AsMessage(*types.MakeSigner(cfg, block.NumberU64(), block.Time()), block.BaseFee(), evm.ChainRules())
+ if err != nil {
+ return nil, err
+ }
+ if evm.ProcessingHookSet.CompareAndSwap(false, true) {
+ evm.ProcessingHook = arbos.NewTxProcessorIBS(evm, state.NewArbitrum(genEnv.ibs), msg)
+ } else {
+ evm.ProcessingHook.SetMessage(msg, state.NewArbitrum(genEnv.ibs))
+ }
+ receipt, _, err = core.ApplyArbTransactionVmenv(cfg, g.engine, genEnv.gp, genEnv.ibs, genEnv.noopWriter, genEnv.header, txn, genEnv.gasUsed, genEnv.usedBlobGas, vmCfg, evm)
+ } else {
+ receipt, _, err = protocol.ApplyTransactionWithEVM(cfg, g.engine, genEnv.gp, genEnv.ibs, stateWriter, genEnv.header, txn, genEnv.gasUsed, genEnv.usedBlobGas, vmCfg, evm)
+ }
if err != nil {
return nil, fmt.Errorf("ReceiptGen.GetReceipts: bn=%d, txnIdx=%d, %w", block.NumberU64(), i, err)
}
diff --git a/rpc/jsonrpc/send_transaction.go b/rpc/jsonrpc/send_transaction.go
index fdec7fb2de4..4316bfe3312 100644
--- a/rpc/jsonrpc/send_transaction.go
+++ b/rpc/jsonrpc/send_transaction.go
@@ -19,10 +19,24 @@ func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.By
return common.Hash{}, err
}
- // If the transaction fee cap is already specified, ensure the
- // fee of the given transaction is _reasonable_.
- if err := checkTxFee(txn.GetFeeCap().ToBig(), txn.GetGasLimit(), api.FeeCap); err != nil {
- return common.Hash{}, err
+ // TODO arbitrum - is this code still needed?
+ if txn.Type() == types.BlobTxType || txn.Type() == types.DynamicFeeTxType || txn.Type() == types.SetCodeTxType {
+ baseFeeBig, err := api.BaseFee(ctx)
+ if err != nil {
+ return common.Hash{}, err
+ }
+
+ // If the transaction fee cap is already specified, ensure it
+ // is not lower than the current base fee.
+ if err := checkDynamicTxFee(txn.GetFeeCap(), baseFeeBig); err != nil {
+ return common.Hash{}, err
+ }
+ } else {
+ // If the transaction fee cap is already specified, ensure the
+ // fee of the given transaction is _reasonable_.
+ if err := CheckTxFee(txn.GetFeeCap().ToBig(), txn.GetGasLimit(), api.FeeCap); err != nil {
+ return common.Hash{}, err
+ }
}
if !txn.Protected() && !api.AllowUnprotectedTxs {
@@ -69,9 +83,9 @@ func (api *APIImpl) SendTransaction(_ context.Context, txObject any) (common.Has
return common.Hash{0}, fmt.Errorf(NotImplemented, "eth_sendTransaction")
}
-// checkTxFee is an internal function used to check whether the fee of
+// CheckTxFee is an internal function used to check whether the fee of
// the given transaction is _reasonable_(under the cap).
-func checkTxFee(gasPrice *big.Int, gas uint64, gasCap float64) error {
+func CheckTxFee(gasPrice *big.Int, gas uint64, gasCap float64) error {
// Short circuit if there is no gasCap for transaction fee at all.
if gasCap == 0 {
return nil
@@ -85,3 +99,20 @@ func checkTxFee(gasPrice *big.Int, gas uint64, gasCap float64) error {
return nil
}
+
+// TODO arbitrum - is this still needed?
+// checkDynamicTxFee is an internal function used to check that the fee cap
+// of the given transaction is not lower than the current base fee.
+func checkDynamicTxFee(gasCap *uint256.Int, baseFeeBig *hexutil.Big) error {
+ baseFee := uint256.NewInt(0)
+ overflow := baseFee.SetFromBig(baseFeeBig.ToInt())
+ if overflow {
+ return errors.New("opts.Value higher than 2^256-1")
+ }
+
+ if gasCap.Lt(baseFee) {
+ return errors.New("fee cap is lower than the base fee")
+ }
+
+ return nil
+}
diff --git a/rpc/transactions/tracing.go b/rpc/transactions/tracing.go
index 05717c4b8ae..792a9d74fc7 100644
--- a/rpc/transactions/tracing.go
+++ b/rpc/transactions/tracing.go
@@ -43,6 +43,7 @@ import (
"github.com/erigontech/erigon/execution/vm/evmtypes"
"github.com/erigontech/erigon/rpc/jsonstream"
"github.com/erigontech/erigon/rpc/rpchelper"
+ "github.com/erigontech/nitro-erigon/arbos"
)
type BlockGetter interface {
@@ -133,6 +134,14 @@ func TraceTx(
if tracer != nil && tracer.OnTxStart != nil {
tracer.OnTxStart(evm.GetVMContext(), tx, message.From())
}
+
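+ // On Arbitrum chains, rebuild the message with the original txn attached and install the ArbOS processing hook so that tracing goes through the same path as normal execution.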
+ if chainConfig.IsArbitrum() {
+ msg := types.NewMessage(message.From(), message.To(), message.Nonce(), message.Value(), message.Gas(), message.GasPrice(), message.FeeCap(), message.TipCap(), message.Data(), message.AccessList(), true, false, false, false, message.MaxFeePerBlobGas())
+
+ msg.Tx = tx
+ evm.ProcessingHook = arbos.NewTxProcessorIBS(evm, state.NewArbitrum(ibs), msg)
+ }
+
result, err := protocol.ApplyMessage(evm, message, gp, refunds, false /* gasBailout */, engine)
if err != nil {
if tracer != nil && tracer.OnTxEnd != nil {
diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go
index 0b493e6a812..73a267c4da5 100644
--- a/rpc/websocket_test.go
+++ b/rpc/websocket_test.go
@@ -85,6 +85,10 @@ func TestWebsocketOriginCheck(t *testing.T) {
// This test checks whether calls exceeding the request size limit are rejected.
func TestWebsocketLargeCall(t *testing.T) {
+ //if runtime.GOOS == "darwin" {
+ t.Skip("issue #16875")
+ //}
+
if testing.Short() {
t.Skip()
}
diff --git a/turbo/app/reset-datadir.go b/turbo/app/reset-datadir.go
new file mode 100644
index 00000000000..914a2d5a55e
--- /dev/null
+++ b/turbo/app/reset-datadir.go
@@ -0,0 +1,317 @@
+package app
+
+import (
+ "errors"
+ "fmt"
+ "io/fs"
+ "os"
+ "path/filepath"
+ "strings"
+
+ g "github.com/anacrolix/generics"
+ "github.com/anacrolix/torrent/metainfo"
+ "github.com/urfave/cli/v2"
+
+ "github.com/erigontech/erigon/cmd/utils"
+ "github.com/erigontech/erigon/common/dir"
+ "github.com/erigontech/erigon/db/datadir"
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/db/kv/dbcfg"
+ "github.com/erigontech/erigon/db/kv/mdbx"
+ "github.com/erigontech/erigon/db/rawdb"
+ "github.com/erigontech/erigon/db/snapcfg"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/log/v3"
+)
+
+var (
+ removeLocalFlag = cli.BoolFlag{
+ Name: "local",
+ Usage: "Remove files not described in snapshot set (probably generated locally).",
+ Value: true,
+ Aliases: []string{"l"},
+ Category: "Reset",
+ }
+ dryRunFlag = cli.BoolFlag{
+ Name: "dry-run",
+ Usage: "Print files that would be removed, but do not remove them.",
+ Value: false,
+ Aliases: []string{"n"},
+ Category: "Reset",
+ }
+)
+
+// Checks if a value was explicitly set in the given CLI command context or any of its parents. In
+// urfave/cli@v2, you must check the lineage to see if a flag was set in any context. It may be
+// different in v3.
+func isSetLineage(cliCtx *cli.Context, flagName string) bool {
+ for _, ctx := range cliCtx.Lineage() {
+ if ctx.IsSet(flagName) {
+ return true
+ }
+ }
+ return false
+}
+
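+// resetCliAction resets a datadir to its preverified snapshot set: it resolves the chain name
+// (from the flag or from chaindata), locks the datadir, loads the remote preverified hashes,
+// prunes the snapshots directory, and optionally removes locally generated data such as chaindata.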
+func resetCliAction(cliCtx *cli.Context) (err error) {
+ // This is set up in snapshots cli.Command.Before.
+ logger := log.Root()
+ removeLocal := removeLocalFlag.Get(cliCtx)
+ dryRun := dryRunFlag.Get(cliCtx)
+ dataDirPath := cliCtx.String(utils.DataDirFlag.Name)
+ logger.Info("resetting datadir", "path", dataDirPath)
+
+ dirs := datadir.Open(dataDirPath)
+
+ configChainName, chainNameErr := getChainNameFromChainData(cliCtx, logger, dirs.Chaindata)
+
+ chainName := utils.ChainFlag.Get(cliCtx)
+ // Check the lineage: we don't want to silently fall back to the mainnet default, but due to how
+ // urfave/cli@v2 works we also shouldn't re-add the chain flag to the current command context.
+ if isSetLineage(cliCtx, utils.ChainFlag.Name) {
+ if configChainName.Ok && configChainName.Value != chainName {
+ // Pedantic but interesting.
+ logger.Warn("chain name flag and chain config do not match", "flag", chainName, "config", configChainName.Value)
+ }
+ logger.Info("using chain name from flag", "chain", chainName)
+ } else {
+ if chainNameErr != nil {
+ logger.Warn("error getting chain name from chaindata", "err", chainNameErr)
+ }
+ if !configChainName.Ok {
+ return errors.New(
+ "chain flag not set and chain name not found in chaindata. datadir is ready for sync, invalid, or requires chain flag to reset")
+ }
+ chainName = configChainName.Unwrap()
+ logger.Info("read chain name from config", "chain", chainName)
+ }
+
+ unlock, err := dirs.TryFlock()
+ if err != nil {
+ return fmt.Errorf("failed to lock data dir %v: %w", dirs.DataDir, err)
+ }
+ defer unlock()
+ err = snapcfg.LoadRemotePreverified(cliCtx.Context)
+ if err != nil {
+ // TODO: Check if we should continue? What if we ask for a git revision and
+ // can't get it? What about a branch? Can we reset to the embedded snapshot hashes?
+ return fmt.Errorf("loading remote preverified snapshots: %w", err)
+ }
+ cfg, known := snapcfg.KnownCfg(chainName)
+ if !known {
+ // Wtf does this even mean?
+ return fmt.Errorf("config for chain %v is not known", chainName)
+ }
+ // Should we check cfg.Local? We could be resetting to the preverified.toml...?
+ logger.Info(
+ "Loaded preverified snapshots hashes",
+ "len", len(cfg.Preverified.Items),
+ "chain", chainName,
+ )
+ removeFunc := func(path string) error {
+ logger.Debug("Removing snapshot dir file", "path", path)
+ return dir.RemoveFile(filepath.Join(dirs.Snap, path))
+ }
+ if dryRun {
+ removeFunc = dryRunRemove
+ }
+ reset := reset{
+ removeUnknown: removeLocal,
+ logger: logger,
+ }
+ logger.Info("Resetting snapshots directory", "path", dirs.Snap)
+ err = reset.walkSnapshots(dirs.Snap, cfg.Preverified.Items, removeFunc)
+ if err != nil {
+ err = fmt.Errorf("walking snapshots: %w", err)
+ return
+ }
+ logger.Info("Files NOT removed from snapshots directory",
+ "torrents", reset.stats.retained.torrentFiles,
+ "data", reset.stats.retained.dataFiles)
+ logger.Info("Files removed from snapshots directory",
+ "torrents", reset.stats.removed.torrentFiles,
+ "data", reset.stats.removed.dataFiles)
+ // Remove chaindata last, so that the config is available if there's an error.
+ if removeLocal {
+ for _, extraDir := range []string{
+ dbcfg.HeimdallDB,
+ dbcfg.PolygonBridgeDB,
+ } {
+ extraFullPath := filepath.Join(dirs.DataDir, extraDir)
+ err = dir.RemoveAll(extraFullPath)
+ if err != nil {
+ return fmt.Errorf("removing extra dir %q: %w", extraDir, err)
+ }
+ }
+ logger.Info("Removing chaindata dir", "path", dirs.Chaindata)
+ if !dryRun {
+ err = dir.RemoveAll(dirs.Chaindata)
+ }
+ if err != nil {
+ err = fmt.Errorf("removing chaindata dir: %w", err)
+ return
+ }
+ }
+ err = removeFunc(datadir.PreverifiedFileName)
+ if err == nil {
+ logger.Info("Removed snapshots lock file", "path", datadir.PreverifiedFileName)
+ } else {
+ if !errors.Is(err, fs.ErrNotExist) {
+ err = fmt.Errorf("removing snapshot lock file: %w", err)
+ return
+ }
+ }
+ logger.Info("Reset complete. Start Erigon as usual, missing files will be downloaded.")
+ return nil
+}
+
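+// getChainNameFromChainData opens the chaindata database read-only and returns the chain name
+// stored in its chain config, if any.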
+func getChainNameFromChainData(cliCtx *cli.Context, logger log.Logger, chainDataDir string) (_ g.Option[string], err error) {
+ _, err = os.Stat(chainDataDir)
+ if err != nil {
+ return
+ }
+ ctx := cliCtx.Context
+ var db kv.RoDB
+ db, err = mdbx.New(dbcfg.ChainDB, logger).Path(chainDataDir).Accede(true).Readonly(true).Open(ctx)
+ if err != nil {
+ err = fmt.Errorf("opening chaindata database: %w", err)
+ return
+ }
+ defer db.Close()
+ var chainCfg *chain.Config
+ // See tool.ChainConfigFromDB for another example, but that panics on errors.
+ err = db.View(ctx, func(tx kv.Tx) (err error) {
+ genesis, err := rawdb.ReadCanonicalHash(tx, 0)
+ if err != nil {
+ err = fmt.Errorf("reading genesis block hash: %w", err)
+ return
+ }
+ // Do we need genesis block hash here?
+ chainCfg, err = rawdb.ReadChainConfig(tx, genesis)
+ if err != nil {
+ err = fmt.Errorf("reading chain config: %w", err)
+ return
+ }
+ return
+ })
+ if err != nil {
+ err = fmt.Errorf("reading chaindata db: %w", err)
+ return
+ }
+ if chainCfg == nil {
+ return
+ }
+ return g.Some(chainCfg.ChainName), nil
+}
+
+func dryRunRemove(path string) error {
+ return nil
+}
+
+type resetStats struct {
+ torrentFiles int
+ dataFiles int
+ unknownFiles int
+}
+
+type reset struct {
+ logger log.Logger
+ removeUnknown bool
+ stats struct {
+ removed resetStats
+ retained resetStats
+ }
+}
+
+type resetItemInfo struct {
+ path string
+ realFilePath func() string
+ hash g.Option[string]
+ isTorrent bool
+ inPreverified bool
+}
+
+// Walks the given snapshots directory, removing files that are not in the preverified set.
+func (me *reset) walkSnapshots(
+ // Could almost pass fs.FS here except metainfo.LoadFromFile expects a string filepath.
+ snapDir string,
+ preverified snapcfg.PreverifiedItems,
+ // path is the relative path to the walk root. Called for each file that should be removed.
+ // Error is passed back to the walk function.
+ remove func(path string) error,
+) error {
+ return fs.WalkDir(
+ os.DirFS(snapDir),
+ ".",
+ func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ // Our job is to remove anything that shouldn't be here... so if we can't read a dir
+ // we are in trouble.
+ return fmt.Errorf("error walking path %v: %w", path, err)
+ }
+ if d.IsDir() {
+ return nil
+ }
+ if path == datadir.PreverifiedFileName {
+ return nil
+ }
+ slashPath := filepath.ToSlash(path)
+ itemName, _ := strings.CutSuffix(slashPath, ".part")
+ itemName, isTorrent := strings.CutSuffix(itemName, ".torrent")
+ item, ok := preverified.Get(itemName)
+ doRemove := me.decideRemove(resetItemInfo{
+ path: path,
+ realFilePath: func() string { return filepath.Join(snapDir, path) },
+ hash: g.OptionFromTuple(item.Hash, ok),
+ isTorrent: isTorrent,
+ inPreverified: ok,
+ })
+ stats := &me.stats.retained
+ if doRemove {
+ stats = &me.stats.removed
+ err = remove(path)
+ if err != nil {
+ return fmt.Errorf("removing file %v: %w", path, err)
+ }
+ }
+ if isTorrent {
+ stats.torrentFiles++
+ } else {
+ stats.dataFiles++
+ }
+ return nil
+ },
+ )
+}
+
+// Decides whether to remove a file, and logs the reasoning.
+func (me *reset) decideRemove(file resetItemInfo) bool {
+ logger := me.logger
+ path := file.path
+ if !file.inPreverified {
+ logger.Debug("file NOT in preverified list", "path", path)
+ return me.removeUnknown
+ }
+ if file.isTorrent {
+ mi, err := metainfo.LoadFromFile(file.realFilePath())
+ if err != nil {
+ logger.Error("error loading metainfo file", "path", path, "err", err)
+ return true
+ }
+ expectedHash := file.hash.Unwrap()
+ if mi.HashInfoBytes().String() == expectedHash {
+ logger.Debug("torrent file matches preverified hash", "path", path)
+ return false
+ } else {
+ logger.Debug("torrent file infohash does NOT match preverified",
+ "path", path,
+ "expected", expectedHash,
+ "actual", mi.HashInfoBytes())
+ return true
+ }
+ } else {
+ // No checks required. Downloader will clobber it into shape after reset on next run.
+ logger.Debug("data file is in preverified", "path", path)
+ return false
+ }
+}
diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go
new file mode 100644
index 00000000000..09aa448b479
--- /dev/null
+++ b/turbo/transactions/call.go
@@ -0,0 +1,349 @@
+// Copyright 2024 The Erigon Authors
+// This file is part of Erigon.
+//
+// Erigon is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// Erigon is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with Erigon. If not, see <http://www.gnu.org/licenses/>.
+
+package transactions
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/erigontech/nitro-erigon/arbos"
+ "github.com/holiman/uint256"
+
+ "github.com/erigontech/erigon/common"
+ "github.com/erigontech/erigon/common/hexutil"
+ "github.com/erigontech/erigon/core"
+ "github.com/erigontech/erigon/core/state"
+ "github.com/erigontech/erigon/core/vm"
+ "github.com/erigontech/erigon/core/vm/evmtypes"
+ "github.com/erigontech/erigon/db/kv"
+ "github.com/erigontech/erigon/execution/chain"
+ "github.com/erigontech/erigon/execution/consensus"
+ "github.com/erigontech/erigon/execution/types"
+ "github.com/erigontech/erigon/log/v3"
+ "github.com/erigontech/erigon/rpc"
+ ethapi2 "github.com/erigontech/erigon/rpc/ethapi"
+ "github.com/erigontech/erigon/turbo/services"
+)
+
+type BlockOverrides struct {
+ BlockNumber *hexutil.Uint64 `json:"number"`
+ Coinbase *common.Address `json:"feeRecipient"`
+ Timestamp *hexutil.Uint64 `json:"time"`
+ GasLimit *hexutil.Uint `json:"gasLimit"`
+ Difficulty *hexutil.Uint `json:"difficulty"`
+ BaseFee *uint256.Int `json:"baseFeePerGas"`
+ BlockHash *map[uint64]common.Hash `json:"blockHash"`
+ BeaconRoot *common.Hash `json:"beaconRoot"`
+ Withdrawals *types.Withdrawals `json:"withdrawals"`
+}
+
+type BlockHashOverrides map[uint64]common.Hash
+
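+// OverrideHeader returns a copy of header with any non-nil header-level overrides
+// (number, difficulty, time, gas limit, coinbase, base fee) applied.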
+func (o *BlockOverrides) OverrideHeader(header *types.Header) *types.Header {
+ h := types.CopyHeader(header)
+ if o.BlockNumber != nil {
+ h.Number = new(big.Int).SetUint64(uint64(*o.BlockNumber))
+ }
+ if o.Difficulty != nil {
+ h.Difficulty = new(big.Int).SetUint64(uint64(*o.Difficulty))
+ }
+ if o.Timestamp != nil {
+ h.Time = o.Timestamp.Uint64()
+ }
+ if o.GasLimit != nil {
+ h.GasLimit = uint64(*o.GasLimit)
+ }
+ if o.Coinbase != nil {
+ h.Coinbase = *o.Coinbase
+ }
+ if o.BaseFee != nil {
+ h.BaseFee = o.BaseFee.ToBig()
+ }
+ return h
+}
+
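+// OverrideBlockContext applies the overrides to blockCtx in place and records any
+// block-hash overrides into overrideBlockHash.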
+func (o *BlockOverrides) OverrideBlockContext(blockCtx *evmtypes.BlockContext, overrideBlockHash BlockHashOverrides) {
+ if o.BlockNumber != nil {
+ blockCtx.BlockNumber = uint64(*o.BlockNumber)
+ }
+ if o.BaseFee != nil {
+ blockCtx.BaseFee = o.BaseFee
+ }
+ if o.Coinbase != nil {
+ blockCtx.Coinbase = *o.Coinbase
+ }
+ if o.Difficulty != nil {
+ blockCtx.Difficulty = new(big.Int).SetUint64(uint64(*o.Difficulty))
+ }
+ if o.Timestamp != nil {
+ blockCtx.Time = uint64(*o.Timestamp)
+ }
+ if o.GasLimit != nil {
+ blockCtx.GasLimit = uint64(*o.GasLimit)
+ }
+ if o.BlockHash != nil {
+ for blockNum, hash := range *o.BlockHash {
+ overrideBlockHash[blockNum] = hash
+ }
+ }
+}
+
+func DoCall(
+ ctx context.Context,
+ engine consensus.EngineReader,
+ args ethapi2.CallArgs,
+ tx kv.Tx,
+ blockNrOrHash rpc.BlockNumberOrHash,
+ header *types.Header,
+ overrides *ethapi2.StateOverrides,
+ gasCap uint64,
+ chainConfig *chain.Config,
+ stateReader state.StateReader,
+ headerReader services.HeaderReader,
+ callTimeout time.Duration,
+) (*evmtypes.ExecutionResult, error) {
+ // todo: Pending state is only known by the miner
+ /*
+ if blockNrOrHash.BlockNumber != nil && *blockNrOrHash.BlockNumber == rpc.PendingBlockNumber {
+ block, state, _ := b.eth.miner.Pending()
+ return state, block.Header(), nil
+ }
+ */
+
+ ibs := state.New(stateReader)
+
+ // Override the fields of specified contracts before execution.
+ if overrides != nil {
+ if err := overrides.Override(ibs); err != nil {
+ return nil, err
+ }
+ }
+
+ // Set up the context so it may be cancelled when the call has completed
+ // or, in the case of unmetered gas, set up a context with a timeout.
+ var cancel context.CancelFunc
+ if callTimeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, callTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+
+ // Make sure the context is cancelled when the call has completed,
+ // so that resources are cleaned up.
+ defer cancel()
+
+ // Get a new instance of the EVM.
+ var baseFee *uint256.Int
+ if header != nil && header.BaseFee != nil {
+ var overflow bool
+ baseFee, overflow = uint256.FromBig(header.BaseFee)
+ if overflow {
+ return nil, errors.New("header.BaseFee uint256 overflow")
+ }
+ }
+ msg, err := args.ToMessage(gasCap, baseFee)
+ if err != nil {
+ return nil, err
+ }
+ blockCtx := NewEVMBlockContext(engine, header, blockNrOrHash.RequireCanonical, tx, headerReader, chainConfig)
+ txCtx := core.NewEVMTxContext(msg)
+
+ evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{NoBaseFee: true})
+
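+ // On Arbitrum chains, wrap the call in the ArbOS tx processing hook; the message is rebuilt with the synthesized transaction attached so the hook can inspect it.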
+ if chainConfig.IsArbitrum() {
+ message := types.NewMessage(msg.From(), msg.To(), msg.Nonce(), msg.Value(), msg.Gas(), msg.GasPrice(), msg.FeeCap(), msg.TipCap(), msg.Data(), msg.AccessList(), false, false, true, msg.MaxFeePerBlobGas())
+ message.Tx, _ = args.ToTransaction(gasCap, baseFee)
+ evm.ProcessingHook = arbos.NewTxProcessorIBS(evm, state.NewArbitrum(ibs), message)
+ }
+
+ // Wait for the context to be done and cancel the EVM. Cancelling is safe
+ // to call (repeatedly) even if the EVM has already finished.
+ go func() {
+ <-ctx.Done()
+ evm.Cancel()
+ }()
+
+ gp := new(core.GasPool).AddGas(msg.Gas()).AddBlobGas(msg.BlobGas())
+ result, err := core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */, engine)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the timer caused an abort, return an appropriate error message
+ if evm.Cancelled() {
+ return nil, fmt.Errorf("execution aborted (timeout = %v)", callTimeout)
+ }
+ return result, nil
+}
+
+func NewEVMBlockContextWithOverrides(ctx context.Context, engine consensus.EngineReader, header *types.Header, tx kv.Getter,
+ reader services.CanonicalReader, config *chain.Config, blockOverrides *BlockOverrides, blockHashOverrides BlockHashOverrides) evmtypes.BlockContext {
+ blockHashFunc := MakeBlockHashProvider(ctx, tx, reader, blockHashOverrides)
+ blockContext := core.NewEVMBlockContext(header, blockHashFunc, engine, nil /* author */, config)
+ if blockOverrides != nil {
+ blockOverrides.OverrideBlockContext(&blockContext, blockHashOverrides)
+ }
+ return blockContext
+}
+
+func NewEVMBlockContext(engine consensus.EngineReader, header *types.Header, requireCanonical bool, tx kv.Getter,
+ headerReader services.HeaderReader, config *chain.Config) evmtypes.BlockContext {
+ blockHashFunc := MakeHeaderGetter(requireCanonical, tx, headerReader)
+ return core.NewEVMBlockContext(header, blockHashFunc, engine, nil /* author */, config)
+}
+
+type BlockHashProvider func(blockNum uint64) (common.Hash, error)
+
+func MakeBlockHashProvider(ctx context.Context, tx kv.Getter, reader services.CanonicalReader, overrides BlockHashOverrides) BlockHashProvider {
+ return func(blockNum uint64) (common.Hash, error) {
+ if blockHash, ok := overrides[blockNum]; ok {
+ return blockHash, nil
+ }
+ blockHash, ok, err := reader.CanonicalHash(ctx, tx, blockNum)
+ if err != nil || !ok {
+ log.Debug("Can't get block hash by number", "blockNum", blockNum, "ok", ok, "err", err)
+ }
+ return blockHash, err
+ }
+}
+
+func MakeHeaderGetter(requireCanonical bool, tx kv.Getter, headerReader services.HeaderReader) BlockHashProvider {
+ return func(n uint64) (common.Hash, error) {
+ h, err := headerReader.HeaderByNumber(context.Background(), tx, n)
+ if err != nil {
+ log.Error("Can't get block hash by number", "number", n, "only-canonical", requireCanonical)
+ return common.Hash{}, err
+ }
+ if h == nil {
+ log.Warn("[evm] header is nil", "blockNum", n)
+ return common.Hash{}, nil
+ }
+ return h.Hash(), nil
+ }
+}
+
+type ReusableCaller struct {
+ evm *vm.EVM
+ intraBlockState *state.IntraBlockState
+ gasCap uint64
+ baseFee *uint256.Int
+ stateReader state.StateReader
+ callTimeout time.Duration
+ message *types.Message
+}
+
+func (r *ReusableCaller) DoCallWithNewGas(
+ ctx context.Context,
+ newGas uint64,
+ engine consensus.EngineReader,
+ overrides *ethapi2.StateOverrides,
+) (*evmtypes.ExecutionResult, error) {
+ var cancel context.CancelFunc
+ if r.callTimeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, r.callTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(ctx)
+ }
+
+ // Make sure the context is cancelled when the call has completed,
+ // so that resources are cleaned up.
+ defer cancel()
+
+ r.message.ChangeGas(r.gasCap, newGas)
+
+ // reset the EVM so that we can continue to use it with the new context
+ txCtx := core.NewEVMTxContext(r.message)
+ if overrides == nil {
+ r.intraBlockState = state.New(r.stateReader)
+ }
+
+ r.evm.Reset(txCtx, r.intraBlockState)
+
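+ // Unlike DoCall above, the EVM is not cancelled here; we only record that the deadline passed and report it after ApplyMessage returns.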
+ timedOut := false
+ go func() {
+ <-ctx.Done()
+ timedOut = true
+ }()
+
+ gp := new(core.GasPool).AddGas(r.message.Gas()).AddBlobGas(r.message.BlobGas())
+
+ result, err := core.ApplyMessage(r.evm, r.message, gp, true /* refunds */, false /* gasBailout */, engine)
+ if err != nil {
+ return nil, err
+ }
+
+ // If the timer caused an abort, return an appropriate error message
+ if timedOut {
+ return nil, fmt.Errorf("execution aborted (timeout = %v)", r.callTimeout)
+ }
+
+ return result, nil
+}
+
+func NewReusableCaller(
+ engine consensus.EngineReader,
+ stateReader state.StateReader,
+ overrides *ethapi2.StateOverrides,
+ header *types.Header,
+ initialArgs ethapi2.CallArgs,
+ gasCap uint64,
+ blockNrOrHash rpc.BlockNumberOrHash,
+ tx kv.Tx,
+ headerReader services.HeaderReader,
+ chainConfig *chain.Config,
+ callTimeout time.Duration,
+) (*ReusableCaller, error) {
+ ibs := state.New(stateReader)
+
+ if overrides != nil {
+ if err := overrides.Override(ibs); err != nil {
+ return nil, err
+ }
+ }
+
+ var baseFee *uint256.Int
+ if header != nil && header.BaseFee != nil {
+ var overflow bool
+ baseFee, overflow = uint256.FromBig(header.BaseFee)
+ if overflow {
+ return nil, errors.New("header.BaseFee uint256 overflow")
+ }
+ }
+
+ msg, err := initialArgs.ToMessage(gasCap, baseFee)
+ if err != nil {
+ return nil, err
+ }
+
+ blockCtx := NewEVMBlockContext(engine, header, blockNrOrHash.RequireCanonical, tx, headerReader, chainConfig)
+ txCtx := core.NewEVMTxContext(msg)
+
+ evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{NoBaseFee: true})
+
+ return &ReusableCaller{
+ evm: evm,
+ intraBlockState: ibs,
+ baseFee: baseFee,
+ gasCap: gasCap,
+ callTimeout: callTimeout,
+ stateReader: stateReader,
+ message: msg,
+ }, nil
+}
diff --git a/txnprovider/shutter/pool_test.go b/txnprovider/shutter/pool_test.go
index 89c34283a07..d1a907db1ab 100644
--- a/txnprovider/shutter/pool_test.go
+++ b/txnprovider/shutter/pool_test.go
@@ -75,6 +75,7 @@ func TestPoolCleanup(t *testing.T) {
// simulate some encrypted txn submissions and simulate a new block
encTxn1 := MockEncryptedTxn(t, handle.config.ChainId, ekg.Eon())
encTxn2 := MockEncryptedTxn(t, handle.config.ChainId, ekg.Eon())
+ require.Len(t, pool.AllEncryptedTxns(), 0)
err = handle.SimulateLogEvents(ctx, []types.Log{
MockTxnSubmittedEventLog(t, handle.config, ekg.Eon(), 1, encTxn1),
MockTxnSubmittedEventLog(t, handle.config, ekg.Eon(), 2, encTxn2),
@@ -532,6 +533,7 @@ func (cb *MockContractBackend) PrepareMocks() {
cb.mu.Lock()
defer cb.mu.Unlock()
var res []types.Log
+ addrStrs := make([]string, 0, len(query.Addresses))
for _, addr := range query.Addresses {
logs := cb.mockedFilterLogs[addr]
if len(logs) == 0 {
@@ -540,6 +542,7 @@ func (cb *MockContractBackend) PrepareMocks() {
}
res = append(res, logs[0]...)
cb.mockedFilterLogs[addr] = logs[1:]
+ addrStrs = append(addrStrs, addr.Hex())
}
cb.logger.Trace("--- DEBUG --- called FilterLogs")
return res, nil
diff --git a/txnprovider/txpool/fetch.go b/txnprovider/txpool/fetch.go
index b75ee48acfe..fe82005a36c 100644
--- a/txnprovider/txpool/fetch.go
+++ b/txnprovider/txpool/fetch.go
@@ -546,6 +546,12 @@ func (f *Fetch) handleStateChanges(ctx context.Context, client StateChangesClien
}
func (f *Fetch) handleStateChangesRequest(ctx context.Context, req *remoteproto.StateChangeBatch) error {
+ if tp, ok := f.pool.(*TxPool); ok {
+ // On Arbitrum the txpool does not process state changes; transactions are delivered by the streamer
+ if tp.chainConfig.IsArbitrum() {
+ return nil
+ }
+ }
var unwindTxns, unwindBlobTxns, minedTxns TxnSlots
for _, change := range req.ChangeBatch {
if change.Direction == remoteproto.Direction_FORWARD {
diff --git a/txnprovider/txpool/pool.go b/txnprovider/txpool/pool.go
index 2735bcf29eb..56b1d6db677 100644
--- a/txnprovider/txpool/pool.go
+++ b/txnprovider/txpool/pool.go
@@ -1224,7 +1224,8 @@ func (p *TxPool) isOsaka() bool {
func (p *TxPool) GetMaxBlobsPerBlock() uint64 {
now := time.Now().Unix()
- return p.chainConfig.GetMaxBlobsPerBlock(uint64(now))
+ // TODO arbitrum
+ return p.chainConfig.GetMaxBlobsPerBlock(uint64(now), 0)
}
// Check that the serialized txn should not exceed a certain max size