From ea5a60354a0bb19ab647d42aad5a5e3daa17428c Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 11:24:56 +0800 Subject: [PATCH 01/36] port changes from #1013 --- cmd/geth/main.go | 5 + cmd/utils/flags.go | 49 ++++ common/backoff/exponential.go | 51 ++++ common/backoff/exponential_test.go | 39 +++ common/heap.go | 109 ++++++++ common/heap_test.go | 40 +++ common/shrinkingmap.go | 71 +++++ common/shrinkingmap_test.go | 135 ++++++++++ core/blockchain.go | 50 ++++ core/rawdb/accessors_da_syncer.go | 39 +++ core/rawdb/schema.go | 3 + eth/backend.go | 58 ++++- eth/ethconfig/config.go | 10 + go.mod | 4 +- go.sum | 3 + miner/miner.go | 10 +- miner/miner_test.go | 2 +- miner/scroll_worker.go | 8 +- miner/scroll_worker_test.go | 2 +- node/config.go | 2 + node/node.go | 11 +- rollup/da_syncer/batch_queue.go | 102 ++++++++ .../blob_client/beacon_node_client.go | 192 ++++++++++++++ rollup/da_syncer/blob_client/blob_client.go | 64 +++++ .../da_syncer/blob_client/blob_scan_client.go | 92 +++++++ .../blob_client/block_native_client.go | 85 ++++++ rollup/da_syncer/block_queue.go | 56 ++++ rollup/da_syncer/da/calldata_blob_source.go | 246 ++++++++++++++++++ rollup/da_syncer/da/commitV0.go | 172 ++++++++++++ rollup/da_syncer/da/commitV1.go | 82 ++++++ rollup/da_syncer/da/da.go | 69 +++++ rollup/da_syncer/da/finalize.go | 34 +++ rollup/da_syncer/da/revert.go | 33 +++ rollup/da_syncer/da_queue.go | 70 +++++ rollup/da_syncer/da_syncer.go | 49 ++++ rollup/da_syncer/data_source.go | 44 ++++ rollup/da_syncer/modes.go | 52 ++++ rollup/da_syncer/serrors/errors.go | 62 +++++ rollup/da_syncer/syncing_pipeline.go | 233 +++++++++++++++++ rollup/rollup_sync_service/abi.go | 4 +- rollup/rollup_sync_service/abi_test.go | 4 +- rollup/rollup_sync_service/l1client.go | 80 +++++- rollup/rollup_sync_service/l1client_test.go | 8 +- .../rollup_sync_service.go | 6 +- .../rollup_sync_service_test.go | 8 +- 45 files changed, 2502 insertions(+), 46 deletions(-) create mode 100644 common/backoff/exponential.go create mode 100644 common/backoff/exponential_test.go create mode 100644 common/heap.go create mode 100644 common/heap_test.go create mode 100644 common/shrinkingmap.go create mode 100644 common/shrinkingmap_test.go create mode 100644 core/rawdb/accessors_da_syncer.go create mode 100644 rollup/da_syncer/batch_queue.go create mode 100644 rollup/da_syncer/blob_client/beacon_node_client.go create mode 100644 rollup/da_syncer/blob_client/blob_client.go create mode 100644 rollup/da_syncer/blob_client/blob_scan_client.go create mode 100644 rollup/da_syncer/blob_client/block_native_client.go create mode 100644 rollup/da_syncer/block_queue.go create mode 100644 rollup/da_syncer/da/calldata_blob_source.go create mode 100644 rollup/da_syncer/da/commitV0.go create mode 100644 rollup/da_syncer/da/commitV1.go create mode 100644 rollup/da_syncer/da/da.go create mode 100644 rollup/da_syncer/da/finalize.go create mode 100644 rollup/da_syncer/da/revert.go create mode 100644 rollup/da_syncer/da_queue.go create mode 100644 rollup/da_syncer/da_syncer.go create mode 100644 rollup/da_syncer/data_source.go create mode 100644 rollup/da_syncer/modes.go create mode 100644 rollup/da_syncer/serrors/errors.go create mode 100644 rollup/da_syncer/syncing_pipeline.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 24760af5e080..f2147f35d4ad 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -171,6 +171,11 @@ var ( utils.CircuitCapacityCheckWorkersFlag, utils.RollupVerifyEnabledFlag, 
utils.ShadowforkPeersFlag, + utils.DASyncEnabledFlag, + utils.DASnapshotFileFlag, + utils.DABlockNativeAPIEndpointFlag, + utils.DABlobScanAPIEndpointFlag, + utils.DABeaconNodeAPIEndpointFlag, } rpcFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 3e4ad289a906..445248b1ff6f 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -74,6 +74,7 @@ import ( "github.com/scroll-tech/go-ethereum/p2p/nat" "github.com/scroll-tech/go-ethereum/p2p/netutil" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" "github.com/scroll-tech/go-ethereum/rollup/tracing" "github.com/scroll-tech/go-ethereum/rpc" ) @@ -871,6 +872,28 @@ var ( Name: "net.shadowforkpeers", Usage: "peer ids of shadow fork peers", } + + // DA syncing settings + DASyncEnabledFlag = &cli.BoolFlag{ + Name: "da.sync", + Usage: "Enable node syncing from DA", + } + DASnapshotFileFlag = &cli.StringFlag{ + Name: "da.snapshot.file", + Usage: "Snapshot file to sync from DA", + } + DABlobScanAPIEndpointFlag = &cli.StringFlag{ + Name: "da.blob.blobscan", + Usage: "BlobScan blob API endpoint", + } + DABlockNativeAPIEndpointFlag = &cli.StringFlag{ + Name: "da.blob.blocknative", + Usage: "BlockNative blob API endpoint", + } + DABeaconNodeAPIEndpointFlag = &cli.StringFlag{ + Name: "da.blob.beaconnode", + Usage: "Beacon node API endpoint", + } ) // MakeDataDir retrieves the currently requested data directory, terminating @@ -1315,6 +1338,10 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { setSmartCard(ctx, cfg) setL1(ctx, cfg) + if ctx.IsSet(DASyncEnabledFlag.Name) { + cfg.DaSyncingEnabled = ctx.Bool(DASyncEnabledFlag.Name) + } + if ctx.GlobalIsSet(ExternalSignerFlag.Name) { cfg.ExternalSigner = ctx.GlobalString(ExternalSignerFlag.Name) } @@ -1597,6 +1624,27 @@ func setEnableRollupVerify(ctx *cli.Context, cfg *ethconfig.Config) { } } +func setDA(ctx *cli.Context, cfg *ethconfig.Config) { + if ctx.IsSet(DASyncEnabledFlag.Name) { + cfg.EnableDASyncing = ctx.Bool(DASyncEnabledFlag.Name) + if ctx.IsSet(DAModeFlag.Name) { + cfg.DA.FetcherMode = *flags.GlobalTextMarshaler(ctx, DAModeFlag.Name).(*da_syncer.FetcherMode) + } + if ctx.IsSet(DASnapshotFileFlag.Name) { + cfg.DA.SnapshotFilePath = ctx.String(DASnapshotFileFlag.Name) + } + if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) { + cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name) + } + if ctx.IsSet(DABlockNativeAPIEndpointFlag.Name) { + cfg.DA.BlockNativeAPIEndpoint = ctx.String(DABlockNativeAPIEndpointFlag.Name) + } + if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) { + cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name) + } + } +} + func setMaxBlockRange(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.GlobalIsSet(MaxBlockRangeFlag.Name) { cfg.MaxBlockRange = ctx.GlobalInt64(MaxBlockRangeFlag.Name) @@ -1672,6 +1720,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { setLes(ctx, cfg) setCircuitCapacityCheck(ctx, cfg) setEnableRollupVerify(ctx, cfg) + setDA(ctx, cfg) setMaxBlockRange(ctx, cfg) if ctx.GlobalIsSet(ShadowforkPeersFlag.Name) { cfg.ShadowForkPeerIDs = ctx.GlobalStringSlice(ShadowforkPeersFlag.Name) diff --git a/common/backoff/exponential.go b/common/backoff/exponential.go new file mode 100644 index 000000000000..e1f9b53a350e --- /dev/null +++ b/common/backoff/exponential.go @@ -0,0 +1,51 @@ +package backoff + +import ( + "math" + "math/rand" + "time" +) + +// Exponential is a backoff strategy that increases the delay between retries 
exponentially. +type Exponential struct { + attempt int + + maxJitter time.Duration + + min time.Duration + max time.Duration +} + +func NewExponential(minimum, maximum, maxJitter time.Duration) *Exponential { + return &Exponential{ + min: minimum, + max: maximum, + maxJitter: maxJitter, + } +} + +func (e *Exponential) NextDuration() time.Duration { + var jitter time.Duration + if e.maxJitter > 0 { + jitter = time.Duration(rand.Int63n(e.maxJitter.Nanoseconds())) + } + + minFloat := float64(e.min) + duration := math.Pow(2, float64(e.attempt)) * minFloat + + // limit at configured maximum + if duration > float64(e.max) { + duration = float64(e.max) + } + + e.attempt++ + return time.Duration(duration) + jitter +} + +func (e *Exponential) Reset() { + e.attempt = 0 +} + +func (e *Exponential) Attempt() int { + return e.attempt +} diff --git a/common/backoff/exponential_test.go b/common/backoff/exponential_test.go new file mode 100644 index 000000000000..ff659337a2b0 --- /dev/null +++ b/common/backoff/exponential_test.go @@ -0,0 +1,39 @@ +package backoff + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestExponentialBackoff(t *testing.T) { + t.Run("Multiple attempts", func(t *testing.T) { + e := NewExponential(100*time.Millisecond, 10*time.Second, 0) + expectedDurations := []time.Duration{ + 100 * time.Millisecond, + 200 * time.Millisecond, + 400 * time.Millisecond, + 800 * time.Millisecond, + 1600 * time.Millisecond, + 3200 * time.Millisecond, + 6400 * time.Millisecond, + 10 * time.Second, // capped at max + } + for i, expected := range expectedDurations { + require.Equal(t, expected, e.NextDuration(), "attempt %d", i) + } + }) + + t.Run("Jitter added", func(t *testing.T) { + e := NewExponential(1*time.Second, 10*time.Second, 1*time.Second) + duration := e.NextDuration() + require.GreaterOrEqual(t, duration, 1*time.Second) + require.Less(t, duration, 2*time.Second) + }) + + t.Run("Edge case: min > max", func(t *testing.T) { + e := NewExponential(10*time.Second, 5*time.Second, 0) + require.Equal(t, 5*time.Second, e.NextDuration()) + }) +} diff --git a/common/heap.go b/common/heap.go new file mode 100644 index 000000000000..67b79a1136d1 --- /dev/null +++ b/common/heap.go @@ -0,0 +1,109 @@ +package common + +import ( + "container/heap" +) + +// Heap is a generic min-heap (or max-heap, depending on Comparable behavior) implementation. 
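+// Elements are wrapped in HeapElement handles that track their current index in the
+// underlying slice, so arbitrary elements can be removed in O(log n) via Remove.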
+type Heap[T Comparable[T]] struct { + heap innerHeap[T] +} + +func NewHeap[T Comparable[T]]() *Heap[T] { + return &Heap[T]{ + heap: make(innerHeap[T], 0), + } +} + +func (h *Heap[T]) Len() int { + return len(h.heap) +} + +func (h *Heap[T]) Push(element T) *HeapElement[T] { + heapElement := NewHeapElement(element) + heap.Push(&h.heap, heapElement) + + return heapElement +} + +func (h *Heap[T]) Pop() *HeapElement[T] { + return heap.Pop(&h.heap).(*HeapElement[T]) +} + +func (h *Heap[T]) Peek() *HeapElement[T] { + if h.Len() == 0 { + return nil + } + + return h.heap[0] +} + +func (h *Heap[T]) Remove(element *HeapElement[T]) { + heap.Remove(&h.heap, element.index) +} + +func (h *Heap[T]) Clear() { + h.heap = make(innerHeap[T], 0) +} + +type innerHeap[T Comparable[T]] []*HeapElement[T] + +func (h innerHeap[T]) Len() int { + return len(h) +} + +func (h innerHeap[T]) Less(i, j int) bool { + return h[i].Value().CompareTo(h[j].Value()) < 0 +} + +func (h innerHeap[T]) Swap(i, j int) { + h[i], h[j] = h[j], h[i] + h[i].index, h[j].index = i, j +} + +func (h *innerHeap[T]) Push(x interface{}) { + data := x.(*HeapElement[T]) + *h = append(*h, data) + data.index = len(*h) - 1 +} + +func (h *innerHeap[T]) Pop() interface{} { + n := len(*h) + element := (*h)[n-1] + (*h)[n-1] = nil // avoid memory leak + *h = (*h)[:n-1] + element.index = -1 + + return element +} + +// Comparable is an interface for types that can be compared. +type Comparable[T any] interface { + // CompareTo compares x with other. + // To create a min heap, return: + // -1 if x < other + // 0 if x == other + // +1 if x > other + // To create a max heap, return the opposite. + CompareTo(other T) int +} + +// HeapElement is a wrapper around the value stored in the heap. +type HeapElement[T Comparable[T]] struct { + value T + index int +} + +func NewHeapElement[T Comparable[T]](value T) *HeapElement[T] { + return &HeapElement[T]{ + value: value, + } +} + +func (h *HeapElement[T]) Value() T { + return h.value +} + +func (h *HeapElement[T]) Index() int { + return h.index +} diff --git a/common/heap_test.go b/common/heap_test.go new file mode 100644 index 000000000000..ac927c375de4 --- /dev/null +++ b/common/heap_test.go @@ -0,0 +1,40 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type Int int + +func (i Int) CompareTo(other Int) int { + if i < other { + return -1 + } else if i > other { + return 1 + } else { + return 0 + } +} + +func TestHeap(t *testing.T) { + h := NewHeap[Int]() + + require.Equal(t, 0, h.Len(), "Heap should be empty initially") + + h.Push(Int(3)) + h.Push(Int(1)) + h.Push(Int(2)) + + require.Equal(t, 3, h.Len(), "Heap should have three elements after pushing") + + require.EqualValues(t, 1, h.Pop(), "Pop should return the smallest element") + require.Equal(t, 2, h.Len(), "Heap should have two elements after popping") + + require.EqualValues(t, 2, h.Pop(), "Pop should return the next smallest element") + require.Equal(t, 1, h.Len(), "Heap should have one element after popping") + + require.EqualValues(t, 3, h.Pop(), "Pop should return the last element") + require.Equal(t, 0, h.Len(), "Heap should be empty after popping all elements") +} diff --git a/common/shrinkingmap.go b/common/shrinkingmap.go new file mode 100644 index 000000000000..4bf98f87c2da --- /dev/null +++ b/common/shrinkingmap.go @@ -0,0 +1,71 @@ +package common + +// ShrinkingMap is a map that shrinks itself (by allocating a new map) after a certain number of deletions have been performed. 
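+// Shrinking reallocates the map and copies over the surviving entries, so the memory held by deleted buckets can be reclaimed by the GC.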
+// If shrinkAfterDeletionsCount is set to <=0, the map will never shrink. +// This is useful to prevent memory leaks in long-running processes that delete a lot of keys from a map. +// See here for more details: https://github.com/golang/go/issues/20135 +type ShrinkingMap[K comparable, V any] struct { + m map[K]V + deletedKeys int + + shrinkAfterDeletionsCount int +} + +func NewShrinkingMap[K comparable, V any](shrinkAfterDeletionsCount int) *ShrinkingMap[K, V] { + return &ShrinkingMap[K, V]{ + m: make(map[K]V), + shrinkAfterDeletionsCount: shrinkAfterDeletionsCount, + } +} + +func (s *ShrinkingMap[K, V]) Set(key K, value V) { + s.m[key] = value +} + +func (s *ShrinkingMap[K, V]) Get(key K) (value V, exists bool) { + value, exists = s.m[key] + return value, exists +} + +func (s *ShrinkingMap[K, V]) Has(key K) bool { + _, exists := s.m[key] + return exists +} + +func (s *ShrinkingMap[K, V]) Delete(key K) (deleted bool) { + if _, exists := s.m[key]; !exists { + return false + } + + delete(s.m, key) + s.deletedKeys++ + + if s.shouldShrink() { + s.shrink() + } + + return true +} + +func (s *ShrinkingMap[K, V]) Size() (size int) { + return len(s.m) +} + +func (s *ShrinkingMap[K, V]) Clear() { + s.m = make(map[K]V) + s.deletedKeys = 0 +} + +func (s *ShrinkingMap[K, V]) shouldShrink() bool { + return s.shrinkAfterDeletionsCount > 0 && s.deletedKeys >= s.shrinkAfterDeletionsCount +} + +func (s *ShrinkingMap[K, V]) shrink() { + newMap := make(map[K]V, len(s.m)) + for k, v := range s.m { + newMap[k] = v + } + + s.m = newMap + s.deletedKeys = 0 +} diff --git a/common/shrinkingmap_test.go b/common/shrinkingmap_test.go new file mode 100644 index 000000000000..c94a917ee140 --- /dev/null +++ b/common/shrinkingmap_test.go @@ -0,0 +1,135 @@ +package common + +import ( + "fmt" + "runtime" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestShrinkingMap_Shrink(t *testing.T) { + m := NewShrinkingMap[int, int](10) + + for i := 0; i < 100; i++ { + m.Set(i, i) + } + + for i := 0; i < 100; i++ { + val, exists := m.Get(i) + require.Equal(t, true, exists) + require.Equal(t, i, val) + + has := m.Has(i) + require.Equal(t, true, has) + } + + for i := 0; i < 9; i++ { + m.Delete(i) + } + require.Equal(t, 9, m.deletedKeys) + + // Delete the 10th key -> shrinks the map + m.Delete(9) + require.Equal(t, 0, m.deletedKeys) + + for i := 0; i < 100; i++ { + if i < 10 { + val, exists := m.Get(i) + require.Equal(t, false, exists) + require.Equal(t, 0, val) + + has := m.Has(i) + require.Equal(t, false, has) + } else { + val, exists := m.Get(i) + require.Equal(t, true, exists) + require.Equal(t, i, val) + + has := m.Has(i) + require.Equal(t, true, has) + } + } + + require.Equal(t, 90, m.Size()) +} + +func TestNewShrinkingMap_NoShrinking(t *testing.T) { + m := NewShrinkingMap[int, int](0) + for i := 0; i < 10000; i++ { + m.Set(i, i) + } + + for i := 0; i < 10000; i++ { + val, exists := m.Get(i) + require.Equal(t, true, exists) + require.Equal(t, i, val) + + m.Delete(i) + } + + require.Equal(t, 0, m.Size()) + require.Equal(t, 10000, m.deletedKeys) +} + +func TestShrinkingMap_MemoryShrinking(t *testing.T) { + t.Skip("Only for manual testing and memory profiling") + + gcAndPrintAlloc("start") + m := NewShrinkingMap[int, int](10000) + + const mapSize = 1_000_000 + + for i := 0; i < mapSize; i++ { + m.Set(i, i) + } + + gcAndPrintAlloc("after map creation") + + for i := 0; i < mapSize/2; i++ { + m.Delete(i) + } + + gcAndPrintAlloc("after removing half of the elements") + + val, exist := m.Get(mapSize - 1) + 
require.Equal(t, true, exist) + require.Equal(t, mapSize-1, val) + + gcAndPrintAlloc("end") +} + +func TestShrinkingMap_MemoryNoShrinking(t *testing.T) { + t.Skip("Only for manual testing and memory profiling") + + gcAndPrintAlloc("start") + m := NewShrinkingMap[int, int](0) + + const mapSize = 1_000_000 + + for i := 0; i < mapSize; i++ { + m.Set(i, i) + } + + gcAndPrintAlloc("after map creation") + + for i := 0; i < mapSize/2; i++ { + m.Delete(i) + } + + gcAndPrintAlloc("after removing half of the elements") + + val, exist := m.Get(mapSize - 1) + require.Equal(t, true, exist) + require.Equal(t, mapSize-1, val) + + gcAndPrintAlloc("end") +} + +func gcAndPrintAlloc(prefix string) { + runtime.GC() + + var stats runtime.MemStats + runtime.ReadMemStats(&stats) + fmt.Printf(prefix+", Allocated memory %d KiB\n", stats.Alloc/1024) +} diff --git a/core/blockchain.go b/core/blockchain.go index 63b244cc06c7..e3294ded5bde 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1803,6 +1803,56 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er return it.index, err } +func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types.Header, txs types.Transactions) (WriteStatus, error) { + if !bc.chainmu.TryLock() { + return NonStatTy, errInsertionInterrupted + } + defer bc.chainmu.Unlock() + + statedb, err := state.New(parentBlock.Root(), bc.stateCache, bc.snaps) + if err != nil { + return NonStatTy, err + } + + statedb.StartPrefetcher("l1sync") + defer statedb.StopPrefetcher() + + header.ParentHash = parentBlock.Hash() + + tempBlock := types.NewBlockWithHeader(header).WithBody(txs, nil) + receipts, logs, gasUsed, err := bc.processor.Process(tempBlock, statedb, bc.vmConfig) + if err != nil { + return NonStatTy, fmt.Errorf("error processing block: %w", err) + } + + // TODO: once we have the extra and difficulty we need to verify the signature of the block with Clique + // This should be done with https://github.com/scroll-tech/go-ethereum/pull/913. + + // finalize and assemble block as fullBlock + header.GasUsed = gasUsed + header.Root = statedb.IntermediateRoot(bc.chainConfig.IsEIP158(header.Number)) + + fullBlock := types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) + + blockHash := fullBlock.Hash() + // manually replace the block hash in the receipts + for i, receipt := range receipts { + // add block location fields + receipt.BlockHash = blockHash + receipt.BlockNumber = tempBlock.Number() + receipt.TransactionIndex = uint(i) + + for _, l := range receipt.Logs { + l.BlockHash = blockHash + } + } + for _, l := range logs { + l.BlockHash = blockHash + } + + return bc.writeBlockAndSetHead(fullBlock, receipts, logs, statedb, false) +} + // insertSideChain is called when an import batch hits upon a pruned ancestor // error, which happens when a sidechain with a sufficiently old fork-block is // found. diff --git a/core/rawdb/accessors_da_syncer.go b/core/rawdb/accessors_da_syncer.go new file mode 100644 index 000000000000..96f816685652 --- /dev/null +++ b/core/rawdb/accessors_da_syncer.go @@ -0,0 +1,39 @@ +package rawdb + +import ( + "math/big" + + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/log" +) + +// WriteDASyncedL1BlockNumber writes the highest synced L1 block number to the database. 
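+// It records the restart point for the DA syncing pipeline, i.e. the L1 block from which syncing can safely resume.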
+func WriteDASyncedL1BlockNumber(db ethdb.KeyValueWriter, L1BlockNumber uint64) { + value := big.NewInt(0).SetUint64(L1BlockNumber).Bytes() + + if err := db.Put(daSyncedL1BlockNumberKey, value); err != nil { + log.Crit("Failed to update DA synced L1 block number", "err", err) + } +} + +// ReadDASyncedL1BlockNumber retrieves the highest synced L1 block number. +func ReadDASyncedL1BlockNumber(db ethdb.Reader) *uint64 { + data, err := db.Get(daSyncedL1BlockNumberKey) + if err != nil && isNotFoundErr(err) { + return nil + } + if err != nil { + log.Crit("Failed to read DA synced L1 block number from database", "err", err) + } + if len(data) == 0 { + return nil + } + + number := new(big.Int).SetBytes(data) + if !number.IsUint64() { + log.Crit("Unexpected DA synced L1 block number in database", "number", number) + } + + value := number.Uint64() + return &value +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 2f8281c83d1d..2e4f2a18c5de 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -125,6 +125,9 @@ var ( numSkippedTransactionsKey = []byte("NumberOfSkippedTransactions") skippedTransactionPrefix = []byte("skip") // skippedTransactionPrefix + tx hash -> skipped transaction skippedTransactionHashPrefix = []byte("sh") // skippedTransactionHashPrefix + index -> tx hash + + // Scroll da syncer store + daSyncedL1BlockNumberKey = []byte("LastDASyncedL1BlockNumber") ) // Use the updated "L1" prefix on all new networks diff --git a/eth/backend.go b/eth/backend.go index 4c7b024f4969..2b6c663d2744 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -56,6 +56,7 @@ import ( "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/rollup/ccc" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rpc" @@ -70,10 +71,12 @@ type Ethereum struct { config *ethconfig.Config // Handlers - txPool *core.TxPool - syncService *sync_service.SyncService - rollupSyncService *rollup_sync_service.RollupSyncService - asyncChecker *ccc.AsyncChecker + txPool *core.TxPool + syncService *sync_service.SyncService + rollupSyncService *rollup_sync_service.RollupSyncService + asyncChecker *ccc.AsyncChecker + syncingPipeline *da_syncer.SyncingPipeline + blockchain *core.BlockChain handler *handler ethDialCandidates enode.Iterator @@ -220,6 +223,18 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthCl } eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain) + // Initialize and start DA syncing pipeline before SyncService as SyncService is blocking until all L1 messages are loaded. + // We need SyncService to load the L1 messages for DA syncing, but since both sync from last known L1 state, we can + // simply let them run simultaneously. If messages are missing in DA syncing, it will be handled by the syncing pipeline + // by waiting and retrying. 
+ if config.EnableDASyncing { + eth.syncingPipeline, err = da_syncer.NewSyncingPipeline(context.Background(), eth.blockchain, chainConfig, eth.chainDb, l1Client, stack.Config().L1DeploymentBlock, config.DA) + if err != nil { + return nil, fmt.Errorf("cannot initialize da syncer: %w", err) + } + eth.syncingPipeline.Start() + } + // initialize and start L1 message sync service eth.syncService, err = sync_service.NewSyncService(context.Background(), chainConfig, stack.Config(), eth.chainDb, l1Client) if err != nil { @@ -257,7 +272,7 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthCl return nil, err } - eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock) + eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock, config.EnableDASyncing) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} @@ -330,6 +345,15 @@ func (s *Ethereum) APIs() []rpc.API { // Append any APIs exposed explicitly by the consensus engine apis = append(apis, s.engine.APIs(s.BlockChain())...) + if !s.config.EnableDASyncing { + apis = append(apis, rpc.API{ + Namespace: "eth", + Version: "1.0", + Service: downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux), + Public: true, + }) + } + // Append all the local APIs and return return append(apis, []rpc.API{ { @@ -342,11 +366,6 @@ func (s *Ethereum) APIs() []rpc.API { Version: "1.0", Service: NewPublicMinerAPI(s), Public: true, - }, { - Namespace: "eth", - Version: "1.0", - Service: downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux), - Public: true, }, { Namespace: "miner", Version: "1.0", @@ -553,6 +572,11 @@ func (s *Ethereum) SyncService() *sync_service.SyncService { return s.syncServic // Protocols returns all the currently configured // network protocols to start. func (s *Ethereum) Protocols() []p2p.Protocol { + // if DA syncing enabled then we don't create handler + if s.config.EnableDASyncing { + return nil + } + protos := eth.MakeProtocols((*ethHandler)(s.handler), s.networkID, s.ethDialCandidates) if !s.blockchain.Config().Scroll.ZktrieEnabled() && s.config.SnapshotCache > 0 { protos = append(protos, snap.MakeProtocols((*snapHandler)(s.handler), s.snapDialCandidates)...) @@ -577,7 +601,11 @@ func (s *Ethereum) Start() error { // maxPeers -= s.config.LightPeers //} // Start the networking layer and the light server if requested - s.handler.Start(maxPeers) + + // handler is not enabled when DA syncing enabled + if !s.config.EnableDASyncing { + s.handler.Start(maxPeers) + } return nil } @@ -587,7 +615,10 @@ func (s *Ethereum) Stop() error { // Stop all the peer-related stuff first. s.ethDialCandidates.Close() s.snapDialCandidates.Close() - s.handler.Stop() + // handler is not enabled if DA syncing enabled + if !s.config.EnableDASyncing { + s.handler.Stop() + } // Then stop everything else. 
s.bloomIndexer.Close() @@ -597,6 +628,9 @@ func (s *Ethereum) Stop() error { if s.config.EnableRollupVerify { s.rollupSyncService.Stop() } + if s.config.EnableDASyncing { + s.syncingPipeline.Stop() + } s.miner.Close() if s.config.CheckCircuitCapacity { s.asyncChecker.Wait() diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 5a933a95e5f9..ad295d5de3be 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -37,6 +37,7 @@ import ( "github.com/scroll-tech/go-ethereum/miner" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" ) // FullNodeGPO contains default gasprice oracle settings for full node. @@ -93,6 +94,9 @@ var Defaults = Config{ GPO: FullNodeGPO, RPCTxFeeCap: 1, // 1 ether MaxBlockRange: -1, // Default unconfigured value: no block range limit for backward compatibility + DA: da_syncer.Config{ + FetcherMode: da_syncer.L1RPC, + }, } func init() { @@ -218,6 +222,12 @@ type Config struct { // List of peer ids that take part in the shadow-fork ShadowForkPeerIDs []string + + // Enable syncing node from DA + EnableDASyncing bool + + // DA syncer options + DA da_syncer.Config } // CreateConsensusEngine creates a consensus engine for the given chain configuration. diff --git a/go.mod b/go.mod index 658a06109d0e..2bda32ee00d7 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac + github.com/scroll-tech/da-codec v0.1.2 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 @@ -96,7 +96,7 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rivo/uniseg v0.4.4 // indirect - github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 // indirect + github.com/supranational/blst v0.3.11 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect diff --git a/go.sum b/go.sum index 290418161087..8c96ce6e7cd5 100644 --- a/go.sum +++ b/go.sum @@ -394,6 +394,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac h1:DjLrqjoOLVFug9ZkAbJYwjtYW51YZE0Num3p4cZXaZs= github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= +github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= +github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -430,6 +432,7 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0= github.com/supranational/blst 
v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= diff --git a/miner/miner.go b/miner/miner.go index f0920ade1376..e6b1b2ae5d38 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -76,7 +76,7 @@ type Miner struct { wg sync.WaitGroup } -func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool) *Miner { +func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *event.TypeMux, engine consensus.Engine, isLocalBlock func(block *types.Block) bool, daSyncingEnabled bool) *Miner { miner := &Miner{ eth: eth, mux: mux, @@ -84,10 +84,12 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even exitCh: make(chan struct{}), startCh: make(chan common.Address), stopCh: make(chan struct{}), - worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true), + worker: newWorker(config, chainConfig, engine, eth, mux, isLocalBlock, true, daSyncingEnabled), + } + if !daSyncingEnabled { + miner.wg.Add(1) + go miner.update() } - miner.wg.Add(1) - go miner.update() return miner } diff --git a/miner/miner_test.go b/miner/miner_test.go index d84c9aea703e..f39700193430 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -276,5 +276,5 @@ func createMiner(t *testing.T) (*Miner, *event.TypeMux) { // Create event Mux mux := new(event.TypeMux) // Create Miner - return New(backend, &config, chainConfig, mux, engine, nil), mux + return New(backend, &config, chainConfig, mux, engine, nil, false), mux } diff --git a/miner/scroll_worker.go b/miner/scroll_worker.go index ec6337bef406..e152878d40e6 100644 --- a/miner/scroll_worker.go +++ b/miner/scroll_worker.go @@ -177,7 +177,7 @@ type worker struct { skipTxHash common.Hash } -func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool) *worker { +func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus.Engine, eth Backend, mux *event.TypeMux, isLocalBlock func(*types.Block) bool, init bool, daSyncingEnabled bool) *worker { worker := &worker{ config: config, chainConfig: chainConfig, @@ -192,6 +192,12 @@ func newWorker(config *Config, chainConfig *params.ChainConfig, engine consensus startCh: make(chan struct{}, 1), reorgCh: make(chan reorgTrigger, 1), } + + if daSyncingEnabled { + log.Info("Worker will not start, because DA syncing is enabled") + return worker + } + worker.asyncChecker = ccc.NewAsyncChecker(worker.chain, config.CCCMaxWorkers, false).WithOnFailingBlock(worker.onBlockFailingCCC) // Subscribe NewTxsEvent for tx pool diff --git a/miner/scroll_worker_test.go b/miner/scroll_worker_test.go index 407d508cf819..70ec4a9582d3 100644 --- a/miner/scroll_worker_test.go +++ b/miner/scroll_worker_test.go @@ -208,7 +208,7 @@ func (b *testWorkerBackend) newRandomTx(creation bool) *types.Transaction { func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*worker, 
*testWorkerBackend) { backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks) backend.txPool.AddLocals(pendingTxs) - w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false) + w := newWorker(testConfig, chainConfig, engine, backend, new(event.TypeMux), nil, false, false) w.setEtherbase(testBankAddress) return w, backend } diff --git a/node/config.go b/node/config.go index 439b11a2f170..2dc3207dfdca 100644 --- a/node/config.go +++ b/node/config.go @@ -197,6 +197,8 @@ type Config struct { L1Confirmations rpc.BlockNumber `toml:",omitempty"` // L1 bridge deployment block number L1DeploymentBlock uint64 `toml:",omitempty"` + // Is daSyncingEnabled + DaSyncingEnabled bool `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff --git a/node/node.go b/node/node.go index ac8c27dde51c..8c02d46f1ccc 100644 --- a/node/node.go +++ b/node/node.go @@ -262,10 +262,15 @@ func (n *Node) doClose(errs []error) error { // openEndpoints starts all network and RPC endpoints. func (n *Node) openEndpoints() error { // start networking endpoints - n.log.Info("Starting peer-to-peer node", "instance", n.server.Name) - if err := n.server.Start(); err != nil { - return convertFileLockError(err) + if !n.config.DaSyncingEnabled { + n.log.Info("Starting peer-to-peer node", "instance", n.server.Name) + if err := n.server.Start(); err != nil { + return convertFileLockError(err) + } + } else { + n.log.Info("Peer-to-peer node will not start, because DA syncing is enabled") } + // start RPC endpoints err := n.startRPC() if err != nil { diff --git a/rollup/da_syncer/batch_queue.go b/rollup/da_syncer/batch_queue.go new file mode 100644 index 000000000000..a0172a86c077 --- /dev/null +++ b/rollup/da_syncer/batch_queue.go @@ -0,0 +1,102 @@ +package da_syncer + +import ( + "context" + "fmt" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" +) + +// BatchQueue is a pipeline stage that reads all batch events from DAQueue and provides only finalized batches to the next stage. 
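+// Internally it buffers commit events in a min-heap ordered by batch index, with an auxiliary map from batch index to heap element so that revert and finalize events can look up the corresponding batch quickly.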
+type BatchQueue struct {
+	DAQueue                 *DAQueue
+	db                      ethdb.Database
+	lastFinalizedBatchIndex uint64
+	batches                 *common.Heap[da.Entry]
+	batchesMap              *common.ShrinkingMap[uint64, *common.HeapElement[da.Entry]]
+}
+
+func NewBatchQueue(DAQueue *DAQueue, db ethdb.Database) *BatchQueue {
+	return &BatchQueue{
+		DAQueue:                 DAQueue,
+		db:                      db,
+		lastFinalizedBatchIndex: 0,
+		batches:                 common.NewHeap[da.Entry](),
+		batchesMap:              common.NewShrinkingMap[uint64, *common.HeapElement[da.Entry]](1000),
+	}
+}
+
+// NextBatch finds the next finalized batch and returns the data that was committed in that batch.
+func (bq *BatchQueue) NextBatch(ctx context.Context) (da.Entry, error) {
+	if batch := bq.getFinalizedBatch(); batch != nil {
+		return batch, nil
+	}
+
+	for {
+		daEntry, err := bq.DAQueue.NextDA(ctx)
+		if err != nil {
+			return nil, err
+		}
+		switch daEntry.Type() {
+		case da.CommitBatchV0Type, da.CommitBatchWithBlobType:
+			bq.addBatch(daEntry)
+		case da.RevertBatchType:
+			bq.deleteBatch(daEntry)
+		case da.FinalizeBatchType:
+			if daEntry.BatchIndex() > bq.lastFinalizedBatchIndex {
+				bq.lastFinalizedBatchIndex = daEntry.BatchIndex()
+			}
+
+			if batch := bq.getFinalizedBatch(); batch != nil {
+				return batch, nil
+			}
+		default:
+			return nil, fmt.Errorf("unexpected type of daEntry: %T", daEntry)
+		}
+	}
+}
+
+// getFinalizedBatch returns the next finalized batch if one is available.
+func (bq *BatchQueue) getFinalizedBatch() da.Entry {
+	if bq.batches.Len() == 0 {
+		return nil
+	}
+
+	batch := bq.batches.Peek().Value()
+	if batch.BatchIndex() <= bq.lastFinalizedBatchIndex {
+		bq.deleteBatch(batch)
+		return batch
+	} else {
+		return nil
+	}
+}
+
+func (bq *BatchQueue) addBatch(batch da.Entry) {
+	heapElement := bq.batches.Push(batch)
+	bq.batchesMap.Set(batch.BatchIndex(), heapElement)
+}
+
+// deleteBatch deletes the batch from the map because it was reverted or finalized,
+// and updates DASyncedL1BlockNumber.
+func (bq *BatchQueue) deleteBatch(batch da.Entry) {
+	batchHeapElement, exists := bq.batchesMap.Get(batch.BatchIndex())
+	if !exists {
+		return
+	}
+
+	bq.batchesMap.Delete(batch.BatchIndex())
+	bq.batches.Remove(batchHeapElement)
+
+	// we store the minimum L1 block height of the currently loaded batches here, to be able to resume syncing from the same place in case of restart
+	// TODO: we should store this information when the batch is done being processed to avoid inconsistencies
+	rawdb.WriteDASyncedL1BlockNumber(bq.db, batch.L1BlockNumber()-1)
+}
+
+func (bq *BatchQueue) Reset(height uint64) {
+	bq.batches.Clear()
+	bq.batchesMap.Clear()
+	bq.DAQueue.Reset(height)
+}
diff --git a/rollup/da_syncer/blob_client/beacon_node_client.go b/rollup/da_syncer/blob_client/beacon_node_client.go
new file mode 100644
index 000000000000..5bfd7b9edf6c
--- /dev/null
+++ b/rollup/da_syncer/blob_client/beacon_node_client.go
@@ -0,0 +1,192 @@
+package blob_client
+
+import (
+	"context"
+	"crypto/sha256"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
+	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+)
+
+type BeaconNodeClient struct {
+	apiEndpoint    string
+	l1Client       *rollup_sync_service.L1Client
+	genesisTime    uint64
+	secondsPerSlot uint64
+}
+
+var (
+	beaconNodeGenesisEndpoint = "/eth/v1/beacon/genesis"
+	beaconNodeSpecEndpoint    = "/eth/v1/config/spec"
+	beaconNodeBlobEndpoint    = "/eth/v1/beacon/blob_sidecars"
+)
+
+func NewBeaconNodeClient(apiEndpoint string, l1Client
*rollup_sync_service.L1Client) (*BeaconNodeClient, error) { + // get genesis time + genesisPath, err := url.JoinPath(apiEndpoint, beaconNodeGenesisEndpoint) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err := http.Get(genesisPath) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("beacon node request failed with status: %s: could not read response body: %w", resp.Status, err) + } + bodyStr := string(body) + return nil, fmt.Errorf("beacon node request failed, status: %s, body: %s", resp.Status, bodyStr) + } + + var genesisResp GenesisResp + err = json.NewDecoder(resp.Body).Decode(&genesisResp) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + genesisTime, err := strconv.ParseUint(genesisResp.Data.GenesisTime, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to decode genesis time %s, err: %w", genesisResp.Data.GenesisTime, err) + } + + // get seconds per slot from spec + specPath, err := url.JoinPath(apiEndpoint, beaconNodeSpecEndpoint) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err = http.Get(specPath) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("beacon node request failed with status: %s: could not read response body: %w", resp.Status, err) + } + bodyStr := string(body) + return nil, fmt.Errorf("beacon node request failed, status: %s, body: %s", resp.Status, bodyStr) + } + + var specResp SpecResp + err = json.NewDecoder(resp.Body).Decode(&specResp) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + secondsPerSlot, err := strconv.ParseUint(specResp.Data.SecondsPerSlot, 10, 64) + if err != nil { + return nil, fmt.Errorf("failed to decode seconds per slot %s, err: %w", specResp.Data.SecondsPerSlot, err) + } + if secondsPerSlot == 0 { + return nil, fmt.Errorf("failed to make new BeaconNodeClient, secondsPerSlot is 0") + } + + return &BeaconNodeClient{ + apiEndpoint: apiEndpoint, + l1Client: l1Client, + genesisTime: genesisTime, + secondsPerSlot: secondsPerSlot, + }, nil +} + +func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + // get block timestamp to calculate slot + header, err := c.l1Client.GetHeaderByNumber(blockNumber) + if err != nil { + return nil, fmt.Errorf("failed to get header by number, err: %w", err) + } + slot := (header.Time - c.genesisTime) / c.secondsPerSlot + + // get blob sidecar for slot + blobSidecarPath, err := url.JoinPath(c.apiEndpoint, beaconNodeBlobEndpoint, fmt.Sprintf("%d", slot)) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err := http.Get(blobSidecarPath) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("beacon node request failed with status: %s: could not read response body: %w", resp.Status, err) + } + bodyStr := string(body) + return nil, fmt.Errorf("beacon node 
request failed, status: %s, body: %s", resp.Status, bodyStr) + } + + var blobSidecarResp BlobSidecarResp + err = json.NewDecoder(resp.Body).Decode(&blobSidecarResp) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + + // find blob with desired versionedHash + for _, blob := range blobSidecarResp.Data { + // calculate blob hash from commitment and check it with desired + commitmentBytes := common.FromHex(blob.KzgCommitment) + if len(commitmentBytes) != lenKZGCommitment { + return nil, fmt.Errorf("len of kzg commitment is not correct, expected: %d, got: %d", lenKZGCommitment, len(commitmentBytes)) + } + commitment := kzg4844.Commitment(commitmentBytes) + blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment) + + if blobVersionedHash == versionedHash { + // found desired blob + blobBytes := common.FromHex(blob.Blob) + if len(blobBytes) != lenBlobBytes { + return nil, fmt.Errorf("len of blob data is not correct, expected: %d, got: %d", lenBlobBytes, len(blobBytes)) + } + + b := kzg4844.Blob(blobBytes) + return &b, nil + } + } + + return nil, fmt.Errorf("missing blob %v in slot %d, block number %d", versionedHash, slot, blockNumber) +} + +type GenesisResp struct { + Data struct { + GenesisTime string `json:"genesis_time"` + } `json:"data"` +} + +type SpecResp struct { + Data struct { + SecondsPerSlot string `json:"SECONDS_PER_SLOT"` + } `json:"data"` +} + +type BlobSidecarResp struct { + Data []struct { + Index string `json:"index"` + Blob string `json:"blob"` + KzgCommitment string `json:"kzg_commitment"` + KzgProof string `json:"kzg_proof"` + SignedBlockHeader struct { + Message struct { + Slot string `json:"slot"` + ProposerIndex string `json:"proposer_index"` + ParentRoot string `json:"parent_root"` + StateRoot string `json:"state_root"` + BodyRoot string `json:"body_root"` + } `json:"message"` + Signature string `json:"signature"` + } `json:"signed_block_header"` + KzgCommitmentInclusionProof []string `json:"kzg_commitment_inclusion_proof"` + } `json:"data"` +} diff --git a/rollup/da_syncer/blob_client/blob_client.go b/rollup/da_syncer/blob_client/blob_client.go new file mode 100644 index 000000000000..814b1d4faf2d --- /dev/null +++ b/rollup/da_syncer/blob_client/blob_client.go @@ -0,0 +1,64 @@ +package blob_client + +import ( + "context" + "errors" + "fmt" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" +) + +const ( + lenBlobBytes int = 131072 + lenKZGCommitment int = 48 +) + +type BlobClient interface { + GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) +} + +type BlobClients struct { + list []BlobClient + curPos int +} + +func NewBlobClients(blobClients ...BlobClient) *BlobClients { + return &BlobClients{ + list: blobClients, + curPos: 0, + } +} + +func (c *BlobClients) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + if len(c.list) == 0 { + return nil, fmt.Errorf("BlobClients.GetBlobByVersionedHash: list of BlobClients is empty") + } + + for i := 0; i < len(c.list); i++ { + blob, err := c.list[c.curPos].GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, blockNumber) + if err == nil { + return blob, nil + } + c.nextPos() + // there was an error, try the next blob client in following iteration + 
log.Warn("BlobClients: failed to get blob by versioned hash from BlobClient", "err", err, "blob client pos in BlobClients", c.curPos) + } + + // if we iterated over entire list, return a temporary error that will be handled in syncing_pipeline with a backoff and retry + return nil, serrors.NewTemporaryError(errors.New("BlobClients.GetBlobByVersionedHash: failed to get blob by versioned hash from all BlobClients")) +} + +func (c *BlobClients) nextPos() { + c.curPos = (c.curPos + 1) % len(c.list) +} + +func (c *BlobClients) AddBlobClient(blobClient BlobClient) { + c.list = append(c.list, blobClient) +} + +func (c *BlobClients) Size() int { + return len(c.list) +} diff --git a/rollup/da_syncer/blob_client/blob_scan_client.go b/rollup/da_syncer/blob_client/blob_scan_client.go new file mode 100644 index 000000000000..24b03bed32b9 --- /dev/null +++ b/rollup/da_syncer/blob_client/blob_scan_client.go @@ -0,0 +1,92 @@ +package blob_client + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/common/hexutil" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" +) + +type BlobScanClient struct { + client *http.Client + apiEndpoint string +} + +func NewBlobScanClient(apiEndpoint string) *BlobScanClient { + return &BlobScanClient{ + client: http.DefaultClient, + apiEndpoint: apiEndpoint, + } +} + +func (c *BlobScanClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + // blobscan api docs https://api.blobscan.com/#/blobs/blob-getByBlobId + path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + req, err := http.NewRequestWithContext(ctx, "GET", path, nil) + if err != nil { + return nil, fmt.Errorf("cannot create request, err: %w", err) + } + req.Header.Set("accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + if resp.StatusCode == http.StatusNotFound { + return nil, fmt.Errorf("no blob with versioned hash : %s", versionedHash.String()) + } + var res ErrorRespBlobScan + err = json.NewDecoder(resp.Body).Decode(&res) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + return nil, fmt.Errorf("error while fetching blob, message: %s, code: %s, versioned hash: %s", res.Message, res.Code, versionedHash.String()) + } + var result BlobRespBlobScan + + err = json.NewDecoder(resp.Body).Decode(&result) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + blobBytes, err := hex.DecodeString(result.Data[2:]) + if err != nil { + return nil, fmt.Errorf("failed to decode data to bytes, err: %w", err) + } + if len(blobBytes) != lenBlobBytes { + return nil, fmt.Errorf("len of blob data is not correct, expected: %d, got: %d", lenBlobBytes, len(blobBytes)) + } + blob := kzg4844.Blob(blobBytes) + + // sanity check that retrieved blob matches versioned hash + commitment, err := kzg4844.BlobToCommitment(&blob) + if err != nil { + return nil, fmt.Errorf("failed to convert blob to commitment, err: %w", err) + } + + blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment) + if blobVersionedHash != versionedHash { + return nil, fmt.Errorf("blob 
versioned hash mismatch, expected: %s, got: %s", versionedHash.String(), hexutil.Encode(blobVersionedHash[:])) + } + + return &blob, nil +} + +type BlobRespBlobScan struct { + Data string `json:"data"` +} + +type ErrorRespBlobScan struct { + Message string `json:"message"` + Code string `json:"code"` +} diff --git a/rollup/da_syncer/blob_client/block_native_client.go b/rollup/da_syncer/blob_client/block_native_client.go new file mode 100644 index 000000000000..ddd574d02d10 --- /dev/null +++ b/rollup/da_syncer/blob_client/block_native_client.go @@ -0,0 +1,85 @@ +package blob_client + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/common/hexutil" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" +) + +type BlockNativeClient struct { + apiEndpoint string +} + +func NewBlockNativeClient(apiEndpoint string) *BlockNativeClient { + return &BlockNativeClient{ + apiEndpoint: apiEndpoint, + } +} + +func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { + // blocknative api docs https://docs.blocknative.com/blocknative-data-archive/blob-archive + path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) + if err != nil { + return nil, fmt.Errorf("failed to join path, err: %w", err) + } + resp, err := http.Get(path) + if err != nil { + return nil, fmt.Errorf("cannot do request, err: %w", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + var res ErrorRespBlockNative + err = json.NewDecoder(resp.Body).Decode(&res) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + return nil, fmt.Errorf("error while fetching blob, message: %s, code: %d, versioned hash: %s", res.Error.Message, res.Error.Code, versionedHash.String()) + } + var result BlobRespBlockNative + err = json.NewDecoder(resp.Body).Decode(&result) + if err != nil { + return nil, fmt.Errorf("failed to decode result into struct, err: %w", err) + } + blobBytes, err := hex.DecodeString(result.Blob.Data[2:]) + if err != nil { + return nil, fmt.Errorf("failed to decode data to bytes, err: %w", err) + } + if len(blobBytes) != lenBlobBytes { + return nil, fmt.Errorf("len of blob data is not correct, expected: %d, got: %d", lenBlobBytes, len(blobBytes)) + } + blob := kzg4844.Blob(blobBytes) + + // sanity check that retrieved blob matches versioned hash + commitment, err := kzg4844.BlobToCommitment(&blob) + if err != nil { + return nil, fmt.Errorf("failed to convert blob to commitment, err: %w", err) + } + + blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &commitment) + if blobVersionedHash != versionedHash { + return nil, fmt.Errorf("blob versioned hash mismatch, expected: %s, got: %s", versionedHash.String(), hexutil.Encode(blobVersionedHash[:])) + } + + return &blob, nil +} + +type BlobRespBlockNative struct { + Blob struct { + Data string `json:"data"` + } `json:"blob"` +} + +type ErrorRespBlockNative struct { + Error struct { + Code int `json:"code"` + Message string `json:"message"` + } `json:"error"` +} diff --git a/rollup/da_syncer/block_queue.go b/rollup/da_syncer/block_queue.go new file mode 100644 index 000000000000..a122d41ab356 --- /dev/null +++ b/rollup/da_syncer/block_queue.go @@ -0,0 +1,56 @@ +package da_syncer + +import ( + "context" + "fmt" + + 
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" +) + +// BlockQueue is a pipeline stage that reads batches from BatchQueue, extracts all da.PartialBlock from it and +// provides them to the next stage one-by-one. +type BlockQueue struct { + batchQueue *BatchQueue + blocks []*da.PartialBlock +} + +func NewBlockQueue(batchQueue *BatchQueue) *BlockQueue { + return &BlockQueue{ + batchQueue: batchQueue, + blocks: make([]*da.PartialBlock, 0), + } +} + +func (bq *BlockQueue) NextBlock(ctx context.Context) (*da.PartialBlock, error) { + for len(bq.blocks) == 0 { + err := bq.getBlocksFromBatch(ctx) + if err != nil { + return nil, err + } + } + block := bq.blocks[0] + bq.blocks = bq.blocks[1:] + return block, nil +} + +func (bq *BlockQueue) getBlocksFromBatch(ctx context.Context) error { + daEntry, err := bq.batchQueue.NextBatch(ctx) + if err != nil { + return err + } + + entryWithBlocks, ok := daEntry.(da.EntryWithBlocks) + // this should never happen because we only receive CommitBatch entries + if !ok { + return fmt.Errorf("unexpected type of daEntry: %T", daEntry) + } + + bq.blocks = entryWithBlocks.Blocks() + + return nil +} + +func (bq *BlockQueue) Reset(height uint64) { + bq.blocks = make([]*da.PartialBlock, 0) + bq.batchQueue.Reset(height) +} diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go new file mode 100644 index 000000000000..47eabfceb65f --- /dev/null +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -0,0 +1,246 @@ +package da + +import ( + "context" + "errors" + "fmt" + + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" + "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" +) + +const ( + callDataBlobSourceFetchBlockRange uint64 = 500 + commitBatchEventName = "CommitBatch" + revertBatchEventName = "RevertBatch" + finalizeBatchEventName = "FinalizeBatch" + commitBatchMethodName = "commitBatch" + commitBatchWithBlobProofMethodName = "commitBatchWithBlobProof" + + // the length of method ID at the beginning of transaction data + methodIDLength = 4 +) + +var ( + ErrSourceExhausted = errors.New("data source has been exhausted") +) + +type CalldataBlobSource struct { + ctx context.Context + l1Client *rollup_sync_service.L1Client + blobClient blob_client.BlobClient + l1height uint64 + scrollChainABI *abi.ABI + l1CommitBatchEventSignature common.Hash + l1RevertBatchEventSignature common.Hash + l1FinalizeBatchEventSignature common.Hash + db ethdb.Database + + l1Finalized uint64 +} + +func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { + scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() + if err != nil { + return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) + } + return &CalldataBlobSource{ + ctx: ctx, + l1Client: l1Client, + blobClient: blobClient, + l1height: l1height, + scrollChainABI: scrollChainABI, + l1CommitBatchEventSignature: scrollChainABI.Events[commitBatchEventName].ID, + l1RevertBatchEventSignature: scrollChainABI.Events[revertBatchEventName].ID, + l1FinalizeBatchEventSignature: 
scrollChainABI.Events[finalizeBatchEventName].ID, + db: db, + }, nil +} + +func (ds *CalldataBlobSource) NextData() (Entries, error) { + var err error + to := ds.l1height + callDataBlobSourceFetchBlockRange + + // If there's not enough finalized blocks to request up to, we need to query finalized block number. + // Otherwise, we know that there's more finalized blocks than we want to request up to + // -> no need to query finalized block number + if to > ds.l1Finalized { + ds.l1Finalized, err = ds.l1Client.GetLatestFinalizedBlockNumber() + if err != nil { + return nil, serrors.NewTemporaryError(fmt.Errorf("failed to query GetLatestFinalizedBlockNumber, error: %v", err)) + } + // make sure we don't request more than finalized blocks + to = min(to, ds.l1Finalized) + } + + if ds.l1height > to { + return nil, ErrSourceExhausted + } + + logs, err := ds.l1Client.FetchRollupEventsInRange(ds.l1height, to) + if err != nil { + return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get events, l1height: %d, error: %v", ds.l1height, err)) + } + da, err := ds.processLogsToDA(logs) + if err != nil { + return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process logs to DA, error: %v", err)) + } + + ds.l1height = to + 1 + return da, nil +} + +func (ds *CalldataBlobSource) L1Height() uint64 { + return ds.l1height +} + +func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) { + var entries Entries + var entry Entry + var err error + + for _, vLog := range logs { + switch vLog.Topics[0] { + case ds.l1CommitBatchEventSignature: + event := &rollup_sync_service.L1CommitBatchEvent{} + if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, commitBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + } + + batchIndex := event.BatchIndex.Uint64() + log.Trace("found new CommitBatch event", "batch index", batchIndex) + + if entry, err = ds.getCommitBatchDA(batchIndex, &vLog); err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", batchIndex, err) + } + + case ds.l1RevertBatchEventSignature: + event := &rollup_sync_service.L1RevertBatchEvent{} + if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, revertBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) + } + + batchIndex := event.BatchIndex.Uint64() + log.Trace("found new RevertBatchType event", "batch index", batchIndex) + entry = NewRevertBatch(batchIndex) + + case ds.l1FinalizeBatchEventSignature: + event := &rollup_sync_service.L1FinalizeBatchEvent{} + if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) + } + + batchIndex := event.BatchIndex.Uint64() + log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) + entry = NewFinalizeBatch(batchIndex) + + default: + return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + } + + entries = append(entries, entry) + } + return entries, nil +} + +type commitBatchArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte +} + +func newCommitBatchArgs(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { + var args commitBatchArgs + err := method.Inputs.Copy(&args, values) + return &args, err +} + +func 
newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { + var args commitBatchWithBlobProofArgs + err := method.Inputs.Copy(&args, values) + if err != nil { + return nil, err + } + return &commitBatchArgs{ + Version: args.Version, + ParentBatchHeader: args.ParentBatchHeader, + Chunks: args.Chunks, + SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, + }, nil +} + +type commitBatchWithBlobProofArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte + BlobDataProof []byte +} + +func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Log) (Entry, error) { + if batchIndex == 0 { + return NewCommitBatchDAV0Empty(), nil + } + + txData, err := ds.l1Client.FetchTxData(vLog) + if err != nil { + return nil, fmt.Errorf("failed to fetch tx data, tx hash: %v, err: %w", vLog.TxHash.Hex(), err) + } + if len(txData) < methodIDLength { + return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + } + + method, err := ds.scrollChainABI.MethodById(txData[:methodIDLength]) + if err != nil { + return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) + } + values, err := method.Inputs.Unpack(txData[methodIDLength:]) + if err != nil { + return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) + } + if method.Name == commitBatchMethodName { + args, err := newCommitBatchArgs(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) + } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) + } + switch args.Version { + case 0: + return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) + case 1, 2: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } + } else if method.Name == commitBatchWithBlobProofMethodName { + args, err := newCommitBatchArgsFromCommitBatchWithProof(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) + } + codecVersion := encoding.CodecVersion(args.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) + } + switch args.Version { + case 3, 4: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } + } + + return nil, fmt.Errorf("unknown method name: %s", method.Name) +} diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go new file mode 100644 index 000000000000..135a76d79518 --- /dev/null 
+++ b/rollup/da_syncer/da/commitV0.go @@ -0,0 +1,172 @@ +package da + +import ( + "encoding/binary" + "fmt" + + "github.com/scroll-tech/da-codec/encoding" + + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" +) + +type CommitBatchDAV0 struct { + version uint8 + batchIndex uint64 + parentTotalL1MessagePopped uint64 + skippedL1MessageBitmap []byte + chunks []*encoding.DAChunkRawTx + l1Txs []*types.L1MessageTx + + l1BlockNumber uint64 +} + +func NewCommitBatchDAV0(db ethdb.Database, + codec encoding.Codec, + version uint8, + batchIndex uint64, + parentBatchHeader []byte, + chunks [][]byte, + skippedL1MessageBitmap []byte, + l1BlockNumber uint64, +) (*CommitBatchDAV0, error) { + decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) + if err != nil { + return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err) + } + + return NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, l1BlockNumber) +} + +func NewCommitBatchDAV0WithChunks(db ethdb.Database, + version uint8, + batchIndex uint64, + parentBatchHeader []byte, + decodedChunks []*encoding.DAChunkRawTx, + skippedL1MessageBitmap []byte, + l1BlockNumber uint64, +) (*CommitBatchDAV0, error) { + parentTotalL1MessagePopped := getBatchTotalL1MessagePopped(parentBatchHeader) + l1Txs, err := getL1Messages(db, parentTotalL1MessagePopped, skippedL1MessageBitmap, getTotalMessagesPoppedFromChunks(decodedChunks)) + if err != nil { + return nil, fmt.Errorf("failed to get L1 messages for v0 batch %d: %w", batchIndex, err) + } + + return &CommitBatchDAV0{ + version: version, + batchIndex: batchIndex, + parentTotalL1MessagePopped: parentTotalL1MessagePopped, + skippedL1MessageBitmap: skippedL1MessageBitmap, + chunks: decodedChunks, + l1Txs: l1Txs, + l1BlockNumber: l1BlockNumber, + }, nil +} + +func NewCommitBatchDAV0Empty() *CommitBatchDAV0 { + return &CommitBatchDAV0{ + batchIndex: 0, + } +} + +func (c *CommitBatchDAV0) Type() Type { + return CommitBatchV0Type +} + +func (c *CommitBatchDAV0) L1BlockNumber() uint64 { + return c.l1BlockNumber +} + +func (c *CommitBatchDAV0) BatchIndex() uint64 { + return c.batchIndex +} + +func (c *CommitBatchDAV0) CompareTo(other Entry) int { + if c.BatchIndex() < other.BatchIndex() { + return -1 + } else if c.BatchIndex() > other.BatchIndex() { + return 1 + } + return 0 +} + +func (c *CommitBatchDAV0) Blocks() []*PartialBlock { + var blocks []*PartialBlock + l1TxPointer := 0 + + curL1TxIndex := c.parentTotalL1MessagePopped + for _, chunk := range c.chunks { + for blockId, daBlock := range chunk.Blocks { + // create txs + txs := make(types.Transactions, 0, daBlock.NumTransactions()) + // insert l1 msgs + for l1TxPointer < len(c.l1Txs) && c.l1Txs[l1TxPointer].QueueIndex < curL1TxIndex+uint64(daBlock.NumL1Messages()) { + l1Tx := types.NewTx(c.l1Txs[l1TxPointer]) + txs = append(txs, l1Tx) + l1TxPointer++ + } + curL1TxIndex += uint64(daBlock.NumL1Messages()) + + // insert l2 txs + txs = append(txs, chunk.Transactions[blockId]...) 
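+			// txs now holds, in order, the L1 messages consumed by this block
+			// (skipped messages were already filtered out in getL1Messages),
+			// followed by the L2 transactions decoded from the chunk.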
+
+			block := NewPartialBlock(
+				&PartialHeader{
+					Number:     daBlock.Number(),
+					Time:       daBlock.Timestamp(),
+					BaseFee:    daBlock.BaseFee(),
+					GasLimit:   daBlock.GasLimit(),
+					Difficulty: 10,                             // TODO: replace with real difficulty
+					ExtraData:  []byte{1, 2, 3, 4, 5, 6, 7, 8}, // TODO: replace with real extra data
+				},
+				txs)
+			blocks = append(blocks, block)
+		}
+	}
+
+	return blocks
+}
+
+func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) int {
+	totalL1MessagePopped := 0
+	for _, chunk := range decodedChunks {
+		for _, block := range chunk.Blocks {
+			totalL1MessagePopped += int(block.NumL1Messages())
+		}
+	}
+	return totalL1MessagePopped
+}
+
+func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) ([]*types.L1MessageTx, error) {
+	var txs []*types.L1MessageTx
+	decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err)
+	}
+
+	// collect all required L1 messages, excluding the ones marked in the skipped bitmap
+	currentIndex := parentTotalL1MessagePopped
+	for index := 0; index < totalL1MessagePopped; index++ {
+		if encoding.IsL1MessageSkipped(decodedSkippedBitmap, currentIndex-parentTotalL1MessagePopped) {
+			currentIndex++
+			continue
+		}
+		l1Tx := rawdb.ReadL1Message(db, currentIndex)
+		if l1Tx == nil {
+			// the message is not yet available in the local database;
+			// we return serrors.EOFError so the syncing pipeline retries with a backoff
+			return nil, serrors.EOFError
+		}
+		txs = append(txs, l1Tx)
+		currentIndex++
+	}
+
+	return txs, nil
+}
+
+func getBatchTotalL1MessagePopped(data []byte) uint64 {
+	// the total number of L1 messages popped is stored in bytes 17-24 (inclusive) of the batch header, according to the codec spec
+	return binary.BigEndian.Uint64(data[17:25])
+}
diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go
new file mode 100644
index 000000000000..4670eec8bbcb
--- /dev/null
+++ b/rollup/da_syncer/da/commitV1.go
@@ -0,0 +1,82 @@
+package da
+
+import (
+	"context"
+	"crypto/sha256"
+	"fmt"
+
+	"github.com/scroll-tech/da-codec/encoding"
+
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
+	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+)
+
+type CommitBatchDAV1 struct {
+	*CommitBatchDAV0
+}
+
+func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database,
+	codec encoding.Codec,
+	l1Client *rollup_sync_service.L1Client,
+	blobClient blob_client.BlobClient,
+	vLog *types.Log,
+	version uint8,
+	batchIndex uint64,
+	parentBatchHeader []byte,
+	chunks [][]byte,
+	skippedL1MessageBitmap []byte,
+) (*CommitBatchDAV1, error) {
+	decodedChunks, err := codec.DecodeDAChunksRawTx(chunks)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err)
+	}
+
+	versionedHash, err := l1Client.FetchTxBlobHash(vLog)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err)
+	}
+
+	blob, err := blobClient.GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, vLog.BlockNumber)
+	if err != nil {
+		return nil, fmt.Errorf("failed to fetch blob from blob client, err: %w", err)
+	}
+	if blob == nil {
+		return nil, fmt.Errorf("unexpected: blob client returned nil blob without an error, batch index: %d, versionedHash: %s, blobClient: %T", batchIndex,
versionedHash.String(), blobClient)
+	}
+
+	// compute the blob's versioned hash and compare it with the one from the tx
+	c, err := kzg4844.BlobToCommitment(blob)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create blob commitment: %w", err)
+	}
+	blobVersionedHash := common.Hash(kzg4844.CalcBlobHashV1(sha256.New(), &c))
+	if blobVersionedHash != versionedHash {
+		return nil, fmt.Errorf("versioned hash computed from the fetched blob does not match the versioned hash from the tx, expected: %s, got: %s", versionedHash.String(), blobVersionedHash.String())
+	}
+
+	// decode txs from blob
+	err = codec.DecodeTxsFromBlob(blob, decodedChunks)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode txs from blob: %w", err)
+	}
+
+	if decodedChunks == nil {
+		return nil, fmt.Errorf("decodedChunks is nil after decoding")
+	}
+
+	v0, err := NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, vLog.BlockNumber)
+	if err != nil {
+		return nil, err
+	}
+
+	return &CommitBatchDAV1{v0}, nil
+}
+
+func (c *CommitBatchDAV1) Type() Type {
+	return CommitBatchWithBlobType
+}
diff --git a/rollup/da_syncer/da/da.go b/rollup/da_syncer/da/da.go
new file mode 100644
index 000000000000..1ad618d7ba3d
--- /dev/null
+++ b/rollup/da_syncer/da/da.go
@@ -0,0 +1,69 @@
+package da
+
+import (
+	"math/big"
+
+	"github.com/scroll-tech/go-ethereum/core/types"
+)
+
+type Type int
+
+const (
+	// CommitBatchV0Type is a commit batch event whose data lives entirely in calldata (codec v0)
+	CommitBatchV0Type Type = iota
+	// CommitBatchWithBlobType is a commit batch event whose payload lives in a blob (codec v1, v2, v3, v4)
+	CommitBatchWithBlobType
+	// RevertBatchType is a revert batch event
+	RevertBatchType
+	// FinalizeBatchType is a finalize batch event
+	FinalizeBatchType
+)
+
+// Entry represents a single DA event (commit, revert, finalize).
+type Entry interface {
+	Type() Type
+	BatchIndex() uint64
+	L1BlockNumber() uint64
+	CompareTo(Entry) int
+}
+
+type EntryWithBlocks interface {
+	Entry
+	Blocks() []*PartialBlock
+}
+
+type Entries []Entry
+
+// PartialHeader represents a partial header (from DA) of a block.
+type PartialHeader struct {
+	Number     uint64
+	Time       uint64
+	BaseFee    *big.Int
+	GasLimit   uint64
+	Difficulty uint64
+	ExtraData  []byte
+}
+
+func (h *PartialHeader) ToHeader() *types.Header {
+	return &types.Header{
+		Number:     big.NewInt(0).SetUint64(h.Number),
+		Time:       h.Time,
+		BaseFee:    h.BaseFee,
+		GasLimit:   h.GasLimit,
+		Difficulty: new(big.Int).SetUint64(h.Difficulty),
+		Extra:      h.ExtraData,
+	}
+}
+
+// PartialBlock represents a partial block (from DA).
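+// It pairs the PartialHeader with the block's transactions; everything not
+// recoverable from DA (state root, receipts, etc.) is recomputed when the
+// block is re-executed via BuildAndWriteBlock.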
+type PartialBlock struct { + PartialHeader *PartialHeader + Transactions types.Transactions +} + +func NewPartialBlock(partialHeader *PartialHeader, txs types.Transactions) *PartialBlock { + return &PartialBlock{ + PartialHeader: partialHeader, + Transactions: txs, + } +} diff --git a/rollup/da_syncer/da/finalize.go b/rollup/da_syncer/da/finalize.go new file mode 100644 index 000000000000..14d6c2a644cb --- /dev/null +++ b/rollup/da_syncer/da/finalize.go @@ -0,0 +1,34 @@ +package da + +type FinalizeBatch struct { + batchIndex uint64 + + l1BlockNumber uint64 +} + +func NewFinalizeBatch(batchIndex uint64) *FinalizeBatch { + return &FinalizeBatch{ + batchIndex: batchIndex, + } +} + +func (f *FinalizeBatch) Type() Type { + return FinalizeBatchType +} + +func (f *FinalizeBatch) L1BlockNumber() uint64 { + return f.l1BlockNumber +} + +func (f *FinalizeBatch) BatchIndex() uint64 { + return f.batchIndex +} + +func (f *FinalizeBatch) CompareTo(other Entry) int { + if f.BatchIndex() < other.BatchIndex() { + return -1 + } else if f.BatchIndex() > other.BatchIndex() { + return 1 + } + return 0 +} diff --git a/rollup/da_syncer/da/revert.go b/rollup/da_syncer/da/revert.go new file mode 100644 index 000000000000..d84f22ebaa7b --- /dev/null +++ b/rollup/da_syncer/da/revert.go @@ -0,0 +1,33 @@ +package da + +type RevertBatch struct { + batchIndex uint64 + + l1BlockNumber uint64 +} + +func NewRevertBatch(batchIndex uint64) *RevertBatch { + return &RevertBatch{ + batchIndex: batchIndex, + } +} + +func (r *RevertBatch) Type() Type { + return RevertBatchType +} + +func (r *RevertBatch) L1BlockNumber() uint64 { + return r.l1BlockNumber +} +func (r *RevertBatch) BatchIndex() uint64 { + return r.batchIndex +} + +func (r *RevertBatch) CompareTo(other Entry) int { + if r.BatchIndex() < other.BatchIndex() { + return -1 + } else if r.BatchIndex() > other.BatchIndex() { + return 1 + } + return 0 +} diff --git a/rollup/da_syncer/da_queue.go b/rollup/da_syncer/da_queue.go new file mode 100644 index 000000000000..64673a4a646b --- /dev/null +++ b/rollup/da_syncer/da_queue.go @@ -0,0 +1,70 @@ +package da_syncer + +import ( + "context" + "errors" + + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" +) + +// DAQueue is a pipeline stage that reads DA entries from a DataSource and provides them to the next stage. 
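+// When the current DataSource reports da.ErrSourceExhausted, the queue records
+// the source's last L1 height and lazily opens a new source from there on the
+// next call to NextDA.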
+type DAQueue struct { + l1height uint64 + dataSourceFactory *DataSourceFactory + dataSource DataSource + da da.Entries +} + +func NewDAQueue(l1height uint64, dataSourceFactory *DataSourceFactory) *DAQueue { + return &DAQueue{ + l1height: l1height, + dataSourceFactory: dataSourceFactory, + dataSource: nil, + da: make(da.Entries, 0), + } +} + +func (dq *DAQueue) NextDA(ctx context.Context) (da.Entry, error) { + for len(dq.da) == 0 { + err := dq.getNextData(ctx) + if err != nil { + return nil, err + } + } + daEntry := dq.da[0] + dq.da = dq.da[1:] + return daEntry, nil +} + +func (dq *DAQueue) getNextData(ctx context.Context) error { + var err error + if dq.dataSource == nil { + dq.dataSource, err = dq.dataSourceFactory.OpenDataSource(ctx, dq.l1height) + if err != nil { + return err + } + } + + dq.da, err = dq.dataSource.NextData() + if err == nil { + return nil + } + + // previous dataSource has been exhausted, create new + if errors.Is(err, da.ErrSourceExhausted) { + dq.l1height = dq.dataSource.L1Height() + dq.dataSource = nil + + // we return EOFError to be handled in pipeline + return serrors.EOFError + } + + return err +} + +func (dq *DAQueue) Reset(height uint64) { + dq.l1height = height + dq.dataSource = nil + dq.da = make(da.Entries, 0) +} diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go new file mode 100644 index 000000000000..c3c223ff22a9 --- /dev/null +++ b/rollup/da_syncer/da_syncer.go @@ -0,0 +1,49 @@ +package da_syncer + +import ( + "fmt" + + "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" +) + +var ( + ErrBlockTooLow = fmt.Errorf("block number is too low") + ErrBlockTooHigh = fmt.Errorf("block number is too high") +) + +type DASyncer struct { + blockchain *core.BlockChain +} + +func NewDASyncer(blockchain *core.BlockChain) *DASyncer { + return &DASyncer{ + blockchain: blockchain, + } +} + +// SyncOneBlock receives a PartialBlock, makes sure it's the next block in the chain, executes it and inserts it to the blockchain. +func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error { + currentBlock := s.blockchain.CurrentBlock() + + // we expect blocks to be consecutive. block.PartialHeader.Number == parentBlock.Number+1. 
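+	// A block at or below the current head yields ErrBlockTooLow (the pipeline
+	// skips it), while a gap above the head yields ErrBlockTooHigh (the
+	// pipeline resets and re-reads from an earlier L1 block).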
+	if block.PartialHeader.Number <= currentBlock.Number.Uint64() {
+		log.Debug("block number is too low", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64())
+		return ErrBlockTooLow
+	} else if block.PartialHeader.Number > currentBlock.Number.Uint64()+1 {
+		log.Debug("block number is too high", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64())
+		return ErrBlockTooHigh
+	}
+
+	parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number.Uint64())
+	if _, err := s.blockchain.BuildAndWriteBlock(parentBlock, block.PartialHeader.ToHeader(), block.Transactions); err != nil {
+		return fmt.Errorf("failed building and writing block, number: %d, error: %v", block.PartialHeader.Number, err)
+	}
+
+	if s.blockchain.CurrentBlock().Number.Uint64()%1000 == 0 {
+		log.Info("L1 sync progress", "blockchain height", s.blockchain.CurrentBlock().Number.Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root)
+	}
+
+	return nil
+}
diff --git a/rollup/da_syncer/data_source.go b/rollup/da_syncer/data_source.go
new file mode 100644
index 000000000000..f417d09af00e
--- /dev/null
+++ b/rollup/da_syncer/data_source.go
@@ -0,0 +1,44 @@
+package da_syncer
+
+import (
+	"context"
+	"errors"
+
+	"github.com/scroll-tech/go-ethereum/core"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+	"github.com/scroll-tech/go-ethereum/params"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/da"
+	"github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service"
+)
+
+type DataSource interface {
+	NextData() (da.Entries, error)
+	L1Height() uint64
+}
+
+type DataSourceFactory struct {
+	config        Config
+	genesisConfig *params.ChainConfig
+	l1Client      *rollup_sync_service.L1Client
+	blobClient    blob_client.BlobClient
+	db            ethdb.Database
+}
+
+func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory {
+	return &DataSourceFactory{
+		config:        config,
+		genesisConfig: genesisConfig,
+		l1Client:      l1Client,
+		blobClient:    blobClient,
+		db:            db,
+	}
+}
+
+func (ds *DataSourceFactory) OpenDataSource(ctx context.Context, l1height uint64) (DataSource, error) {
+	if ds.config.FetcherMode == L1RPC {
+		return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db)
+	} else {
+		return nil, errors.New("snapshot_data_source: not implemented")
+	}
+}
diff --git a/rollup/da_syncer/modes.go b/rollup/da_syncer/modes.go
new file mode 100644
index 000000000000..bfcc1d1dfba0
--- /dev/null
+++ b/rollup/da_syncer/modes.go
@@ -0,0 +1,52 @@
+package da_syncer
+
+import "fmt"
+
+// FetcherMode represents the fetching mode of the DA syncer
+type FetcherMode int
+
+const (
+	// L1RPC mode fetches DA from the L1 RPC endpoint
+	L1RPC FetcherMode = iota
+	// Snapshot mode loads DA from a snapshot file
+	Snapshot
+)
+
+func (mode FetcherMode) IsValid() bool {
+	return mode >= L1RPC && mode <= Snapshot
+}
+
+// String implements the stringer interface.
+func (mode FetcherMode) String() string { + switch mode { + case L1RPC: + return "l1rpc" + case Snapshot: + return "snapshot" + default: + return "unknown" + } +} + +func (mode FetcherMode) MarshalText() ([]byte, error) { + switch mode { + case L1RPC: + return []byte("l1rpc"), nil + case Snapshot: + return []byte("snapshot"), nil + default: + return nil, fmt.Errorf("unknown sync mode %d", mode) + } +} + +func (mode *FetcherMode) UnmarshalText(text []byte) error { + switch string(text) { + case "l1rpc": + *mode = L1RPC + case "snapshot": + *mode = Snapshot + default: + return fmt.Errorf(`unknown sync mode %q, want "l1rpc" or "snapshot"`, text) + } + return nil +} diff --git a/rollup/da_syncer/serrors/errors.go b/rollup/da_syncer/serrors/errors.go new file mode 100644 index 000000000000..aa0426f0771d --- /dev/null +++ b/rollup/da_syncer/serrors/errors.go @@ -0,0 +1,62 @@ +package serrors + +import ( + "fmt" +) + +const ( + temporary Type = iota + eof +) + +var ( + TemporaryError = NewTemporaryError(nil) + EOFError = NewEOFError(nil) +) + +type Type uint8 + +func (t Type) String() string { + switch t { + case temporary: + return "temporary" + case eof: + return "EOF" + default: + return "unknown" + } +} + +type syncError struct { + t Type + err error +} + +func NewTemporaryError(err error) error { + return &syncError{t: temporary, err: err} +} + +func NewEOFError(err error) error { + return &syncError{t: eof, err: err} +} + +func (s *syncError) Error() string { + return fmt.Sprintf("%s: %v", s.t, s.err) +} + +func (s *syncError) Unwrap() error { + return s.err +} + +func (s *syncError) Is(target error) bool { + if target == nil { + return s == nil + } + + targetSyncErr, ok := target.(*syncError) + if !ok { + return false + } + + return s.t == targetSyncErr.t +} diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go new file mode 100644 index 000000000000..6795f2608e05 --- /dev/null +++ b/rollup/da_syncer/syncing_pipeline.go @@ -0,0 +1,233 @@ +package da_syncer + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/scroll-tech/go-ethereum/common/backoff" + "github.com/scroll-tech/go-ethereum/core" + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/ethdb" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" + "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/sync_service" +) + +// Config is the configuration parameters of data availability syncing. +type Config struct { + FetcherMode FetcherMode // mode of fetcher + SnapshotFilePath string // path to snapshot file + BlobScanAPIEndpoint string // BlobScan blob api endpoint + BlockNativeAPIEndpoint string // BlockNative blob api endpoint + BeaconNodeAPIEndpoint string // Beacon node api endpoint +} + +// SyncingPipeline is a derivation pipeline for syncing data from L1 and DA and transform it into +// L2 blocks and chain. 
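+// The stages are wired as DAQueue -> BatchQueue -> BlockQueue -> DASyncer:
+// DA entries are read from L1, grouped into batches, unpacked into partial
+// blocks, and finally executed and written to the chain one block at a time.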
+type SyncingPipeline struct { + ctx context.Context + cancel context.CancelFunc + wg sync.WaitGroup + expBackoff *backoff.Exponential + + l1DeploymentBlock uint64 + + db ethdb.Database + blockchain *core.BlockChain + blockQueue *BlockQueue + daSyncer *DASyncer +} + +func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient sync_service.EthClient, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { + scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() + if err != nil { + return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) + } + + l1Client, err := rollup_sync_service.NewL1Client(ctx, ethClient, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) + if err != nil { + return nil, err + } + + blobClientList := blob_client.NewBlobClients() + if config.BeaconNodeAPIEndpoint != "" { + beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint, l1Client) + if err != nil { + log.Warn("failed to create BeaconNodeClient", "err", err) + } else { + blobClientList.AddBlobClient(beaconNodeClient) + } + } + if config.BlobScanAPIEndpoint != "" { + blobClientList.AddBlobClient(blob_client.NewBlobScanClient(config.BlobScanAPIEndpoint)) + } + if config.BlockNativeAPIEndpoint != "" { + blobClientList.AddBlobClient(blob_client.NewBlockNativeClient(config.BlockNativeAPIEndpoint)) + } + if blobClientList.Size() == 0 { + return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag") + } + + dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Client, blobClientList, db) + syncedL1Height := l1DeploymentBlock - 1 + from := rawdb.ReadDASyncedL1BlockNumber(db) + if from != nil { + syncedL1Height = *from + } + + daQueue := NewDAQueue(syncedL1Height, dataSourceFactory) + batchQueue := NewBatchQueue(daQueue, db) + blockQueue := NewBlockQueue(batchQueue) + daSyncer := NewDASyncer(blockchain) + + ctx, cancel := context.WithCancel(ctx) + return &SyncingPipeline{ + ctx: ctx, + cancel: cancel, + expBackoff: backoff.NewExponential(100*time.Millisecond, 10*time.Second, 100*time.Millisecond), + wg: sync.WaitGroup{}, + l1DeploymentBlock: l1DeploymentBlock, + db: db, + blockchain: blockchain, + blockQueue: blockQueue, + daSyncer: daSyncer, + }, nil +} + +func (s *SyncingPipeline) Step() error { + block, err := s.blockQueue.NextBlock(s.ctx) + if err != nil { + return err + } + err = s.daSyncer.SyncOneBlock(block) + return err +} + +func (s *SyncingPipeline) Start() { + log.Info("sync from DA: starting pipeline") + + s.wg.Add(1) + go func() { + s.mainLoop() + s.wg.Done() + }() +} + +func (s *SyncingPipeline) mainLoop() { + stepCh := make(chan struct{}, 1) + var delayedStepCh <-chan time.Time + var resetCounter int + var tempErrorCounter int + + // reqStep is a helper function to request a step to be executed. + // If delay is true, it will request a delayed step with exponential backoff, otherwise it will request an immediate step. 
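+	// stepCh has capacity 1 and the send below is non-blocking, so step
+	// requests issued while one is already pending coalesce into a single step;
+	// likewise only one delayed step can be in flight at a time.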
+ reqStep := func(delay bool) { + if delay { + if delayedStepCh == nil { + delayDur := s.expBackoff.NextDuration() + delayedStepCh = time.After(delayDur) + log.Debug("requesting delayed step", "delay", delayDur, "attempt", s.expBackoff.Attempt()) + } else { + log.Debug("ignoring step request because of ongoing delayed step", "attempt", s.expBackoff.Attempt()) + } + } else { + select { + case stepCh <- struct{}{}: + default: + } + } + } + + // start pipeline + reqStep(false) + + for { + select { + case <-s.ctx.Done(): + return + default: + } + + select { + case <-s.ctx.Done(): + return + case <-delayedStepCh: + delayedStepCh = nil + reqStep(false) + case <-stepCh: + err := s.Step() + if err == nil { + // step succeeded, reset exponential backoff and continue + reqStep(false) + s.expBackoff.Reset() + resetCounter = 0 + tempErrorCounter = 0 + continue + } + + if errors.Is(err, serrors.EOFError) { + // pipeline is empty, request a delayed step + // TODO: eventually (with state manager) this should not trigger a delayed step because external events will trigger a new step anyway + reqStep(true) + tempErrorCounter = 0 + continue + } else if errors.Is(err, serrors.TemporaryError) { + log.Warn("syncing pipeline step failed due to temporary error, retrying", "err", err) + if tempErrorCounter > 100 { + log.Warn("syncing pipeline step failed due to 100 consecutive temporary errors, stopping pipeline worker", "last err", err) + return + } + + // temporary error, request a delayed step + reqStep(true) + tempErrorCounter++ + continue + } else if errors.Is(err, ErrBlockTooLow) { + // block number returned by the block queue is too low, + // we skip the blocks until we reach the correct block number again. + reqStep(false) + tempErrorCounter = 0 + continue + } else if errors.Is(err, ErrBlockTooHigh) { + // block number returned by the block queue is too high, + // reset the pipeline and move backwards from the last L1 block we read + s.reset(resetCounter) + resetCounter++ + reqStep(false) + tempErrorCounter = 0 + continue + } else if errors.Is(err, context.Canceled) { + log.Info("syncing pipeline stopped due to cancelled context", "err", err) + return + } + + log.Warn("syncing pipeline step failed due to unrecoverable error, stopping pipeline worker", "err", err) + return + } + } +} + +func (s *SyncingPipeline) Stop() { + log.Info("sync from DA: stopping pipeline...") + s.cancel() + s.wg.Wait() + log.Info("sync from DA: stopping pipeline... done") +} + +func (s *SyncingPipeline) reset(resetCounter int) { + amount := 100 * uint64(resetCounter) + syncedL1Height := s.l1DeploymentBlock - 1 + from := rawdb.ReadDASyncedL1BlockNumber(s.db) + if from != nil && *from+amount > syncedL1Height { + syncedL1Height = *from - amount + rawdb.WriteDASyncedL1BlockNumber(s.db, syncedL1Height) + } + log.Info("resetting syncing pipeline", "syncedL1Height", syncedL1Height) + s.blockQueue.Reset(syncedL1Height) +} diff --git a/rollup/rollup_sync_service/abi.go b/rollup/rollup_sync_service/abi.go index 6975001f1870..428413dec9c2 100644 --- a/rollup/rollup_sync_service/abi.go +++ b/rollup/rollup_sync_service/abi.go @@ -10,8 +10,8 @@ import ( "github.com/scroll-tech/go-ethereum/core/types" ) -// scrollChainMetaData contains ABI of the ScrollChain contract. -var scrollChainMetaData = &bind.MetaData{ +// ScrollChainMetaData contains ABI of the ScrollChain contract. 
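+// It is exported so that other packages, such as rollup/da_syncer, can reuse
+// the same ABI definition instead of duplicating it.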
+var ScrollChainMetaData = &bind.MetaData{ ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": 
\"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": 
\"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", } diff --git a/rollup/rollup_sync_service/abi_test.go b/rollup/rollup_sync_service/abi_test.go index d47a2c72e190..550c950bb337 100644 --- a/rollup/rollup_sync_service/abi_test.go +++ b/rollup/rollup_sync_service/abi_test.go @@ -13,7 +13,7 @@ import ( ) func TestEventSignatures(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() if err != nil { t.Fatal("failed to get scroll chain abi", "err", err) } @@ -24,7 +24,7 @@ func TestEventSignatures(t *testing.T) { } func TestUnpackLog(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) mockBatchIndex := big.NewInt(123) diff --git a/rollup/rollup_sync_service/l1client.go b/rollup/rollup_sync_service/l1client.go index 34ffc4db1bc2..b6be3e0bc611 100644 --- a/rollup/rollup_sync_service/l1client.go +++ b/rollup/rollup_sync_service/l1client.go @@ -27,9 +27,9 @@ type L1Client struct { l1FinalizeBatchEventSignature common.Hash } -// newL1Client initializes a new L1Client instance with the provided configuration. +// NewL1Client initializes a new L1Client instance with the provided configuration. // It checks for a valid scrollChainAddress and verifies the chain ID. -func newL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId uint64, scrollChainAddress common.Address, scrollChainABI *abi.ABI) (*L1Client, error) { +func NewL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId uint64, scrollChainAddress common.Address, scrollChainABI *abi.ABI) (*L1Client, error) { if scrollChainAddress == (common.Address{}) { return nil, errors.New("must pass non-zero scrollChainAddress to L1Client") } @@ -55,9 +55,9 @@ func newL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId return &client, nil } -// fetcRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. -func (c *L1Client) fetchRollupEventsInRange(from, to uint64) ([]types.Log, error) { - log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) +// FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. +func (c *L1Client) FetchRollupEventsInRange(from, to uint64) ([]types.Log, error) { + log.Trace("L1Client FetchRollupEventsInRange", "fromBlock", from, "toBlock", to) query := ethereum.FilterQuery{ FromBlock: big.NewInt(int64(from)), // inclusive @@ -79,8 +79,8 @@ func (c *L1Client) fetchRollupEventsInRange(from, to uint64) ([]types.Log, error return logs, nil } -// getLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. -func (c *L1Client) getLatestFinalizedBlockNumber() (uint64, error) { +// GetLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. 
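+// It queries the header at rpc.FinalizedBlockNumber (the "finalized" block
+// tag), so the connected L1 node must expose post-merge finality information.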
+func (c *L1Client) GetLatestFinalizedBlockNumber() (uint64, error) { header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) if err != nil { return 0, err @@ -90,3 +90,69 @@ func (c *L1Client) getLatestFinalizedBlockNumber() (uint64, error) { } return header.Number.Uint64(), nil } + +// FetchTxData fetches tx data corresponding to given event log +func (c *L1Client) FetchTxData(vLog *types.Log) ([]byte, error) { + tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) + if err != nil { + log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", + "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) + block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) + if err != nil { + return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + } + + found := false + for _, txInBlock := range block.Transactions() { + if txInBlock.Hash() == vLog.TxHash { + tx = txInBlock + found = true + break + } + } + if !found { + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + } + } + + return tx.Data(), nil +} + +// FetchTxBlobHash fetches tx blob hash corresponding to given event log +func (c *L1Client) FetchTxBlobHash(vLog *types.Log) (common.Hash, error) { + tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) + if err != nil { + log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", + "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) + block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) + if err != nil { + return common.Hash{}, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + } + + found := false + for _, txInBlock := range block.Transactions() { + if txInBlock.Hash() == vLog.TxHash { + tx = txInBlock + found = true + break + } + } + if !found { + return common.Hash{}, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + } + } + blobHashes := tx.BlobHashes() + if len(blobHashes) == 0 { + return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", vLog.TxHash.Hex()) + } + return blobHashes[0], nil +} + +// GetHeaderByNumber fetches the block header by number +func (c *L1Client) GetHeaderByNumber(blockNumber uint64) (*types.Header, error) { + header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(0).SetUint64(blockNumber)) + if err != nil { + return nil, err + } + return header, nil +} diff --git a/rollup/rollup_sync_service/l1client_test.go b/rollup/rollup_sync_service/l1client_test.go index 8c7bd92f8b11..acc3f8daad52 100644 --- a/rollup/rollup_sync_service/l1client_test.go +++ b/rollup/rollup_sync_service/l1client_test.go @@ -23,16 +23,16 @@ func TestL1Client(t *testing.T) { t.Fatal("failed to get scroll chain abi", "err", err) } scrollChainAddress := common.HexToAddress("0x0123456789abcdef") - l1Client, err := newL1Client(ctx, mockClient, 11155111, scrollChainAddress, scrollChainABI) + l1Client, err := NewL1Client(ctx, mockClient, 11155111, scrollChainAddress, 
scrollChainABI) require.NoError(t, err, "Failed to initialize L1Client") - blockNumber, err := l1Client.getLatestFinalizedBlockNumber() + blockNumber, err := l1Client.GetLatestFinalizedBlockNumber() assert.NoError(t, err, "Error getting latest confirmed block number") assert.Equal(t, uint64(36), blockNumber, "Unexpected block number") - logs, err := l1Client.fetchRollupEventsInRange(0, blockNumber) + logs, err := l1Client.FetchRollupEventsInRange(0, blockNumber) assert.NoError(t, err, "Error fetching rollup events in range") - assert.Empty(t, logs, "Expected no logs from fetchRollupEventsInRange") + assert.Empty(t, logs, "Expected no logs from FetchRollupEventsInRange") } type mockEthClient struct { diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index c03d63e05c47..4c5261511328 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -83,7 +83,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } - client, err := newL1Client(ctx, l1Client, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) + client, err := NewL1Client(ctx, l1Client, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) if err != nil { return nil, fmt.Errorf("failed to initialize l1 client: %w", err) } @@ -176,7 +176,7 @@ func (s *RollupSyncService) fetchRollupEvents() { s.stateMu.Lock() defer s.stateMu.Unlock() - latestConfirmed, err := s.client.getLatestFinalizedBlockNumber() + latestConfirmed, err := s.client.GetLatestFinalizedBlockNumber() if err != nil { log.Warn("failed to get latest confirmed block number", "err", err) return @@ -196,7 +196,7 @@ func (s *RollupSyncService) fetchRollupEvents() { to = latestConfirmed } - logs, err := s.client.fetchRollupEventsInRange(from, to) + logs, err := s.client.FetchRollupEventsInRange(from, to) if err != nil { log.Error("failed to fetch rollup events in range", "from block", from, "to block", to, "err", err) return diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 61d63cdb7419..310d4be2515d 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -51,7 +51,7 @@ func TestRollupSyncServiceStartAndStop(t *testing.T) { } func TestDecodeBatchVersionAndChunkBlockRangesCodecv0(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ @@ -110,7 +110,7 @@ func TestDecodeBatchVersionAndChunkBlockRangesCodecv0(t *testing.T) { } func TestDecodeBatchVersionAndChunkBlockRangesCodecv1(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ @@ -163,7 +163,7 @@ func TestDecodeBatchVersionAndChunkBlockRangesCodecv1(t *testing.T) { } func TestDecodeBatchVersionAndChunkBlockRangesCodecv2(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ @@ -216,7 +216,7 @@ func TestDecodeBatchVersionAndChunkBlockRangesCodecv2(t *testing.T) { } func 
TestDecodeBatchVersionAndChunkBlockRangesCodecv3(t *testing.T) { - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() require.NoError(t, err) service := &RollupSyncService{ From b30006ffb803c8eed97cfef2fa2a861be080058c Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 12:08:38 +0800 Subject: [PATCH 02/36] port changes from #1068 --- cmd/utils/flags.go | 7 - core/blockchain.go | 2 +- core/rawdb/accessors_rollup_event.go | 41 --- core/rawdb/accessors_rollup_event_test.go | 64 ----- core/rawdb/schema.go | 6 - eth/ethconfig/config.go | 3 - go.mod | 5 +- go.sum | 20 +- rollup/da_syncer/da_syncer.go | 14 +- rollup/da_syncer/data_source.go | 7 +- rollup/da_syncer/syncing_pipeline.go | 8 +- .../rollup_sync_service.go | 248 +++--------------- .../rollup_sync_service_test.go | 225 ++++++++++------ 13 files changed, 211 insertions(+), 439 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 445248b1ff6f..cc0c8aa3a7e6 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -74,7 +74,6 @@ import ( "github.com/scroll-tech/go-ethereum/p2p/nat" "github.com/scroll-tech/go-ethereum/p2p/netutil" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/rollup/da_syncer" "github.com/scroll-tech/go-ethereum/rollup/tracing" "github.com/scroll-tech/go-ethereum/rpc" ) @@ -1627,12 +1626,6 @@ func setEnableRollupVerify(ctx *cli.Context, cfg *ethconfig.Config) { func setDA(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.IsSet(DASyncEnabledFlag.Name) { cfg.EnableDASyncing = ctx.Bool(DASyncEnabledFlag.Name) - if ctx.IsSet(DAModeFlag.Name) { - cfg.DA.FetcherMode = *flags.GlobalTextMarshaler(ctx, DAModeFlag.Name).(*da_syncer.FetcherMode) - } - if ctx.IsSet(DASnapshotFileFlag.Name) { - cfg.DA.SnapshotFilePath = ctx.String(DASnapshotFileFlag.Name) - } if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) { cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name) } diff --git a/core/blockchain.go b/core/blockchain.go index e3294ded5bde..a0bc05924531 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1850,7 +1850,7 @@ func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types l.BlockHash = blockHash } - return bc.writeBlockAndSetHead(fullBlock, receipts, logs, statedb, false) + return bc.writeBlockWithState(fullBlock, receipts, logs, statedb, false) } // insertSideChain is called when an import batch hits upon a pruned ancestor diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 6670b4b7b85f..1b60f6e4f0d8 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -58,47 +58,6 @@ func ReadRollupEventSyncedL1BlockNumber(db ethdb.Reader) *uint64 { return &rollupEventSyncedL1BlockNumber } -// WriteBatchChunkRanges writes the block ranges for each chunk within a batch to the database. -// It serializes the chunk ranges using RLP and stores them under a key derived from the batch index. -// for backward compatibility, new info is also stored in CommittedBatchMeta. 
-func WriteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64, chunkBlockRanges []*ChunkBlockRange) { - value, err := rlp.EncodeToBytes(chunkBlockRanges) - if err != nil { - log.Crit("failed to RLP encode batch chunk ranges", "batch index", batchIndex, "err", err) - } - if err := db.Put(batchChunkRangesKey(batchIndex), value); err != nil { - log.Crit("failed to store batch chunk ranges", "batch index", batchIndex, "value", value, "err", err) - } -} - -// DeleteBatchChunkRanges removes the block ranges of all chunks associated with a specific batch from the database. -// Note: Only non-finalized batches can be reverted. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func DeleteBatchChunkRanges(db ethdb.KeyValueWriter, batchIndex uint64) { - if err := db.Delete(batchChunkRangesKey(batchIndex)); err != nil { - log.Crit("failed to delete batch chunk ranges", "batch index", batchIndex, "err", err) - } -} - -// ReadBatchChunkRanges retrieves the block ranges of all chunks associated with a specific batch from the database. -// It returns a list of ChunkBlockRange pointers, or nil if no chunk ranges are found for the given batch index. -// for backward compatibility, new info is also stored in CommittedBatchMeta. -func ReadBatchChunkRanges(db ethdb.Reader, batchIndex uint64) []*ChunkBlockRange { - data, err := db.Get(batchChunkRangesKey(batchIndex)) - if err != nil && isNotFoundErr(err) { - return nil - } - if err != nil { - log.Crit("failed to read batch chunk ranges from database", "err", err) - } - - cr := new([]*ChunkBlockRange) - if err := rlp.Decode(bytes.NewReader(data), cr); err != nil { - log.Crit("Invalid ChunkBlockRange RLP", "batch index", batchIndex, "data", data, "err", err) - } - return *cr -} - // WriteFinalizedBatchMeta stores the metadata of a finalized batch in the database. 
func WriteFinalizedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, finalizedBatchMeta *FinalizedBatchMeta) { value, err := rlp.EncodeToBytes(finalizedBatchMeta) diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index c74e93524376..a22880ee05a4 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -147,70 +147,6 @@ func TestFinalizedBatchMeta(t *testing.T) { } } -func TestBatchChunkRanges(t *testing.T) { - chunks := [][]*ChunkBlockRange{ - { - {StartBlockNumber: 1, EndBlockNumber: 100}, - {StartBlockNumber: 101, EndBlockNumber: 200}, - }, - { - {StartBlockNumber: 201, EndBlockNumber: 300}, - {StartBlockNumber: 301, EndBlockNumber: 400}, - }, - { - {StartBlockNumber: 401, EndBlockNumber: 500}, - }, - } - - db := NewMemoryDatabase() - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - WriteBatchChunkRanges(db, batchIndex, chunkRange) - } - - for i, chunkRange := range chunks { - batchIndex := uint64(i) - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if len(readChunkRange) != len(chunkRange) { - t.Fatal("Mismatch in number of chunk ranges", "expected", len(chunkRange), "got", len(readChunkRange)) - } - - for j, cr := range readChunkRange { - if cr.StartBlockNumber != chunkRange[j].StartBlockNumber || cr.EndBlockNumber != chunkRange[j].EndBlockNumber { - t.Fatal("Mismatch in chunk range", "batch index", batchIndex, "expected", chunkRange[j], "got", cr) - } - } - } - - // over-write - newRange := []*ChunkBlockRange{{StartBlockNumber: 1001, EndBlockNumber: 1100}} - WriteBatchChunkRanges(db, 0, newRange) - readChunkRange := ReadBatchChunkRanges(db, 0) - if len(readChunkRange) != 1 || readChunkRange[0].StartBlockNumber != 1001 || readChunkRange[0].EndBlockNumber != 1100 { - t.Fatal("Over-write failed for chunk range", "expected", newRange, "got", readChunkRange) - } - - // read non-existing value - if readChunkRange = ReadBatchChunkRanges(db, uint64(len(chunks)+1)); readChunkRange != nil { - t.Fatal("Expected nil for non-existing value", "got", readChunkRange) - } - - // delete: revert batch - for i := range chunks { - batchIndex := uint64(i) - DeleteBatchChunkRanges(db, batchIndex) - - readChunkRange := ReadBatchChunkRanges(db, batchIndex) - if readChunkRange != nil { - t.Fatal("Chunk range was not deleted", "batch index", batchIndex) - } - } - - // delete non-existing value: ensure the delete operation handles non-existing values without errors. - DeleteBatchChunkRanges(db, uint64(len(chunks)+1)) -} - func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) { db := NewMemoryDatabase() diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index 2e4f2a18c5de..b4a51935b4ff 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -112,7 +112,6 @@ var ( // Scroll rollup event store rollupEventSyncedL1BlockNumberKey = []byte("R-LastRollupEventSyncedL1BlockNumber") - batchChunkRangesPrefix = []byte("R-bcr") batchMetaPrefix = []byte("R-bm") finalizedL2BlockNumberKey = []byte("R-finalized") lastFinalizedBatchIndexKey = []byte("R-finalizedBatchIndex") @@ -304,11 +303,6 @@ func SkippedTransactionHashKey(index uint64) []byte { return append(skippedTransactionHashPrefix, encodeBigEndian(index)...) } -// batchChunkRangesKey = batchChunkRangesPrefix + batch index (uint64 big endian) -func batchChunkRangesKey(batchIndex uint64) []byte { - return append(batchChunkRangesPrefix, encodeBigEndian(batchIndex)...) 
-} - // batchMetaKey = batchMetaPrefix + batch index (uint64 big endian) func batchMetaKey(batchIndex uint64) []byte { return append(batchMetaPrefix, encodeBigEndian(batchIndex)...) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index ad295d5de3be..e8c7a5aa178c 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -94,9 +94,6 @@ var Defaults = Config{ GPO: FullNodeGPO, RPCTxFeeCap: 1, // 1 ether MaxBlockRange: -1, // Default unconfigured value: no block range limit for backward compatibility - DA: da_syncer.Config{ - FetcherMode: da_syncer.L1RPC, - }, } func init() { diff --git a/go.mod b/go.mod index 2bda32ee00d7..5cd2a1dccda3 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( github.com/Azure/azure-storage-blob-go v0.7.0 - github.com/VictoriaMetrics/fastcache v1.12.1 + github.com/VictoriaMetrics/fastcache v1.12.2 github.com/aws/aws-sdk-go-v2 v1.2.0 github.com/aws/aws-sdk-go-v2/config v1.1.1 github.com/aws/aws-sdk-go-v2/credentials v1.1.1 @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.2 + github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 @@ -85,6 +85,7 @@ require ( github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/gotestyourself/gotestyourself v1.4.0 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect + github.com/klauspost/compress v1.17.9 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d // indirect github.com/mattn/go-runewidth v0.0.15 // indirect diff --git a/go.sum b/go.sum index 8c96ce6e7cd5..a540bb8de247 100644 --- a/go.sum +++ b/go.sum @@ -38,9 +38,11 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= -github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI= +github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/agiledragon/gomonkey/v2 v2.12.0 h1:ek0dYu9K1rSV+TgkW5LvNNPRWyDZVIxGMCFI6Pz9o38= +github.com/agiledragon/gomonkey/v2 v2.12.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -278,6 +280,8 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream 
v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -387,15 +391,15 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac h1:DjLrqjoOLVFug9ZkAbJYwjtYW51YZE0Num3p4cZXaZs= -github.com/scroll-tech/da-codec v0.1.1-0.20240822151711-9e32313056ac/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs= github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= +github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35 h1:sytWSptYjLWiVE4/GiGYUCXa9VBxfM9UpNpF5BSalI4= +github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35/go.mod h1:vHY7S9ivJ7wlusDBrCh6Lq7k5qNFkTWP4TRDKx35yck= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= @@ -430,8 +434,7 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2 h1:wh1wzwAhZBNiZO37uWS/nDaKiIwHz4mDo4pnA+fqTO0= -github.com/supranational/blst v0.3.11-0.20230124161941-ca03e11a3ff2/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= 
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -580,6 +583,7 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go index c3c223ff22a9..ead133e90b87 100644 --- a/rollup/da_syncer/da_syncer.go +++ b/rollup/da_syncer/da_syncer.go @@ -28,21 +28,21 @@ func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error { currentBlock := s.blockchain.CurrentBlock() // we expect blocks to be consecutive. block.PartialHeader.Number == parentBlock.Number+1. - if block.PartialHeader.Number <= currentBlock.Number.Uint64() { - log.Debug("block number is too low", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64()) + if block.PartialHeader.Number <= currentBlock.Number().Uint64() { + log.Debug("block number is too low", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number().Uint64()) return ErrBlockTooLow - } else if block.PartialHeader.Number > currentBlock.Number.Uint64()+1 { - log.Debug("block number is too high", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number.Uint64()) + } else if block.PartialHeader.Number > currentBlock.Number().Uint64()+1 { + log.Debug("block number is too high", "block number", block.PartialHeader.Number, "parent block number", currentBlock.Number().Uint64()) return ErrBlockTooHigh } - parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number.Uint64()) + parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number().Uint64()) if _, err := s.blockchain.BuildAndWriteBlock(parentBlock, block.PartialHeader.ToHeader(), block.Transactions); err != nil { return fmt.Errorf("failed building and writing block, number: %d, error: %v", block.PartialHeader.Number, err) } - if s.blockchain.CurrentBlock().Number.Uint64()%1000 == 0 { - log.Info("L1 sync progress", "blockchain height", s.blockchain.CurrentBlock().Number.Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root) + if s.blockchain.CurrentBlock().Number().Uint64()%1000 == 0 { + log.Info("L1 sync progress", "blockchain height", s.blockchain.CurrentBlock().Number().Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root) } return nil diff --git a/rollup/da_syncer/data_source.go b/rollup/da_syncer/data_source.go index f417d09af00e..7beab3baea32 100644 --- a/rollup/da_syncer/data_source.go +++ b/rollup/da_syncer/data_source.go @@ -2,7 +2,6 @@ package da_syncer import ( "context" - "errors" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/ethdb" @@ -36,9 +35,5 @@ func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.Cha } func (ds *DataSourceFactory) OpenDataSource(ctx context.Context, l1height uint64) (DataSource, error) { - if 
ds.config.FetcherMode == L1RPC { - return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db) - } else { - return nil, errors.New("snapshot_data_source: not implemented") - } + return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db) } diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go index 6795f2608e05..27eaf20cb38a 100644 --- a/rollup/da_syncer/syncing_pipeline.go +++ b/rollup/da_syncer/syncing_pipeline.go @@ -21,11 +21,9 @@ import ( // Config is the configuration parameters of data availability syncing. type Config struct { - FetcherMode FetcherMode // mode of fetcher - SnapshotFilePath string // path to snapshot file - BlobScanAPIEndpoint string // BlobScan blob api endpoint - BlockNativeAPIEndpoint string // BlockNative blob api endpoint - BeaconNodeAPIEndpoint string // Beacon node api endpoint + BlobScanAPIEndpoint string // BlobScan blob api endpoint + BlockNativeAPIEndpoint string // BlockNative blob api endpoint + BeaconNodeAPIEndpoint string // Beacon node api endpoint } // SyncingPipeline is a derivation pipeline for syncing data from L1 and DA and transform it into diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 4c5261511328..bbb2b4940393 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -4,18 +4,12 @@ import ( "context" "encoding/json" "fmt" - "math/big" "os" "reflect" "sync" "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/da-codec/encoding/codecv0" - "github.com/scroll-tech/da-codec/encoding/codecv1" - "github.com/scroll-tech/da-codec/encoding/codecv2" - "github.com/scroll-tech/da-codec/encoding/codecv3" - "github.com/scroll-tech/da-codec/encoding/codecv4" "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" @@ -26,7 +20,6 @@ import ( "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" - "github.com/scroll-tech/go-ethereum/rollup/rcfg" "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rollup/withdrawtrie" @@ -78,7 +71,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig return nil, fmt.Errorf("missing L1 config in genesis") } - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() if err != nil { return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } @@ -222,12 +215,11 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB batchIndex := event.BatchIndex.Uint64() log.Trace("found new CommitBatch event", "batch index", batchIndex) - committedBatchMeta, chunkBlockRanges, err := s.getCommittedBatchMeta(batchIndex, &vLog) + committedBatchMeta, err := s.getCommittedBatchMeta(batchIndex, &vLog) if err != nil { return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err) } rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta) - rawdb.WriteBatchChunkRanges(s.db, batchIndex, chunkBlockRanges) case s.l1RevertBatchEventSignature: event := &L1RevertBatchEvent{} @@ -238,7 +230,6 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB log.Trace("found new RevertBatch event", "batch index", batchIndex) rawdb.DeleteCommittedBatchMeta(s.db, batchIndex) - 
rawdb.DeleteBatchChunkRanges(s.db, batchIndex) case s.l1FinalizeBatchEventSignature: event := &L1FinalizeBatchEvent{} @@ -273,12 +264,12 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB for index := startBatchIndex; index <= batchIndex; index++ { committedBatchMeta := rawdb.ReadCommittedBatchMeta(s.db, index) - chunks, err := s.getLocalChunksForBatch(index) + chunks, err := s.getLocalChunksForBatch(committedBatchMeta.ChunkBlockRanges) if err != nil { return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err) } - endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.bc.Config(), s.stack) + endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.stack) if err != nil { return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err) } @@ -313,12 +304,10 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB return nil } -func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encoding.Chunk, error) { - chunkBlockRanges := rawdb.ReadBatchChunkRanges(s.db, batchIndex) +func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.ChunkBlockRange) ([]*encoding.Chunk, error) { if len(chunkBlockRanges) == 0 { - return nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges") + return nil, fmt.Errorf("chunkBlockRanges is empty") } - endBlockNumber := chunkBlockRanges[len(chunkBlockRanges)-1].EndBlockNumber for i := 0; i < defaultMaxRetries; i++ { if s.ctx.Err() != nil { @@ -366,13 +355,13 @@ func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encodi return chunks, nil } -func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, []*rawdb.ChunkBlockRange, error) { +func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, error) { if batchIndex == 0 { return &rawdb.CommittedBatchMeta{ Version: 0, BlobVersionedHashes: nil, ChunkBlockRanges: []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, - }, []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, nil + }, nil } tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash) @@ -381,11 +370,11 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash) if err != nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) + return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) } if block == nil { - return nil, nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex()) } found := false @@ -397,7 +386,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types } } if !found { - return nil, nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block 
number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) } } @@ -406,19 +395,19 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types if tx.Type() == types.BlobTxType { blobVersionedHashes := tx.BlobHashes() if blobVersionedHashes == nil { - return nil, nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) + return nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex()) } commitBatchMeta.BlobVersionedHashes = blobVersionedHashes } version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data()) if err != nil { - return nil, nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err) } commitBatchMeta.Version = version commitBatchMeta.ChunkBlockRanges = ranges - return &commitBatchMeta, ranges, nil + return &commitBatchMeta, nil } // decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata. @@ -493,10 +482,8 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // - batchIndex: batch index of the validated batch // - event: L1 finalize batch event data // - parentFinalizedBatchMeta: metadata of the finalized parent batch -// - committedBatchMeta: committed batch metadata stored in the database. -// Can be nil for older client versions that don't store this information. +// - committedBatchMeta: committed batch metadata stored in the database // - chunks: slice of chunk data for the current batch -// - chainCfg: chain configuration to identify the codec version when committedBatchMeta is nil // - stack: node stack to terminate the node in case of inconsistency // // Returns: @@ -507,7 +494,7 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. // In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes. 
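To make the parent-child argument above concrete, here is a minimal, self-contained sketch of why verifying only the last batch hash of a bundle pins every earlier batch. This is not the codec-specific DA batch hashing that validateBatch actually performs; toyBatchHash and the payloads are invented purely for illustration:

package main

import (
	"crypto/sha256"
	"fmt"
)

// toyBatchHash chains each batch hash to its parent's hash, mirroring the
// parent-child relationship between batch hashes that validateBatch relies on.
func toyBatchHash(parentHash [32]byte, payload []byte) [32]byte {
	h := sha256.New()
	h.Write(parentHash[:]) // commit to the parent batch hash
	h.Write(payload)       // commit to this batch's own content
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	var parent [32]byte // zero parent hash for the first batch
	for i, payload := range [][]byte{[]byte("batch-0"), []byte("batch-1"), []byte("batch-2")} {
		parent = toyBatchHash(parent, payload)
		fmt.Printf("batch %d hash: %x\n", i, parent[:4])
	}
	// Altering any earlier payload (or hash) changes every later hash, so a
	// match against the bundle's final batch hash vouches for the whole chain.
}

This is why a finalize-by-bundle event only needs to fully verify the bundle's last batch: any inconsistency in an earlier batch would surface as a mismatch there.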
-func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, chainCfg *params.ChainConfig, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -532,71 +519,17 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz Chunks: chunks, } - var codecVersion encoding.CodecVersion - if committedBatchMeta != nil { - codecVersion = encoding.CodecVersion(committedBatchMeta.Version) - } else { - codecVersion = determineCodecVersion(startBlock.Header.Number, startBlock.Header.Time, chainCfg) + codecVersion := encoding.CodecVersion(committedBatchMeta.Version) + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return 0, nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) } - var localBatchHash common.Hash - if codecVersion == encoding.CodecV0 { - daBatch, err := codecv0.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv0 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV1 { - daBatch, err := codecv1.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv1 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV2 { - daBatch, err := codecv2.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv2 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV3 { - daBatch, err := codecv3.NewDABatch(batch) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv3 DA batch, batch index: %v, err: %w", batchIndex, err) - } - localBatchHash = daBatch.Hash() - } else if codecVersion == encoding.CodecV4 { - // Check if committedBatchMeta exists, for backward compatibility with older client versions - if committedBatchMeta == nil { - return 0, nil, fmt.Errorf("missing committed batch metadata for codecV4, please use the latest client version, batch index: %v", batchIndex) - } - - // Validate BlobVersionedHashes - if committedBatchMeta.BlobVersionedHashes == nil || len(committedBatchMeta.BlobVersionedHashes) != 1 { - return 0, nil, fmt.Errorf("invalid blob hashes, batch index: %v, blob hashes: %v", batchIndex, committedBatchMeta.BlobVersionedHashes) - } - - // Attempt to create DA batch with compression - daBatch, err := codecv4.NewDABatch(batch, true) - if err != nil { - // If compression fails, try without compression - log.Warn("failed to create codecv4 DA batch with compress enabling", "batch index", batchIndex, "err", err) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } else if daBatch.BlobVersionedHash != committedBatchMeta.BlobVersionedHashes[0] { - // Inconsistent blob versioned hash, fallback to uncompressed 
DA batch - log.Warn("impossible case: inconsistent blob versioned hash", "batch index", batchIndex, "expected", committedBatchMeta.BlobVersionedHashes[0], "actual", daBatch.BlobVersionedHash) - daBatch, err = codecv4.NewDABatch(batch, false) - if err != nil { - return 0, nil, fmt.Errorf("failed to create codecv4 DA batch, batch index: %v, err: %w", batchIndex, err) - } - } - - localBatchHash = daBatch.Hash() - } else { - return 0, nil, fmt.Errorf("unsupported codec version: %v", codecVersion) + daBatch, err := codec.NewDABatch(batch) + if err != nil { + return 0, nil, fmt.Errorf("failed to create DA batch, batch index: %v, codec version: %v, expected blob hashes: %v, err: %w", batchIndex, codecVersion, committedBatchMeta.BlobVersionedHashes, err) } + localBatchHash := daBatch.Hash() localStateRoot := endBlock.Header.Root localWithdrawRoot := endBlock.WithdrawRoot @@ -648,126 +581,29 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil } -// determineCodecVersion determines the codec version based on the block number and chain configuration. -func determineCodecVersion(startBlockNumber *big.Int, startBlockTimestamp uint64, chainCfg *params.ChainConfig) encoding.CodecVersion { - switch { - case startBlockNumber.Uint64() == 0 || !chainCfg.IsBernoulli(startBlockNumber): - return encoding.CodecV0 // codecv0: genesis batch or batches before Bernoulli - case !chainCfg.IsCurie(startBlockNumber): - return encoding.CodecV1 // codecv1: batches after Bernoulli and before Curie - case !chainCfg.IsDarwin(startBlockTimestamp): - return encoding.CodecV2 // codecv2: batches after Curie and before Darwin - case !chainCfg.IsDarwinV2(startBlockTimestamp): - return encoding.CodecV3 // codecv3: batches after Darwin - default: - return encoding.CodecV4 // codecv4: batches after DarwinV2 - } -} - // decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges. 
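For context on what the hand-rolled per-codec branches below were doing, and what codec.DecodeDAChunksRawTx now encapsulates: each encoded chunk starts with a one-byte block count followed by fixed-width DABlock records. A minimal sketch of just that slicing, assuming the 60-byte record size checked in the removed code (splitChunkBlocks is a made-up helper, not part of the codebase):

package main

import "fmt"

// splitChunkBlocks splits one encoded chunk into its raw 60-byte DABlock
// records: byte 0 holds the block count, the records follow back to back.
func splitChunkBlocks(chunk []byte) ([][]byte, error) {
	const daBlockSize = 60
	if len(chunk) < 1 {
		return nil, fmt.Errorf("invalid chunk, length is less than 1")
	}
	numBlocks := int(chunk[0])
	if len(chunk) < 1+numBlocks*daBlockSize {
		return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*daBlockSize, len(chunk))
	}
	blocks := make([][]byte, 0, numBlocks)
	for i := 0; i < numBlocks; i++ {
		start := 1 + i*daBlockSize // add 1 to skip the numBlocks byte
		blocks = append(blocks, chunk[start:start+daBlockSize])
	}
	return blocks, nil
}

func main() {
	chunk := append([]byte{2}, make([]byte, 120)...) // 2 zero-filled records
	blocks, err := splitChunkBlocks(chunk)
	fmt.Println(len(blocks), err) // 2 <nil>
}

Moving this layout knowledge behind the codec interface is what lets the refactored function shrink from five near-identical branches to a single DecodeDAChunksRawTx call.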
func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) { - var chunkBlockRanges []*rawdb.ChunkBlockRange - for _, chunk := range chunks { - if len(chunk) < 1 { - return nil, fmt.Errorf("invalid chunk, length is less than 1") - } - - numBlocks := int(chunk[0]) - - switch codecVersion { - case encoding.CodecV0: - if len(chunk) < 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv0.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv0.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV1: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv1.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv1.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV2: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv2.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv2.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } - - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV3: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv3.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv3.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return nil, err - } - } + codec, err := encoding.CodecFromVersion(codecVersion) + if err != nil { + return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - case encoding.CodecV4: - if len(chunk) != 1+numBlocks*60 { - return nil, fmt.Errorf("invalid chunk byte length, expected: %v, got: %v", 1+numBlocks*60, len(chunk)) - } - daBlocks := make([]*codecv4.DABlock, numBlocks) - for i := 0; i < numBlocks; i++ { - startIdx := 1 + i*60 // add 1 to skip numBlocks byte - endIdx := startIdx + 60 - daBlocks[i] = &codecv4.DABlock{} - if err := daBlocks[i].Decode(chunk[startIdx:endIdx]); err != nil { - return 
nil, err - } - } + daChunksRawTx, err := codec.DecodeDAChunksRawTx(chunks) + if err != nil { + return nil, fmt.Errorf("failed to decode DA chunks, version: %v, err: %w", codecVersion, err) + } - chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ - StartBlockNumber: daBlocks[0].BlockNumber, - EndBlockNumber: daBlocks[len(daBlocks)-1].BlockNumber, - }) - default: - return nil, fmt.Errorf("unexpected batch version %v", codecVersion) + var chunkBlockRanges []*rawdb.ChunkBlockRange + for _, daChunkRawTx := range daChunksRawTx { + if len(daChunkRawTx.Blocks) == 0 { + return nil, fmt.Errorf("no blocks found in DA chunk, version: %v", codecVersion) } + + chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{ + StartBlockNumber: daChunkRawTx.Blocks[0].Number(), + EndBlockNumber: daChunkRawTx.Blocks[len(daChunkRawTx.Blocks)-1].Number(), + }) } + return chunkBlockRanges, nil } diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 310d4be2515d..f1b09a37a1f2 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -313,7 +313,7 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x0"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version)) @@ -324,13 +324,13 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { {StartBlockNumber: 911156, EndBlockNumber: 911159}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -367,7 +367,7 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x1"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) @@ -376,13 +376,13 @@ func TestGetCommittedBatchMetaCodecv1(t *testing.T) { {StartBlockNumber: 1, EndBlockNumber: 11}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -419,7 +419,7 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { 
vLog := &types.Log{ TxHash: common.HexToHash("0x2"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version)) @@ -456,13 +456,13 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { {StartBlockNumber: 174, EndBlockNumber: 174}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } @@ -499,7 +499,7 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { vLog := &types.Log{ TxHash: common.HexToHash("0x3"), } - metadata, ranges, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(1, vLog) require.NoError(t, err) assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version)) @@ -537,20 +537,18 @@ func TestGetCommittedBatchMetaCodecv3(t *testing.T) { {StartBlockNumber: 70, EndBlockNumber: 70}, } - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) + if len(expectedRanges) != len(metadata.ChunkBlockRanges) { + t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) } - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) + for i := range metadata.ChunkBlockRanges { + if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { + t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) } } } func TestValidateBatchCodecv0(t *testing.T) { - chainConfig := ¶ms.ChainConfig{} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -560,50 +558,57 @@ func TestValidateBatchCodecv0(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := 
readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } + + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv1(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -613,50 +618,56 @@ func TestValidateBatchCodecv1(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, 
finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv2(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -666,50 +677,56 @@ func TestValidateBatchCodecv2(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV2), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, 
finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV2), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchCodecv3(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64)} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -719,7 +736,7 @@ func TestValidateBatchCodecv3(t *testing.T) { block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"), @@ -727,46 +744,53 @@ func TestValidateBatchCodecv3(t *testing.T) { WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1, chunk2, chunk3}, chainConfig, nil) + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 11, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: 
[]common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ BatchHash: event2.BatchHash, TotalL1MessagePopped: 42, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } func TestValidateBatchUpgrades(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(3), CurieBlock: big.NewInt(14), DarwinTime: func() *uint64 { t := uint64(1684762320); return &t }()} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} - parentBatchMeta1 := &rawdb.FinalizedBatchMeta{} + parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} event1 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(0), BatchHash: common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"), @@ -774,82 +798,97 @@ func TestValidateBatchUpgrades(t *testing.T) { WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentBatchMeta1, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) + committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV0), + BlobVersionedHashes: nil, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json") chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} - parentBatchMeta2 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ BatchHash: event1.BatchHash, TotalL1MessagePopped: 0, StateRoot: event1.StateRoot, WithdrawRoot: event1.WithdrawRoot, } - assert.Equal(t, parentBatchMeta2, finalizedBatchMeta1) + assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) event2 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(1), BatchHash: common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"), StateRoot: chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root, WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentBatchMeta2, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")}, + } + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} - parentBatchMeta3 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ 
BatchHash: event2.BatchHash, TotalL1MessagePopped: 0, StateRoot: event2.StateRoot, WithdrawRoot: event2.WithdrawRoot, } - assert.Equal(t, parentBatchMeta3, finalizedBatchMeta2) + assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) event3 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(2), BatchHash: common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"), StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, } - endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentBatchMeta3, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV1), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")}, + } + endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) block4 := readBlockFromJSON(t, "./testdata/blockTrace_05.json") chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - parentBatchMeta4 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta4 := &rawdb.FinalizedBatchMeta{ BatchHash: event3.BatchHash, TotalL1MessagePopped: 11, StateRoot: event3.StateRoot, WithdrawRoot: event3.WithdrawRoot, } - assert.Equal(t, parentBatchMeta4, finalizedBatchMeta3) + assert.Equal(t, parentFinalizedBatchMeta4, finalizedBatchMeta3) event4 := &L1FinalizeBatchEvent{ BatchIndex: big.NewInt(3), BatchHash: common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"), StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentBatchMeta4, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) - parentBatchMeta5 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ BatchHash: event4.BatchHash, TotalL1MessagePopped: 42, StateRoot: event4.StateRoot, WithdrawRoot: event4.WithdrawRoot, } - assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4) + assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } func TestValidateBatchInFinalizeByBundle(t *testing.T) { - chainConfig := ¶ms.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: func() *uint64 { t := uint64(0); return &t }()} - block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") block2 := readBlockFromJSON(t, "./testdata/blockTrace_03.json") block3 := readBlockFromJSON(t, "./testdata/blockTrace_04.json") @@ -867,29 +906,49 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, } - endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, nil, []*encoding.Chunk{chunk1}, chainConfig, nil) + 
committedBatchMeta1 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01bbc6b98d7d3783730b6208afac839ad37dcf211b9d9e7c83a5f9d02125ddd7")}, + } + + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x01c81e5696e00f1e6e7d76c197f74ed51650147c49c4e6e5b0b702cdcc54352a")}, + } + + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x012e15203534ae3f4cbe1b0f58fe6db6e5c29432115a8ece6ef5550bf2ffce4c")}, + } + + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV3), + BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + } + + endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) - endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, nil, []*encoding.Chunk{chunk2}, chainConfig, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) - endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, nil, []*encoding.Chunk{chunk3}, chainConfig, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) - endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, nil, []*encoding.Chunk{chunk4}, chainConfig, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) - parentBatchMeta5 := &rawdb.FinalizedBatchMeta{ + parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ BatchHash: event.BatchHash, TotalL1MessagePopped: 42, StateRoot: event.StateRoot, WithdrawRoot: event.WithdrawRoot, } - assert.Equal(t, parentBatchMeta5, finalizedBatchMeta4) + assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { From de37d472eab1b45807b49121fbd53c820834021b Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:26:22 +0800 Subject: [PATCH 03/36] go.mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index a540bb8de247..95119a13f63d 100644 --- a/go.sum +++ b/go.sum @@ -396,8 +396,6 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.2 h1:QyJ+dQ4zWVVJwuqxNt4MiKyrymVc6rHe4YPtURkjiRc= -github.com/scroll-tech/da-codec v0.1.2/go.mod h1:odz1ck3umvYccCG03osaQBISAYGinZktZYbpk94fYRE= github.com/scroll-tech/da-codec v0.1.3-0.20241210035500-70810faccc35 h1:sytWSptYjLWiVE4/GiGYUCXa9VBxfM9UpNpF5BSalI4= github.com/scroll-tech/da-codec 
v0.1.3-0.20241210035500-70810faccc35/go.mod h1:vHY7S9ivJ7wlusDBrCh6Lq7k5qNFkTWP4TRDKx35yck= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= From e34fecf3a8e6f5e79f99b34daccab89f2cef76a6 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:28:13 +0800 Subject: [PATCH 04/36] fix compile error --- rollup/rollup_sync_service/l1client_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rollup/rollup_sync_service/l1client_test.go b/rollup/rollup_sync_service/l1client_test.go index acc3f8daad52..394f455b80c5 100644 --- a/rollup/rollup_sync_service/l1client_test.go +++ b/rollup/rollup_sync_service/l1client_test.go @@ -18,7 +18,7 @@ func TestL1Client(t *testing.T) { ctx := context.Background() mockClient := &mockEthClient{} - scrollChainABI, err := scrollChainMetaData.GetAbi() + scrollChainABI, err := ScrollChainMetaData.GetAbi() if err != nil { t.Fatal("failed to get scroll chain abi", "err", err) } From 1327771c7089b51dccf7c62c54fded78e9e134d3 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 18:09:50 +0800 Subject: [PATCH 05/36] fix goimports --- rollup/da_syncer/da/calldata_blob_source.go | 1 + 1 file changed, 1 insertion(+) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 47eabfceb65f..db0f5f01c107 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" From b05954d5b03502cc67e1c45a3b414353ff6488d1 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 19:42:11 +0800 Subject: [PATCH 06/36] fix log --- rollup/da_syncer/da_syncer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go index ead133e90b87..b787abff3d8a 100644 --- a/rollup/da_syncer/da_syncer.go +++ b/rollup/da_syncer/da_syncer.go @@ -42,7 +42,7 @@ func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error { } if s.blockchain.CurrentBlock().Number().Uint64()%1000 == 0 { - log.Info("L1 sync progress", "blockchain height", s.blockchain.CurrentBlock().Number().Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root) + log.Info("L1 sync progress", "blockchain height", s.blockchain.CurrentBlock().Number().Uint64(), "block hash", s.blockchain.CurrentBlock().Hash(), "root", s.blockchain.CurrentBlock().Root()) } return nil From ce8f7856ea6f9ea5c720aa56a3fcd9992f868855 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 12 Dec 2024 11:20:34 +0700 Subject: [PATCH 07/36] address review comments --- rollup/da_syncer/batch_queue.go | 1 + .../blob_client/block_native_client.go | 6 ++- rollup/da_syncer/da/commitV1.go | 2 +- rollup/da_syncer/da_queue.go | 6 +++ rollup/da_syncer/da_syncer.go | 4 ++ rollup/da_syncer/modes.go | 52 ------------------- 6 files changed, 17 insertions(+), 54 deletions(-) delete mode 100644 rollup/da_syncer/modes.go diff --git a/rollup/da_syncer/batch_queue.go b/rollup/da_syncer/batch_queue.go index a0172a86c077..093ce12d830e 100644 --- a/rollup/da_syncer/batch_queue.go +++ 
b/rollup/da_syncer/batch_queue.go @@ -98,5 +98,6 @@ func (bq *BatchQueue) deleteBatch(batch da.Entry) { func (bq *BatchQueue) Reset(height uint64) { bq.batches.Clear() bq.batchesMap.Clear() + bq.lastFinalizedBatchIndex = 0 bq.DAQueue.Reset(height) } diff --git a/rollup/da_syncer/blob_client/block_native_client.go b/rollup/da_syncer/blob_client/block_native_client.go index ddd574d02d10..7b1cce86f083 100644 --- a/rollup/da_syncer/blob_client/block_native_client.go +++ b/rollup/da_syncer/blob_client/block_native_client.go @@ -30,7 +30,11 @@ func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Con if err != nil { return nil, fmt.Errorf("failed to join path, err: %w", err) } - resp, err := http.Get(path) + req, err := http.NewRequestWithContext(ctx, "GET", path, nil) + if err != nil { + return nil, fmt.Errorf("cannot create request, err: %w", err) + } + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, fmt.Errorf("cannot do request, err: %w", err) } diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 4670eec8bbcb..532b0f81abd6 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -52,7 +52,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, // compute blob versioned hash and compare with one from tx c, err := kzg4844.BlobToCommitment(blob) if err != nil { - return nil, fmt.Errorf("failed to create blob commitment") + return nil, fmt.Errorf("failed to create blob commitment: %w", err) } blobVersionedHash := common.Hash(kzg4844.CalcBlobHashV1(sha256.New(), &c)) if blobVersionedHash != versionedHash { diff --git a/rollup/da_syncer/da_queue.go b/rollup/da_syncer/da_queue.go index 64673a4a646b..3602947f51e2 100644 --- a/rollup/da_syncer/da_queue.go +++ b/rollup/da_syncer/da_queue.go @@ -27,6 +27,12 @@ func NewDAQueue(l1height uint64, dataSourceFactory *DataSourceFactory) *DAQueue func (dq *DAQueue) NextDA(ctx context.Context) (da.Entry, error) { for len(dq.da) == 0 { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + err := dq.getNextData(ctx) if err != nil { return nil, err diff --git a/rollup/da_syncer/da_syncer.go b/rollup/da_syncer/da_syncer.go index b787abff3d8a..e0970d37bc9a 100644 --- a/rollup/da_syncer/da_syncer.go +++ b/rollup/da_syncer/da_syncer.go @@ -37,6 +37,10 @@ func (s *DASyncer) SyncOneBlock(block *da.PartialBlock) error { } parentBlock := s.blockchain.GetBlockByNumber(currentBlock.Number().Uint64()) + if parentBlock == nil { + return fmt.Errorf("parent block not found at height %d", currentBlock.Number().Uint64()) + } + if _, err := s.blockchain.BuildAndWriteBlock(parentBlock, block.PartialHeader.ToHeader(), block.Transactions); err != nil { return fmt.Errorf("failed building and writing block, number: %d, error: %v", block.PartialHeader.Number, err) } diff --git a/rollup/da_syncer/modes.go b/rollup/da_syncer/modes.go deleted file mode 100644 index bfcc1d1dfba0..000000000000 --- a/rollup/da_syncer/modes.go +++ /dev/null @@ -1,52 +0,0 @@ -package da_syncer - -import "fmt" - -// FetcherMode represents the mode of fetcher -type FetcherMode int - -const ( - // L1RPC mode fetches DA from L1RPC - L1RPC FetcherMode = iota - // Snapshot mode loads DA from snapshot file - Snapshot -) - -func (mode FetcherMode) IsValid() bool { - return mode >= L1RPC && mode <= Snapshot -} - -// String implements the stringer interface. 
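The select/default guard added to DAQueue.NextDA in the hunk above is the idiomatic non-blocking cancellation check for Go retry loops: without it, a data source that keeps returning zero entries would spin through getNextData forever after the pipeline is torn down. A minimal self-contained sketch of the pattern, with fill as a hypothetical stand-in for getNextData:

package sketch

import "context"

// next pops an item from queue, refilling it via fill whenever it runs
// dry. Polling ctx.Done() before each refill keeps shutdown prompt even
// when fill itself never returns an error.
func next(ctx context.Context, queue *[]string, fill func(context.Context) error) (string, error) {
	for len(*queue) == 0 {
		select {
		case <-ctx.Done():
			return "", ctx.Err() // pipeline stopped; abandon the retry loop
		default:
		}
		if err := fill(ctx); err != nil {
			return "", err
		}
	}
	item := (*queue)[0]
	*queue = (*queue)[1:]
	return item, nil
}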
-func (mode FetcherMode) String() string { - switch mode { - case L1RPC: - return "l1rpc" - case Snapshot: - return "snapshot" - default: - return "unknown" - } -} - -func (mode FetcherMode) MarshalText() ([]byte, error) { - switch mode { - case L1RPC: - return []byte("l1rpc"), nil - case Snapshot: - return []byte("snapshot"), nil - default: - return nil, fmt.Errorf("unknown sync mode %d", mode) - } -} - -func (mode *FetcherMode) UnmarshalText(text []byte) error { - switch string(text) { - case "l1rpc": - *mode = L1RPC - case "snapshot": - *mode = Snapshot - default: - return fmt.Errorf(`unknown sync mode %q, want "l1rpc" or "snapshot"`, text) - } - return nil -} From f10c383837cab17b0a27f0962041e15ca2a9c94e Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 12 Dec 2024 11:27:07 +0700 Subject: [PATCH 08/36] upgrade golang.org/x/net to 0.23.0 --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 5cd2a1dccda3..7a1a9f7d2880 100644 --- a/go.mod +++ b/go.mod @@ -58,9 +58,9 @@ require ( github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef - golang.org/x/crypto v0.17.0 + golang.org/x/crypto v0.21.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce @@ -103,8 +103,8 @@ require ( github.com/yusufpapurcu/wmi v1.2.3 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/net v0.16.0 // indirect - golang.org/x/term v0.15.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/term v0.18.0 // indirect google.golang.org/protobuf v1.23.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 95119a13f63d..fbb9af9bc659 100644 --- a/go.sum +++ b/go.sum @@ -474,8 +474,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -526,8 +526,8 @@ golang.org/x/net v0.0.0-20210220033124-5f55cee0dc0d/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= 
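PATCH 07 deletes modes.go outright, removing the FetcherMode enum. The pattern the deleted type used is the standard one for enum-valued CLI flags: an integer type implementing encoding.TextMarshaler and TextUnmarshaler so that flag parsing and config files round-trip through strings. A stripped-down sketch under illustrative names (Mode, "fast", "full" are not from this codebase):

package sketch

import "fmt"

// Mode mirrors the shape of the deleted FetcherMode: an integer enum
// that marshals to and from human-readable strings.
type Mode int

const (
	Fast Mode = iota
	Full
)

// MarshalText lets flag and config encoders print the symbolic name.
func (m Mode) MarshalText() ([]byte, error) {
	switch m {
	case Fast:
		return []byte("fast"), nil
	case Full:
		return []byte("full"), nil
	default:
		return nil, fmt.Errorf("unknown mode %d", m)
	}
}

// UnmarshalText parses the symbolic name back into the enum value.
func (m *Mode) UnmarshalText(text []byte) error {
	switch string(text) {
	case "fast":
		*m = Fast
	case "full":
		*m = Full
	default:
		return fmt.Errorf("unknown mode %q, want \"fast\" or \"full\"", text)
	}
	return nil
}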
-golang.org/x/net v0.16.0 h1:7eBu7KsSvFDtSXUIDbh3aqlK4DPsZ1rByC8PFfBThos= -golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -582,14 +582,14 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= From b918a2bede3d165a38b0c3dd105c93951c2c5115 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 10 Dec 2024 17:13:31 +0800 Subject: [PATCH 09/36] port changes from #1018 --- common/heapmap.go | 90 +++++ common/shrinkingmap.go | 16 + eth/backend.go | 3 +- .../blob_client/beacon_node_client.go | 16 +- rollup/da_syncer/blob_client/blob_client.go | 6 +- .../da_syncer/blob_client/blob_scan_client.go | 2 +- .../blob_client/block_native_client.go | 2 +- rollup/da_syncer/da/calldata_blob_source.go | 168 ++------ rollup/da_syncer/da/commitV0.go | 10 +- rollup/da_syncer/da/commitV1.go | 25 +- rollup/da_syncer/data_source.go | 10 +- rollup/da_syncer/syncing_pipeline.go | 21 +- rollup/l1/abi.go | 245 +++++++++++ rollup/l1/abi_test.go | 82 ++++ rollup/l1/l1msg_bindings.go | 150 +++++++ rollup/l1/reader.go | 381 ++++++++++++++++++ rollup/l1/reader_test.go | 125 ++++++ rollup/l1/types.go | 22 + 18 files changed, 1193 insertions(+), 181 deletions(-) create mode 100644 common/heapmap.go create mode 100644 rollup/l1/abi.go create mode 100644 rollup/l1/abi_test.go create mode 100644 rollup/l1/l1msg_bindings.go create mode 100644 rollup/l1/reader.go create mode 100644 rollup/l1/reader_test.go create mode 100644 
rollup/l1/types.go diff --git a/common/heapmap.go b/common/heapmap.go new file mode 100644 index 000000000000..90f51e2db25d --- /dev/null +++ b/common/heapmap.go @@ -0,0 +1,90 @@ +package common + +type HeapMap[K comparable, T Comparable[T]] struct { + h *Heap[T] + m *ShrinkingMap[K, *HeapElement[T]] + keyFromElement func(T) K +} + +func NewHeapMap[K comparable, T Comparable[T]](keyFromElement func(T) K) *HeapMap[K, T] { + return &HeapMap[K, T]{ + h: NewHeap[T](), + m: NewShrinkingMap[K, *HeapElement[T]](1000), + keyFromElement: keyFromElement, + } +} + +func (hm *HeapMap[K, T]) Len() int { + return hm.h.Len() +} + +func (hm *HeapMap[K, T]) Push(element T) bool { + k := hm.keyFromElement(element) + + if hm.m.Has(k) { + return false + } + + heapElement := hm.h.Push(element) + hm.m.Set(k, heapElement) + + return true +} + +func (hm *HeapMap[K, T]) Pop() T { + element := hm.h.Pop() + k := hm.keyFromElement(element.Value()) + hm.m.Delete(k) + + return element.Value() +} + +func (hm *HeapMap[K, T]) Peek() T { + return hm.h.Peek().Value() +} + +func (hm *HeapMap[K, T]) RemoveByElement(element T) bool { + key := hm.keyFromElement(element) + heapElement, exists := hm.m.Get(key) + if !exists { + return false + } + + hm.h.Remove(heapElement) + hm.m.Delete(key) + + return true +} + +func (hm *HeapMap[K, T]) RemoveByKey(key K) bool { + heapElement, exists := hm.m.Get(key) + if !exists { + return false + } + + hm.h.Remove(heapElement) + hm.m.Delete(key) + + return true +} + +func (hm *HeapMap[K, T]) Clear() { + hm.h.Clear() + hm.m = NewShrinkingMap[K, *HeapElement[T]](1000) +} + +func (hm *HeapMap[K, T]) Keys() []K { + return hm.m.Keys() +} + +func (hm *HeapMap[K, T]) Elements() []T { + var elements []T + for _, element := range hm.m.Values() { + elements = append(elements, element.Value()) + } + return elements +} + +func (hm *HeapMap[K, T]) Has(element T) bool { + return hm.m.Has(hm.keyFromElement(element)) +} diff --git a/common/shrinkingmap.go b/common/shrinkingmap.go index 4bf98f87c2da..a62c23a7b6c8 100644 --- a/common/shrinkingmap.go +++ b/common/shrinkingmap.go @@ -47,6 +47,22 @@ func (s *ShrinkingMap[K, V]) Delete(key K) (deleted bool) { return true } +func (s *ShrinkingMap[K, V]) Keys() []K { + var keys []K + for k := range s.m { + keys = append(keys, k) + } + return keys +} + +func (s *ShrinkingMap[K, V]) Values() []V { + var values []V + for _, v := range s.m { + values = append(values, v) + } + return values +} + func (s *ShrinkingMap[K, V]) Size() (size int) { return len(s.m) } diff --git a/eth/backend.go b/eth/backend.go index 2b6c663d2744..a119708e52be 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -57,6 +57,7 @@ import ( "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/rollup/ccc" "github.com/scroll-tech/go-ethereum/rollup/da_syncer" + "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rpc" @@ -109,7 +110,7 @@ type Ethereum struct { // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) -func New(stack *node.Node, config *ethconfig.Config, l1Client sync_service.EthClient) (*Ethereum, error) { +func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ethereum, error) { // Ensure configuration values are compatible and sane if config.SyncMode == downloader.LightSync { return nil, errors.New("can't run eth.Ethereum in light sync mode, use 
les.LightEthereum") diff --git a/rollup/da_syncer/blob_client/beacon_node_client.go b/rollup/da_syncer/blob_client/beacon_node_client.go index 5bfd7b9edf6c..adb61a4199ff 100644 --- a/rollup/da_syncer/blob_client/beacon_node_client.go +++ b/rollup/da_syncer/blob_client/beacon_node_client.go @@ -12,12 +12,10 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/crypto/kzg4844" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" ) type BeaconNodeClient struct { apiEndpoint string - l1Client *rollup_sync_service.L1Client genesisTime uint64 secondsPerSlot uint64 } @@ -28,7 +26,7 @@ var ( beaconNodeBlobEndpoint = "/eth/v1/beacon/blob_sidecars" ) -func NewBeaconNodeClient(apiEndpoint string, l1Client *rollup_sync_service.L1Client) (*BeaconNodeClient, error) { +func NewBeaconNodeClient(apiEndpoint string) (*BeaconNodeClient, error) { // get genesis time genesisPath, err := url.JoinPath(apiEndpoint, beaconNodeGenesisEndpoint) if err != nil { @@ -94,19 +92,13 @@ func NewBeaconNodeClient(apiEndpoint string, l1Client *rollup_sync_service.L1Cli return &BeaconNodeClient{ apiEndpoint: apiEndpoint, - l1Client: l1Client, genesisTime: genesisTime, secondsPerSlot: secondsPerSlot, }, nil } -func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { - // get block timestamp to calculate slot - header, err := c.l1Client.GetHeaderByNumber(blockNumber) - if err != nil { - return nil, fmt.Errorf("failed to get header by number, err: %w", err) - } - slot := (header.Time - c.genesisTime) / c.secondsPerSlot +func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { + slot := (blockTime - c.genesisTime) / c.secondsPerSlot // get blob sidecar for slot blobSidecarPath, err := url.JoinPath(c.apiEndpoint, beaconNodeBlobEndpoint, fmt.Sprintf("%d", slot)) @@ -156,7 +148,7 @@ func (c *BeaconNodeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Cont } } - return nil, fmt.Errorf("missing blob %v in slot %d, block number %d", versionedHash, slot, blockNumber) + return nil, fmt.Errorf("missing blob %v in slot %d", versionedHash, slot) } type GenesisResp struct { diff --git a/rollup/da_syncer/blob_client/blob_client.go b/rollup/da_syncer/blob_client/blob_client.go index 814b1d4faf2d..70635311559f 100644 --- a/rollup/da_syncer/blob_client/blob_client.go +++ b/rollup/da_syncer/blob_client/blob_client.go @@ -17,7 +17,7 @@ const ( ) type BlobClient interface { - GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) + GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) } type BlobClients struct { @@ -32,13 +32,13 @@ func NewBlobClients(blobClients ...BlobClient) *BlobClients { } } -func (c *BlobClients) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlobClients) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { if len(c.list) == 0 { return nil, fmt.Errorf("BlobClients.GetBlobByVersionedHash: list of BlobClients is empty") } for i := 0; i < len(c.list); i++ { - blob, err := c.list[c.curPos].GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, blockNumber) + blob, err := 
c.list[c.curPos].GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, blockTime) if err == nil { return blob, nil } diff --git a/rollup/da_syncer/blob_client/blob_scan_client.go b/rollup/da_syncer/blob_client/blob_scan_client.go index 24b03bed32b9..0185cc9dc96d 100644 --- a/rollup/da_syncer/blob_client/blob_scan_client.go +++ b/rollup/da_syncer/blob_client/blob_scan_client.go @@ -26,7 +26,7 @@ func NewBlobScanClient(apiEndpoint string) *BlobScanClient { } } -func (c *BlobScanClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlobScanClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { // blobscan api docs https://api.blobscan.com/#/blobs/blob-getByBlobId path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) if err != nil { diff --git a/rollup/da_syncer/blob_client/block_native_client.go b/rollup/da_syncer/blob_client/block_native_client.go index 7b1cce86f083..1fe6efbbab27 100644 --- a/rollup/da_syncer/blob_client/block_native_client.go +++ b/rollup/da_syncer/blob_client/block_native_client.go @@ -24,7 +24,7 @@ func NewBlockNativeClient(apiEndpoint string) *BlockNativeClient { } } -func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockNumber(ctx context.Context, versionedHash common.Hash, blockNumber uint64) (*kzg4844.Blob, error) { +func (c *BlockNativeClient) GetBlobByVersionedHashAndBlockTime(ctx context.Context, versionedHash common.Hash, blockTime uint64) (*kzg4844.Blob, error) { // blocknative api docs https://docs.blocknative.com/blocknative-data-archive/blob-archive path, err := url.JoinPath(c.apiEndpoint, versionedHash.String()) if err != nil { diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index db0f5f01c107..a7489c72c838 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -9,12 +9,10 @@ import ( "github.com/scroll-tech/go-ethereum/accounts/abi" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" - "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) const ( @@ -35,7 +33,7 @@ var ( type CalldataBlobSource struct { ctx context.Context - l1Client *rollup_sync_service.L1Client + l1Reader *l1.Reader blobClient blob_client.BlobClient l1height uint64 scrollChainABI *abi.ABI @@ -47,14 +45,14 @@ type CalldataBlobSource struct { l1Finalized uint64 } -func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { - scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() +func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Reader, blobClient blob_client.BlobClient, db ethdb.Database) (*CalldataBlobSource, error) { + scrollChainABI, err := l1.ScrollChainMetaData.GetAbi() if err != nil { return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } return &CalldataBlobSource{ ctx: ctx, - l1Client: l1Client, + l1Reader: l1Reader, blobClient: blobClient, l1height: l1height, scrollChainABI: scrollChainABI, @@ 
-73,7 +71,7 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { // Otherwise, we know that there's more finalized blocks than we want to request up to // -> no need to query finalized block number if to > ds.l1Finalized { - ds.l1Finalized, err = ds.l1Client.GetLatestFinalizedBlockNumber() + ds.l1Finalized, err = ds.l1Reader.GetLatestFinalizedBlockNumber() if err != nil { return nil, serrors.NewTemporaryError(fmt.Errorf("failed to query GetLatestFinalizedBlockNumber, error: %v", err)) } @@ -85,13 +83,13 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { return nil, ErrSourceExhausted } - logs, err := ds.l1Client.FetchRollupEventsInRange(ds.l1height, to) + rollupEvents, err := ds.l1Reader.FetchRollupEventsInRange(ds.l1height, to) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get events, l1height: %d, error: %v", ds.l1height, err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get rollup events, l1height: %d, error: %v", ds.l1height, err)) } - da, err := ds.processLogsToDA(logs) + da, err := ds.processRollupEventsToDA(rollupEvents) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process logs to DA, error: %v", err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process rollup events to DA, error: %v", err)) } ds.l1height = to + 1 @@ -102,48 +100,30 @@ func (ds *CalldataBlobSource) L1Height() uint64 { return ds.l1height } -func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) { +func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { var entries Entries var entry Entry var err error - - for _, vLog := range logs { - switch vLog.Topics[0] { - case ds.l1CommitBatchEventSignature: - event := &rollup_sync_service.L1CommitBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, commitBatchEventName, vLog); err != nil { - return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + for _, rollupEvent := range rollupEvents { + switch rollupEvent.Type() { + case l1.CommitEventType: + commitEvent, ok := rollupEvent.(*l1.CommitBatchEvent) + // this should never happen because we just check event type + if !ok { + return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) } - - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new CommitBatch event", "batch index", batchIndex) - - if entry, err = ds.getCommitBatchDA(batchIndex, &vLog); err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", batchIndex, err) + if entry, err = ds.getCommitBatchDA(commitEvent); err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) } - case ds.l1RevertBatchEventSignature: - event := &rollup_sync_service.L1RevertBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, revertBatchEventName, vLog); err != nil { - return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) - } + case l1.RevertEventType: + entry = NewRevertBatch(rollupEvent.BatchIndex().Uint64()) - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new RevertBatchType event", "batch index", batchIndex) - entry = NewRevertBatch(batchIndex) - - case ds.l1FinalizeBatchEventSignature: - event := &rollup_sync_service.L1FinalizeBatchEvent{} - if err = rollup_sync_service.UnpackLog(ds.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { - return nil, 
fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) - } - - batchIndex := event.BatchIndex.Uint64() - log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) - entry = NewFinalizeBatch(batchIndex) + case l1.FinalizeEventType: + entry = NewFinalizeBatch(rollupEvent.BatchIndex().Uint64()) default: - return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + return nil, fmt.Errorf("unknown rollup event, type: %v", rollupEvent.Type()) } entries = append(entries, entry) @@ -151,97 +131,27 @@ func (ds *CalldataBlobSource) processLogsToDA(logs []types.Log) (Entries, error) return entries, nil } -type commitBatchArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte -} - -func newCommitBatchArgs(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { - var args commitBatchArgs - err := method.Inputs.Copy(&args, values) - return &args, err -} - -func newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*commitBatchArgs, error) { - var args commitBatchWithBlobProofArgs - err := method.Inputs.Copy(&args, values) - if err != nil { - return nil, err - } - return &commitBatchArgs{ - Version: args.Version, - ParentBatchHeader: args.ParentBatchHeader, - Chunks: args.Chunks, - SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, - }, nil -} - -type commitBatchWithBlobProofArgs struct { - Version uint8 - ParentBatchHeader []byte - Chunks [][]byte - SkippedL1MessageBitmap []byte - BlobDataProof []byte -} - -func (ds *CalldataBlobSource) getCommitBatchDA(batchIndex uint64, vLog *types.Log) (Entry, error) { - if batchIndex == 0 { +func (ds *CalldataBlobSource) getCommitBatchDA(commitEvent *l1.CommitBatchEvent) (Entry, error) { + if commitEvent.BatchIndex().Uint64() == 0 { return NewCommitBatchDAV0Empty(), nil } - txData, err := ds.l1Client.FetchTxData(vLog) + args, err := ds.l1Reader.FetchCommitTxData(commitEvent) if err != nil { - return nil, fmt.Errorf("failed to fetch tx data, tx hash: %v, err: %w", vLog.TxHash.Hex(), err) - } - if len(txData) < methodIDLength { - return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + return nil, fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", commitEvent.BatchIndex().Uint64(), commitEvent.TxHash().Hex(), err) } - method, err := ds.scrollChainABI.MethodById(txData[:methodIDLength]) - if err != nil { - return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) - } - values, err := method.Inputs.Unpack(txData[methodIDLength:]) + codec, err := encoding.CodecFromVersion(encoding.CodecVersion(args.Version)) if err != nil { - return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) - } - if method.Name == commitBatchMethodName { - args, err := newCommitBatchArgs(method, values) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) - } - codecVersion := encoding.CodecVersion(args.Version) - codec, err := encoding.CodecFromVersion(codecVersion) - if err != nil { - return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) - } - switch args.Version { - case 0: - return NewCommitBatchDAV0(ds.db, codec, args.Version, batchIndex, args.ParentBatchHeader, 
args.Chunks, args.SkippedL1MessageBitmap, vLog.BlockNumber) - case 1, 2: - return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - default: - return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) - } - } else if method.Name == commitBatchWithBlobProofMethodName { - args, err := newCommitBatchArgsFromCommitBatchWithProof(method, values) - if err != nil { - return nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err) - } - codecVersion := encoding.CodecVersion(args.Version) - codec, err := encoding.CodecFromVersion(codecVersion) - if err != nil { - return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", codecVersion, batchIndex, err) - } - switch args.Version { - case 3, 4: - return NewCommitBatchDAWithBlob(ds.ctx, ds.db, codec, ds.l1Client, ds.blobClient, vLog, args.Version, batchIndex, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - default: - return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) - } + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", args.Version, commitEvent.BatchIndex().Uint64(), err) } - return nil, fmt.Errorf("unknown method name: %s", method.Name) + switch codec.Version() { + case 0: + return NewCommitBatchDAV0(ds.db, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + case 1, 2, 3, 4: + return NewCommitBatchDAWithBlob(ds.ctx, ds.db, ds.l1Reader, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } } diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 135a76d79518..2c4f07869da1 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -10,6 +10,7 @@ import ( "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type CommitBatchDAV0 struct { @@ -25,19 +26,17 @@ type CommitBatchDAV0 struct { func NewCommitBatchDAV0(db ethdb.Database, codec encoding.Codec, - version uint8, - batchIndex uint64, + commitEvent *l1.CommitBatchEvent, parentBatchHeader []byte, chunks [][]byte, skippedL1MessageBitmap []byte, - l1BlockNumber uint64, ) (*CommitBatchDAV0, error) { decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { - return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) } - return NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, l1BlockNumber) + return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) } func NewCommitBatchDAV0WithChunks(db ethdb.Database, @@ -141,6 +140,7 @@ func getTotalMessagesPoppedFromChunks(decodedChunks []*encoding.DAChunkRawTx) in func getL1Messages(db ethdb.Database, parentTotalL1MessagePopped uint64, skippedBitmap []byte, totalL1MessagePopped int) 
([]*types.L1MessageTx, error) { var txs []*types.L1MessageTx + decodedSkippedBitmap, err := encoding.DecodeBitmap(skippedBitmap, totalL1MessagePopped) if err != nil { return nil, fmt.Errorf("failed to decode skipped message bitmap: err: %w", err) diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 532b0f81abd6..0433479c950b 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -8,10 +8,9 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/crypto/kzg4844" "github.com/scroll-tech/go-ethereum/ethdb" ) @@ -21,32 +20,34 @@ type CommitBatchDAV1 struct { } func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, - codec encoding.Codec, - l1Client *rollup_sync_service.L1Client, + l1Reader *l1.Reader, blobClient blob_client.BlobClient, - vLog *types.Log, - version uint8, - batchIndex uint64, + codec encoding.Codec, + commitEvent *l1.CommitBatchEvent, parentBatchHeader []byte, chunks [][]byte, skippedL1MessageBitmap []byte, ) (*CommitBatchDAV1, error) { decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { - return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", batchIndex, err) + return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", commitEvent.BatchIndex().Uint64(), err) } - versionedHash, err := l1Client.FetchTxBlobHash(vLog) + versionedHash, err := l1Reader.FetchTxBlobHash(commitEvent.TxHash(), commitEvent.BlockHash()) if err != nil { return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err) } - blob, err := blobClient.GetBlobByVersionedHashAndBlockNumber(ctx, versionedHash, vLog.BlockNumber) + header, err := l1Reader.FetchBlockHeaderByNumber(commitEvent.BlockNumber()) + if err != nil { + return nil, fmt.Errorf("failed to get header by number, err: %w", err) + } + blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, header.Time) if err != nil { return nil, fmt.Errorf("failed to fetch blob from blob client, err: %w", err) } if blob == nil { - return nil, fmt.Errorf("unexpected, blob == nil and err != nil, batch index: %d, versionedHash: %s, blobClient: %T", batchIndex, versionedHash.String(), blobClient) + return nil, fmt.Errorf("unexpected, blob == nil and err == nil, batch index: %d, versionedHash: %s, blobClient: %T", commitEvent.BatchIndex().Uint64(), versionedHash.String(), blobClient) } // compute blob versioned hash and compare with one from tx @@ -69,7 +70,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, return nil, fmt.Errorf("decodedChunks is nil after decoding") } - v0, err := NewCommitBatchDAV0WithChunks(db, version, batchIndex, parentBatchHeader, decodedChunks, skippedL1MessageBitmap, vLog.BlockNumber) + v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) if err != nil { return nil, err } diff --git a/rollup/da_syncer/data_source.go b/rollup/da_syncer/data_source.go index 7beab3baea32..048fec6bb3e2 100644 --- a/rollup/da_syncer/data_source.go +++ b/rollup/da_syncer/data_source.go @@ -8,7 +8,7 @@ import ( "github.com/scroll-tech/go-ethereum/params" 
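The commitV1.go changes above preserve the trust model: a blob fetched from an external provider is only used once its recomputed KZG versioned hash matches the hash referenced by the L1 commit transaction, and the nil-blob guard catches providers that return neither data nor an error. Isolated as a helper, the verification step looks roughly like this (a sketch using the same kzg4844 calls as the patch, not code taken from it):

package sketch

import (
	"crypto/sha256"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
)

// verifyBlob recomputes the versioned hash of a fetched blob and
// compares it against the hash taken from the commit transaction,
// mirroring the check inside NewCommitBatchDAWithBlob.
func verifyBlob(blob *kzg4844.Blob, want common.Hash) error {
	c, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		return fmt.Errorf("failed to create blob commitment: %w", err)
	}
	got := common.Hash(kzg4844.CalcBlobHashV1(sha256.New(), &c))
	if got != want {
		return fmt.Errorf("blob versioned hash mismatch: got %s, want %s", got, want)
	}
	return nil
}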
"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type DataSource interface { @@ -19,21 +19,21 @@ type DataSource interface { type DataSourceFactory struct { config Config genesisConfig *params.ChainConfig - l1Client *rollup_sync_service.L1Client + l1Reader *l1.Reader blobClient blob_client.BlobClient db ethdb.Database } -func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Client *rollup_sync_service.L1Client, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory { +func NewDataSourceFactory(blockchain *core.BlockChain, genesisConfig *params.ChainConfig, config Config, l1Reader *l1.Reader, blobClient blob_client.BlobClient, db ethdb.Database) *DataSourceFactory { return &DataSourceFactory{ config: config, genesisConfig: genesisConfig, - l1Client: l1Client, + l1Reader: l1Reader, blobClient: blobClient, db: db, } } func (ds *DataSourceFactory) OpenDataSource(ctx context.Context, l1height uint64) (DataSource, error) { - return da.NewCalldataBlobSource(ctx, l1height, ds.l1Client, ds.blobClient, ds.db) + return da.NewCalldataBlobSource(ctx, l1height, ds.l1Reader, ds.blobClient, ds.db) } diff --git a/rollup/da_syncer/syncing_pipeline.go b/rollup/da_syncer/syncing_pipeline.go index 27eaf20cb38a..6ed84fe85186 100644 --- a/rollup/da_syncer/syncing_pipeline.go +++ b/rollup/da_syncer/syncing_pipeline.go @@ -15,8 +15,7 @@ import ( "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" - "github.com/scroll-tech/go-ethereum/rollup/rollup_sync_service" - "github.com/scroll-tech/go-ethereum/rollup/sync_service" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) // Config is the configuration parameters of data availability syncing. 
@@ -42,20 +41,18 @@ type SyncingPipeline struct { daSyncer *DASyncer } -func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient sync_service.EthClient, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { - scrollChainABI, err := rollup_sync_service.ScrollChainMetaData.GetAbi() +func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesisConfig *params.ChainConfig, db ethdb.Database, ethClient l1.Client, l1DeploymentBlock uint64, config Config) (*SyncingPipeline, error) { + l1Reader, err := l1.NewReader(ctx, l1.Config{ + ScrollChainAddress: genesisConfig.Scroll.L1Config.ScrollChainAddress, + L1MessageQueueAddress: genesisConfig.Scroll.L1Config.L1MessageQueueAddress, + }, ethClient) if err != nil { - return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) - } - - l1Client, err := rollup_sync_service.NewL1Client(ctx, ethClient, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI) - if err != nil { - return nil, err + return nil, fmt.Errorf("failed to initialize l1.Reader, err = %w", err) } blobClientList := blob_client.NewBlobClients() if config.BeaconNodeAPIEndpoint != "" { - beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint, l1Client) + beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint) if err != nil { log.Warn("failed to create BeaconNodeClient", "err", err) } else { @@ -72,7 +69,7 @@ func NewSyncingPipeline(ctx context.Context, blockchain *core.BlockChain, genesi return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag") } - dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Client, blobClientList, db) + dataSourceFactory := NewDataSourceFactory(blockchain, genesisConfig, config, l1Reader, blobClientList, db) syncedL1Height := l1DeploymentBlock - 1 from := rawdb.ReadDASyncedL1BlockNumber(db) if from != nil { diff --git a/rollup/l1/abi.go b/rollup/l1/abi.go new file mode 100644 index 000000000000..c16123aa5e8b --- /dev/null +++ b/rollup/l1/abi.go @@ -0,0 +1,245 @@ +package l1 + +import ( + "fmt" + "math/big" + + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/accounts/abi/bind" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +var ( + // ScrollChainABI holds information about ScrollChain's context and available invokable methods. + ScrollChainABI *abi.ABI + // L1MessageQueueABIManual holds information about L1MessageQueue's context and available invokable methods. + L1MessageQueueABIManual *abi.ABI +) + +func init() { + ScrollChainABI, _ = ScrollChainMetaData.GetAbi() + L1MessageQueueABIManual, _ = L1MessageQueueMetaDataManual.GetAbi() +} + +// ScrollChainMetaData contains ABI of the ScrollChain contract. 
+var ScrollChainMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": 
\"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": 
\"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", +} + +// L1MessageQueueMetaDataManual contains all meta data concerning the L1MessageQueue contract. +var L1MessageQueueMetaDataManual = &bind.MetaData{ + ABI: "[{\"type\":\"constructor\",\"inputs\":[{\"name\":\"_messenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_scrollChain\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_enforcedTxGateway\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"appendCrossDomainMessage\",\"inputs\":[{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"appendEnforcedTransaction\",\"inputs\":[{\"name\":\"_sender\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_value\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"calculateIntrinsicGasFee\",\"inputs\":[{\"name\":\"_calldata\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"computeTransactionHash\",\"inputs\":[{\"name\":\"_sender\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_value\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_target\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_data\",\"type\":\"bytes\",\"internalType\":\"bytes\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"pure\"},{\"type\":\"function\",\"name\":\"dropCrossDomainMessage\",\"inputs\":[{\"name\":\"_index\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"enforcedTxGateway\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"estimateCrossDomainMessageFee\",\"inputs\":[{\"name\":\"_gasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"finalizePoppedCrossDomainMessage\",\"inputs\":[{\"name\":\"_newFinalizedQueueIndexPlusOne\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"gas
Oracle\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"getCrossDomainMessage\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"initialize\",\"inputs\":[{\"name\":\"_messenger\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_scrollChain\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_enforcedTxGateway\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_gasOracle\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_maxGasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"isMessageDropped\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"isMessageSkipped\",\"inputs\":[{\"name\":\"_queueIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"maxGasLimit\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"messageQueue\",\"inputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"messenger\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"nextCrossDomainMessageIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"nextUnfinalizedQueueIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"owner\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"pendingQueueIndex\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"popCrossDomainMessage\",\"inputs\":[{\"name\":\"_startIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_count\",\"type\":\"uint256\",\"internalType\":\"uint256\"},{\"name\":\"_skippedBitmap\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"renounceOwnership\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"resetPoppedCrossDomainMessage\",\"inputs\":[{\"name\":\"_startIndex\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"scrollChain\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"address\",\"internalType\":\"address\"}],\"stateMutability\":\"view\"},{\"type\":\"function
\",\"name\":\"transferOwnership\",\"inputs\":[{\"name\":\"newOwner\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"updateGasOracle\",\"inputs\":[{\"name\":\"_newGasOracle\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"updateMaxGasLimit\",\"inputs\":[{\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"DequeueTransaction\",\"inputs\":[{\"name\":\"startIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"count\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"skippedBitmap\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"DropTransaction\",\"inputs\":[{\"name\":\"index\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"FinalizedDequeuedTransaction\",\"inputs\":[{\"name\":\"finalizedIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Initialized\",\"inputs\":[{\"name\":\"version\",\"type\":\"uint8\",\"indexed\":false,\"internalType\":\"uint8\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"OwnershipTransferred\",\"inputs\":[{\"name\":\"previousOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"newOwner\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"QueueTransaction\",\"inputs\":[{\"name\":\"sender\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"target\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"value\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"queueIndex\",\"type\":\"uint64\",\"indexed\":false,\"internalType\":\"uint64\"},{\"name\":\"gasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"data\",\"type\":\"bytes\",\"indexed\":false,\"internalType\":\"bytes\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"ResetDequeuedTransaction\",\"inputs\":[{\"name\":\"startIndex\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"UpdateGasOracle\",\"inputs\":[{\"name\":\"_oldGasOracle\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"_newGasOracle\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"UpdateMaxGasLimit\",\"inputs\":[{\"name\":\"_oldMaxGasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"ErrorZeroAddress\",\"inputs\":[]}]", +} + +const ( + // CommitEventType contains data of event of commit batch + CommitEventType int = iota + // RevertEventType contains data of event of revert batch + RevertEventType + // FinalizeEventType contains data of event of finalize batch + FinalizeEventType + + commitBatchMethodName = "commitBatch" + commitBatchWithBlobProofMethodName = "commitBatchWithBlobProof" + + // the 
length of method ID at the beginning of transaction data + methodIDLength = 4 +) + +// RollupEvent represents a single rollup event (commit, revert, finalize) +type RollupEvent interface { + Type() int + BatchIndex() *big.Int + BatchHash() common.Hash + TxHash() common.Hash + BlockHash() common.Hash + BlockNumber() uint64 +} + +type RollupEvents []RollupEvent + +// CommitBatchEventUnpacked represents a CommitBatch event raised by the ScrollChain contract. +type CommitBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash +} + +// CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract with additional fields. +type CommitBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (c *CommitBatchEvent) Type() int { + return CommitEventType +} + +func (c *CommitBatchEvent) BatchIndex() *big.Int { + return c.batchIndex +} + +func (c *CommitBatchEvent) BatchHash() common.Hash { + return c.batchHash +} + +func (c *CommitBatchEvent) TxHash() common.Hash { + return c.txHash +} + +func (c *CommitBatchEvent) BlockHash() common.Hash { + return c.blockHash +} + +func (c *CommitBatchEvent) BlockNumber() uint64 { + return c.blockNumber +} + +func (c *CommitBatchEvent) CompareTo(other *CommitBatchEvent) int { + return c.batchIndex.Cmp(other.batchIndex) +} + +type RevertBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash +} + +// RevertBatchEvent represents a RevertBatch event raised by the ScrollChain contract. +type RevertBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (r *RevertBatchEvent) BlockNumber() uint64 { + return r.blockNumber +} + +func (r *RevertBatchEvent) BlockHash() common.Hash { + return r.blockHash +} + +func (r *RevertBatchEvent) TxHash() common.Hash { + return r.txHash +} + +func (r *RevertBatchEvent) Type() int { + return RevertEventType +} + +func (r *RevertBatchEvent) BatchIndex() *big.Int { + return r.batchIndex +} + +func (r *RevertBatchEvent) BatchHash() common.Hash { + return r.batchHash +} + +type FinalizeBatchEventUnpacked struct { + BatchIndex *big.Int + BatchHash common.Hash + StateRoot common.Hash + WithdrawRoot common.Hash +} + +// FinalizeBatchEvent represents a FinalizeBatch event raised by the ScrollChain contract. +type FinalizeBatchEvent struct { + batchIndex *big.Int + batchHash common.Hash + stateRoot common.Hash + withdrawRoot common.Hash + txHash common.Hash + blockHash common.Hash + blockNumber uint64 +} + +func (f *FinalizeBatchEvent) TxHash() common.Hash { + return f.txHash +} + +func (f *FinalizeBatchEvent) BlockHash() common.Hash { + return f.blockHash +} + +func (f *FinalizeBatchEvent) BlockNumber() uint64 { + return f.blockNumber +} + +func (f *FinalizeBatchEvent) Type() int { + return FinalizeEventType +} + +func (f *FinalizeBatchEvent) BatchIndex() *big.Int { + return f.batchIndex +} + +func (f *FinalizeBatchEvent) BatchHash() common.Hash { + return f.batchHash +} + +func (f *FinalizeBatchEvent) StateRoot() common.Hash { + return f.stateRoot +} + +func (f *FinalizeBatchEvent) WithdrawRoot() common.Hash { + return f.withdrawRoot +} + +// UnpackLog unpacks a retrieved log into the provided output structure. 
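+// It rejects logs whose first topic does not match the event's ID, decodes the
+// non-indexed fields from log.Data, and parses the indexed fields from the
+// remaining topics. A minimal usage sketch (hypothetical vLog value; assumes a
+// ScrollChain *abi.ABI is at hand):
+//
+//	var ev CommitBatchEventUnpacked
+//	if err := UnpackLog(scrollChainABI, &ev, "CommitBatch", vLog); err != nil {
+//		// handle decode failure
+//	}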
+func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error { + if log.Topics[0] != c.Events[event].ID { + return fmt.Errorf("event signature mismatch") + } + if len(log.Data) > 0 { + if err := c.UnpackIntoInterface(out, event, log.Data); err != nil { + return err + } + } + var indexed abi.Arguments + for _, arg := range c.Events[event].Inputs { + if arg.Indexed { + indexed = append(indexed, arg) + } + } + return abi.ParseTopics(out, indexed, log.Topics[1:]) +} + +type CommitBatchArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte +} + +func newCommitBatchArgs(method *abi.Method, values []interface{}) (*CommitBatchArgs, error) { + var args CommitBatchArgs + err := method.Inputs.Copy(&args, values) + return &args, err +} + +func newCommitBatchArgsFromCommitBatchWithProof(method *abi.Method, values []interface{}) (*CommitBatchArgs, error) { + var args commitBatchWithBlobProofArgs + err := method.Inputs.Copy(&args, values) + if err != nil { + return nil, err + } + return &CommitBatchArgs{ + Version: args.Version, + ParentBatchHeader: args.ParentBatchHeader, + Chunks: args.Chunks, + SkippedL1MessageBitmap: args.SkippedL1MessageBitmap, + }, nil +} + +type commitBatchWithBlobProofArgs struct { + Version uint8 + ParentBatchHeader []byte + Chunks [][]byte + SkippedL1MessageBitmap []byte + BlobDataProof []byte +} diff --git a/rollup/l1/abi_test.go b/rollup/l1/abi_test.go new file mode 100644 index 000000000000..ab4c9d473a16 --- /dev/null +++ b/rollup/l1/abi_test.go @@ -0,0 +1,82 @@ +package l1 + +import ( + "math/big" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/crypto" +) + +func TestEventSignatures(t *testing.T) { + scrollChainABI, err := ScrollChainMetaData.GetAbi() + if err != nil { + t.Fatal("failed to get scroll chain abi", "err", err) + } + + assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), scrollChainABI.Events["CommitBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), scrollChainABI.Events["RevertBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), scrollChainABI.Events["FinalizeBatch"].ID) +} + +func TestUnpackLog(t *testing.T) { + scrollChainABI, err := ScrollChainMetaData.GetAbi() + require.NoError(t, err) + + mockBatchIndex := big.NewInt(123) + mockBatchHash := crypto.Keccak256Hash([]byte("mockBatch")) + mockStateRoot := crypto.Keccak256Hash([]byte("mockStateRoot")) + mockWithdrawRoot := crypto.Keccak256Hash([]byte("mockWithdrawRoot")) + + tests := []struct { + eventName string + mockLog types.Log + expected interface{} + out interface{} + }{ + { + "CommitBatch", + types.Log{ + Data: []byte{}, + Topics: []common.Hash{scrollChainABI.Events["CommitBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &CommitBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, + &CommitBatchEvent{}, + }, + { + "RevertBatch", + types.Log{ + Data: []byte{}, + Topics: []common.Hash{scrollChainABI.Events["RevertBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &RevertBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, + &RevertBatchEvent{}, + }, + { + "FinalizeBatch", + types.Log{ + Data: append(mockStateRoot.Bytes(), mockWithdrawRoot.Bytes()...), + Topics: 
[]common.Hash{scrollChainABI.Events["FinalizeBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &FinalizeBatchEvent{ + batchIndex: mockBatchIndex, + batchHash: mockBatchHash, + stateRoot: mockStateRoot, + withdrawRoot: mockWithdrawRoot, + }, + &FinalizeBatchEvent{}, + }, + } + + for _, tt := range tests { + t.Run(tt.eventName, func(t *testing.T) { + err := UnpackLog(scrollChainABI, tt.out, tt.eventName, tt.mockLog) + assert.NoError(t, err) + assert.Equal(t, tt.expected, tt.out) + }) + } +} diff --git a/rollup/l1/l1msg_bindings.go b/rollup/l1/l1msg_bindings.go new file mode 100644 index 000000000000..679623818423 --- /dev/null +++ b/rollup/l1/l1msg_bindings.go @@ -0,0 +1,150 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +// generated using: +// forge flatten src/L1/rollup/L1MessageQueue.sol > flatten.sol +// go run github.com/scroll-tech/go-ethereum/cmd/abigen@develop --sol flatten.sol --pkg rollup --out ./L1MessageQueue.go --contract L1MessageQueue + +package l1 + +import ( + "math/big" + "strings" + + ethereum "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/accounts/abi/bind" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +// L1MessageQueueMetaData contains all meta data concerning the L1MessageQueue contract. +var L1MessageQueueMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"startIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"count\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"DequeueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"queueIndex\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"gasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"QueueTransaction\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGateway\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"UpdateEnforcedTxGateway\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldGasOracle\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"UpdateGasOracle\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_oldMaxGasLimit\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\
"name\":\"UpdateMaxGasLimit\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"appendEnforcedTransaction\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_calldata\",\"type\":\"bytes\"}],\"name\":\"calculateIntrinsicGasFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_sender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"computeTransactionHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"pure\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"enforcedTxGateway\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_gasLimit\",\"type\":\"uint256\"}],\"name\":\"estimateCrossDomainMessageFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"gasOracle\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_queueIndex\",\"type\":\"uint256\"}],\"name\":\"getCrossDomainMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_messenger\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_scrollChain\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_enforcedTxGateway\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_gasOracle\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_maxGasLimit\",\"type\":\"uint256\"}],\"name\":\"initialize\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"maxGasLimit\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"name\":\"messageQueue\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\
":\"messenger\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"nextCrossDomainMessageIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"pendingQueueIndex\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_startIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_count\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_skippedBitmap\",\"type\":\"uint256\"}],\"name\":\"popCrossDomainMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scrollChain\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGateway\",\"type\":\"address\"}],\"name\":\"updateEnforcedTxGateway\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newGasOracle\",\"type\":\"address\"}],\"name\":\"updateGasOracle\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_newMaxGasLimit\",\"type\":\"uint256\"}],\"name\":\"updateMaxGasLimit\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +// L1MessageQueueABI is the input ABI used to generate the binding from. +// Deprecated: Use L1MessageQueueMetaData.ABI instead. +var L1MessageQueueABI = L1MessageQueueMetaData.ABI + +// L1MessageQueueFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type L1MessageQueueFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// NewL1MessageQueueFilterer creates a new log filterer instance of L1MessageQueue, bound to a specific deployed contract. +func NewL1MessageQueueFilterer(address common.Address, filterer bind.ContractFilterer) (*L1MessageQueueFilterer, error) { + contract, err := bindL1MessageQueue(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &L1MessageQueueFilterer{contract: contract}, nil +} + +// bindL1MessageQueue binds a generic wrapper to an already deployed contract. 
+func bindL1MessageQueue(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(L1MessageQueueABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// L1MessageQueueQueueTransactionIterator is returned from FilterQueueTransaction and is used to iterate over the raw logs and unpacked data for QueueTransaction events raised by the L1MessageQueue contract. +type L1MessageQueueQueueTransactionIterator struct { + Event *L1MessageQueueQueueTransaction // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *L1MessageQueueQueueTransactionIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(L1MessageQueueQueueTransaction) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(L1MessageQueueQueueTransaction) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *L1MessageQueueQueueTransactionIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *L1MessageQueueQueueTransactionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// L1MessageQueueQueueTransaction represents a QueueTransaction event raised by the L1MessageQueue contract. +type L1MessageQueueQueueTransaction struct { + Sender common.Address + Target common.Address + Value *big.Int + QueueIndex uint64 + GasLimit *big.Int + Data []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterQueueTransaction is a free log retrieval operation binding the contract event 0x69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e. 
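+// The topic hash above is the keccak256 of the canonical event signature; a
+// sketch of how it could be recomputed (not part of the generated binding):
+//
+//	crypto.Keccak256Hash([]byte("QueueTransaction(address,address,uint256,uint64,uint256,bytes)"))
+//
+// A filtering sketch (hypothetical queueAddr, ethClient and fromBlock values):
+//
+//	filterer, _ := NewL1MessageQueueFilterer(queueAddr, ethClient)
+//	it, err := filterer.FilterQueueTransaction(&bind.FilterOpts{Start: fromBlock}, nil, nil)
+//	if err == nil {
+//		for it.Next() {
+//			// it.Event holds the decoded QueueTransaction
+//		}
+//	}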
+// +// Solidity: event QueueTransaction(address indexed sender, address indexed target, uint256 value, uint64 queueIndex, uint256 gasLimit, bytes data) +func (_L1MessageQueue *L1MessageQueueFilterer) FilterQueueTransaction(opts *bind.FilterOpts, sender []common.Address, target []common.Address) (*L1MessageQueueQueueTransactionIterator, error) { + + var senderRule []interface{} + for _, senderItem := range sender { + senderRule = append(senderRule, senderItem) + } + var targetRule []interface{} + for _, targetItem := range target { + targetRule = append(targetRule, targetItem) + } + + logs, sub, err := _L1MessageQueue.contract.FilterLogs(opts, "QueueTransaction", senderRule, targetRule) + if err != nil { + return nil, err + } + return &L1MessageQueueQueueTransactionIterator{contract: _L1MessageQueue.contract, event: "QueueTransaction", logs: logs, sub: sub}, nil +} diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go new file mode 100644 index 000000000000..cc06296b657e --- /dev/null +++ b/rollup/l1/reader.go @@ -0,0 +1,381 @@ +package l1 + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/log" + "github.com/scroll-tech/go-ethereum/rpc" +) + +const ( + commitBatchEventName = "CommitBatch" + revertBatchEventName = "RevertBatch" + finalizeBatchEventName = "FinalizeBatch" + nextUnfinalizedQueueIndex = "nextUnfinalizedQueueIndex" + lastFinalizedBatchIndex = "lastFinalizedBatchIndex" + + defaultL1MsgFetchBlockRange = 500 + defaultRollupEventsFetchBlockRange = 100 +) + +type Reader struct { + ctx context.Context + config Config + client Client + + scrollChainABI *abi.ABI + l1MessageQueueABI *abi.ABI + l1CommitBatchEventSignature common.Hash + l1RevertBatchEventSignature common.Hash + l1FinalizeBatchEventSignature common.Hash +} + +// Config is the configuration parameters of data availability syncing. 
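+// Both addresses must be non-zero; NewReader rejects zero values. A
+// construction sketch (hypothetical addresses and client):
+//
+//	reader, err := NewReader(ctx, Config{
+//		ScrollChainAddress:    common.HexToAddress("0x..."),
+//		L1MessageQueueAddress: common.HexToAddress("0x..."),
+//	}, l1Client)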
+type Config struct { + ScrollChainAddress common.Address // address of ScrollChain contract + L1MessageQueueAddress common.Address // address of L1MessageQueue contract +} + +// NewReader initializes a new Reader instance +func NewReader(ctx context.Context, config Config, l1Client Client) (*Reader, error) { + if config.ScrollChainAddress == (common.Address{}) { + return nil, errors.New("must pass non-zero scrollChainAddress to L1Client") + } + + if config.L1MessageQueueAddress == (common.Address{}) { + return nil, errors.New("must pass non-zero l1MessageQueueAddress to L1Client") + } + + reader := Reader{ + ctx: ctx, + config: config, + client: l1Client, + + scrollChainABI: ScrollChainABI, + l1MessageQueueABI: L1MessageQueueABIManual, + l1CommitBatchEventSignature: ScrollChainABI.Events[commitBatchEventName].ID, + l1RevertBatchEventSignature: ScrollChainABI.Events[revertBatchEventName].ID, + l1FinalizeBatchEventSignature: ScrollChainABI.Events[finalizeBatchEventName].ID, + } + + return &reader, nil +} + +func (r *Reader) FinalizedL1MessageQueueIndex(blockNumber uint64) (uint64, error) { + data, err := r.l1MessageQueueABI.Pack(nextUnfinalizedQueueIndex) + if err != nil { + return 0, fmt.Errorf("failed to pack %s: %w", nextUnfinalizedQueueIndex, err) + } + + result, err := r.client.CallContract(r.ctx, ethereum.CallMsg{ + To: &r.config.L1MessageQueueAddress, + Data: data, + }, new(big.Int).SetUint64(blockNumber)) + if err != nil { + return 0, fmt.Errorf("failed to call %s: %w", nextUnfinalizedQueueIndex, err) + } + + var parsedResult *big.Int + if err = r.l1MessageQueueABI.UnpackIntoInterface(&parsedResult, nextUnfinalizedQueueIndex, result); err != nil { + return 0, fmt.Errorf("failed to unpack result: %w", err) + } + + next := parsedResult.Uint64() + if next == 0 { + return 0, nil + } + + return next - 1, nil +} + +func (r *Reader) LatestFinalizedBatch(blockNumber uint64) (uint64, error) { + data, err := r.scrollChainABI.Pack(lastFinalizedBatchIndex) + if err != nil { + return 0, fmt.Errorf("failed to pack %s: %w", lastFinalizedBatchIndex, err) + } + + result, err := r.client.CallContract(r.ctx, ethereum.CallMsg{ + To: &r.config.ScrollChainAddress, + Data: data, + }, new(big.Int).SetUint64(blockNumber)) + if err != nil { + return 0, fmt.Errorf("failed to call %s: %w", lastFinalizedBatchIndex, err) + } + + var parsedResult *big.Int + if err = r.scrollChainABI.UnpackIntoInterface(&parsedResult, lastFinalizedBatchIndex, result); err != nil { + return 0, fmt.Errorf("failed to unpack result: %w", err) + } + + return parsedResult.Uint64(), nil +} + +// GetLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. 
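+// It requests the header for the special "finalized" block tag
+// (rpc.FinalizedBlockNumber), which the L1 node resolves against the beacon
+// chain's finalized checkpoint, so no confirmation-depth arithmetic is needed here.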
+func (r *Reader) GetLatestFinalizedBlockNumber() (uint64, error) { + header, err := r.client.HeaderByNumber(r.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) + if err != nil { + return 0, err + } + if !header.Number.IsInt64() { + return 0, fmt.Errorf("received unexpected block number in L1Client: %v", header.Number) + } + return header.Number.Uint64(), nil +} + +// FetchBlockHeaderByNumber fetches the block header by number +func (r *Reader) FetchBlockHeaderByNumber(blockNumber uint64) (*types.Header, error) { + return r.client.HeaderByNumber(r.ctx, big.NewInt(int64(blockNumber))) +} + +// FetchTxData fetches tx data corresponding to given event log +func (r *Reader) FetchTxData(txHash, blockHash common.Hash) ([]byte, error) { + tx, err := r.fetchTx(txHash, blockHash) + if err != nil { + return nil, err + } + return tx.Data(), nil +} + +// FetchTxBlobHash fetches tx blob hash corresponding to given event log +func (r *Reader) FetchTxBlobHash(txHash, blockHash common.Hash) (common.Hash, error) { + tx, err := r.fetchTx(txHash, blockHash) + if err != nil { + return common.Hash{}, err + } + blobHashes := tx.BlobHashes() + if len(blobHashes) == 0 { + return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", txHash.Hex()) + } + return blobHashes[0], nil +} + +// FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. +func (r *Reader) FetchRollupEventsInRange(from, to uint64) (RollupEvents, error) { + log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) + var logs []types.Log + + err := queryInBatches(r.ctx, from, to, defaultRollupEventsFetchBlockRange, func(from, to uint64) (bool, error) { + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(from)), // inclusive + ToBlock: big.NewInt(int64(to)), // inclusive + Addresses: []common.Address{ + r.config.ScrollChainAddress, + }, + Topics: make([][]common.Hash, 1), + } + query.Topics[0] = make([]common.Hash, 3) + query.Topics[0][0] = r.l1CommitBatchEventSignature + query.Topics[0][1] = r.l1RevertBatchEventSignature + query.Topics[0][2] = r.l1FinalizeBatchEventSignature + + logsBatch, err := r.client.FilterLogs(r.ctx, query) + if err != nil { + return false, fmt.Errorf("failed to filter logs, err: %w", err) + } + logs = append(logs, logsBatch...) + return true, nil + }) + if err != nil { + return nil, err + } + return r.processLogsToRollupEvents(logs) +} + +// FetchRollupEventsInRangeWithCallback retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. 
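+// Events are delivered in ascending block order; returning false from the
+// callback stops the scan early without error. A usage sketch (hypothetical
+// bounds and target index):
+//
+//	err := reader.FetchRollupEventsInRangeWithCallback(from, to, func(event RollupEvent) bool {
+//		return event.BatchIndex().Uint64() < targetIndex // false stops the scan
+//	})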
+func (r *Reader) FetchRollupEventsInRangeWithCallback(from, to uint64, callback func(event RollupEvent) bool) error { + log.Trace("L1Client fetchRollupEventsInRange", "fromBlock", from, "toBlock", to) + + err := queryInBatches(r.ctx, from, to, defaultRollupEventsFetchBlockRange, func(from, to uint64) (bool, error) { + query := ethereum.FilterQuery{ + FromBlock: big.NewInt(int64(from)), // inclusive + ToBlock: big.NewInt(int64(to)), // inclusive + Addresses: []common.Address{ + r.config.ScrollChainAddress, + }, + Topics: make([][]common.Hash, 1), + } + query.Topics[0] = make([]common.Hash, 3) + query.Topics[0][0] = r.l1CommitBatchEventSignature + query.Topics[0][1] = r.l1RevertBatchEventSignature + query.Topics[0][2] = r.l1FinalizeBatchEventSignature + + logsBatch, err := r.client.FilterLogs(r.ctx, query) + if err != nil { + return false, fmt.Errorf("failed to filter logs, err: %w", err) + } + + rollupEvents, err := r.processLogsToRollupEvents(logsBatch) + if err != nil { + return false, fmt.Errorf("failed to process logs to rollup events, err: %w", err) + } + + for _, event := range rollupEvents { + if !callback(event) { + return false, nil + } + } + + return true, nil + }) + if err != nil { + return err + } + + return nil +} + +func (r *Reader) processLogsToRollupEvents(logs []types.Log) (RollupEvents, error) { + var rollupEvents RollupEvents + var rollupEvent RollupEvent + var err error + + for _, vLog := range logs { + switch vLog.Topics[0] { + case r.l1CommitBatchEventSignature: + event := &CommitBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, commitBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack commit rollup event log, err: %w", err) + } + log.Trace("found new CommitBatch event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &CommitBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + case r.l1RevertBatchEventSignature: + event := &RevertBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, revertBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack revert rollup event log, err: %w", err) + } + log.Trace("found new RevertBatchType event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &RevertBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + case r.l1FinalizeBatchEventSignature: + event := &FinalizeBatchEventUnpacked{} + if err = UnpackLog(r.scrollChainABI, event, finalizeBatchEventName, vLog); err != nil { + return nil, fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err) + } + log.Trace("found new FinalizeBatchType event", "batch index", event.BatchIndex.Uint64()) + rollupEvent = &FinalizeBatchEvent{ + batchIndex: event.BatchIndex, + batchHash: event.BatchHash, + stateRoot: event.StateRoot, + withdrawRoot: event.WithdrawRoot, + txHash: vLog.TxHash, + blockHash: vLog.BlockHash, + blockNumber: vLog.BlockNumber, + } + + default: + return nil, fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex()) + } + + rollupEvents = append(rollupEvents, rollupEvent) + } + return rollupEvents, nil +} + +func queryInBatches(ctx context.Context, fromBlock, toBlock uint64, batchSize uint64, queryFunc func(from, to uint64) (bool, error)) error { + for from := fromBlock; from <= toBlock; from += batchSize { + // 
check if context is done and return if it is + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + to := from + batchSize - 1 + if to > toBlock { + to = toBlock + } + cont, err := queryFunc(from, to) + if err != nil { + return fmt.Errorf("error querying blocks %d to %d: %w", from, to, err) + } + if !cont { + break + } + } + return nil +} + +// fetchTx fetches tx corresponding to given event log +func (r *Reader) fetchTx(txHash, blockHash common.Hash) (*types.Transaction, error) { + tx, _, err := r.client.TransactionByHash(r.ctx, txHash) + if err != nil { + log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", + "tx hash", txHash.Hex(), "block hash", blockHash.Hex(), "err", err) + block, err := r.client.BlockByHash(r.ctx, blockHash) + if err != nil { + return nil, fmt.Errorf("failed to get block by hash, block hash: %v, err: %w", blockHash.Hex(), err) + } + + found := false + for _, txInBlock := range block.Transactions() { + if txInBlock.Hash() == txHash { + tx = txInBlock + found = true + break + } + } + if !found { + return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block hash: %v", txHash.Hex(), blockHash.Hex()) + } + } + + return tx, nil +} + +func (r *Reader) FetchCommitTxData(commitEvent *CommitBatchEvent) (*CommitBatchArgs, error) { + tx, err := r.fetchTx(commitEvent.TxHash(), commitEvent.BlockHash()) + if err != nil { + return nil, err + } + txData := tx.Data() + + if len(txData) < methodIDLength { + return nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength) + } + + method, err := r.scrollChainABI.MethodById(txData[:methodIDLength]) + if err != nil { + return nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err) + } + values, err := method.Inputs.Unpack(txData[methodIDLength:]) + if err != nil { + return nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err) + } + + var args *CommitBatchArgs + if method.Name == commitBatchMethodName { + args, err = newCommitBatchArgs(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args %s, values: %+v, err: %w", commitBatchMethodName, values, err) + } + } else if method.Name == commitBatchWithBlobProofMethodName { + args, err = newCommitBatchArgsFromCommitBatchWithProof(method, values) + if err != nil { + return nil, fmt.Errorf("failed to decode calldata into commitBatch args %s, values: %+v, err: %w", commitBatchWithBlobProofMethodName, values, err) + } + } else { + return nil, fmt.Errorf("unknown method name for commit transaction: %s", method.Name) + } + + return args, nil +} diff --git a/rollup/l1/reader_test.go b/rollup/l1/reader_test.go new file mode 100644 index 000000000000..5f4a2c95817a --- /dev/null +++ b/rollup/l1/reader_test.go @@ -0,0 +1,125 @@ +package l1 + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestQueryInBatches(t *testing.T) { + tests := []struct { + name string + fromBlock uint64 + toBlock uint64 + batchSize uint64 + queryFunc func(from, to uint64) (bool, error) + expectErr bool + expectedErr string + expectedCalls []struct { + from uint64 + to uint64 + } + }{ + { + name: "Successful query in single batch", + fromBlock: 1, + toBlock: 10, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return true, nil + }, + expectErr: false, 
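+			// the whole range [1, 10] fits into a single batch of size 10,
+			// so queryFunc should be invoked exactly once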
+ expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + { + name: "Successful query in multiple batches", + fromBlock: 1, + toBlock: 80, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return true, nil + }, + expectErr: false, + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + {from: 11, to: 20}, + {from: 21, to: 30}, + {from: 31, to: 40}, + {from: 41, to: 50}, + {from: 51, to: 60}, + {from: 61, to: 70}, + {from: 71, to: 80}, + }, + }, + { + name: "Query function returns error", + fromBlock: 1, + toBlock: 10, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + return false, errors.New("query error") + }, + expectErr: true, + expectedErr: "error querying blocks 1 to 10: query error", + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + { + name: "Query function returns false to stop", + fromBlock: 1, + toBlock: 20, + batchSize: 10, + queryFunc: func(from, to uint64) (bool, error) { + if from == 1 { + return false, nil + } + return true, nil + }, + expectErr: false, + expectedCalls: []struct { + from uint64 + to uint64 + }{ + {from: 1, to: 10}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var calls []struct { + from uint64 + to uint64 + } + queryFunc := func(from, to uint64) (bool, error) { + calls = append(calls, struct { + from uint64 + to uint64 + }{from, to}) + return tt.queryFunc(from, to) + } + err := queryInBatches(context.Background(), tt.fromBlock, tt.toBlock, tt.batchSize, queryFunc) + if tt.expectErr { + require.Error(t, err) + require.EqualError(t, err, tt.expectedErr) + } else { + require.NoError(t, err) + } + require.Equal(t, tt.expectedCalls, calls) + }) + } +} diff --git a/rollup/l1/types.go b/rollup/l1/types.go new file mode 100644 index 000000000000..8c030815ec28 --- /dev/null +++ b/rollup/l1/types.go @@ -0,0 +1,22 @@ +package l1 + +import ( + "context" + "math/big" + + "github.com/scroll-tech/go-ethereum" + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/types" +) + +type Client interface { + BlockNumber(ctx context.Context) (uint64, error) + ChainID(ctx context.Context) (*big.Int, error) + FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) + HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) + HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) + SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) + TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, isPending bool, err error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) +} From e51182d752621e7cdb40d08d4e49471aac519afd Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Wed, 11 Dec 2024 10:51:08 +0800 Subject: [PATCH 10/36] fix tests and linter errors --- rollup/da_syncer/da/calldata_blob_source.go | 41 +++++--------- rollup/l1/abi_test.go | 59 ++++++++++----------- rollup/l1/reader.go | 1 - 3 files changed, 41 insertions(+), 60 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index a7489c72c838..30ac5ca7f145 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ 
b/rollup/da_syncer/da/calldata_blob_source.go @@ -8,7 +8,6 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/accounts/abi" - "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" @@ -16,15 +15,7 @@ import ( ) const ( - callDataBlobSourceFetchBlockRange uint64 = 500 - commitBatchEventName = "CommitBatch" - revertBatchEventName = "RevertBatch" - finalizeBatchEventName = "FinalizeBatch" - commitBatchMethodName = "commitBatch" - commitBatchWithBlobProofMethodName = "commitBatchWithBlobProof" - - // the length of method ID at the beginning of transaction data - methodIDLength = 4 + callDataBlobSourceFetchBlockRange uint64 = 500 ) var ( @@ -32,15 +23,12 @@ var ( ) type CalldataBlobSource struct { - ctx context.Context - l1Reader *l1.Reader - blobClient blob_client.BlobClient - l1height uint64 - scrollChainABI *abi.ABI - l1CommitBatchEventSignature common.Hash - l1RevertBatchEventSignature common.Hash - l1FinalizeBatchEventSignature common.Hash - db ethdb.Database + ctx context.Context + l1Reader *l1.Reader + blobClient blob_client.BlobClient + l1height uint64 + scrollChainABI *abi.ABI + db ethdb.Database l1Finalized uint64 } @@ -51,15 +39,12 @@ func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Re return nil, fmt.Errorf("failed to get scroll chain abi: %w", err) } return &CalldataBlobSource{ - ctx: ctx, - l1Reader: l1Reader, - blobClient: blobClient, - l1height: l1height, - scrollChainABI: scrollChainABI, - l1CommitBatchEventSignature: scrollChainABI.Events[commitBatchEventName].ID, - l1RevertBatchEventSignature: scrollChainABI.Events[revertBatchEventName].ID, - l1FinalizeBatchEventSignature: scrollChainABI.Events[finalizeBatchEventName].ID, - db: db, + ctx: ctx, + l1Reader: l1Reader, + blobClient: blobClient, + l1height: l1height, + scrollChainABI: scrollChainABI, + db: db, }, nil } diff --git a/rollup/l1/abi_test.go b/rollup/l1/abi_test.go index ab4c9d473a16..e50e8ccaa269 100644 --- a/rollup/l1/abi_test.go +++ b/rollup/l1/abi_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" @@ -13,20 +12,12 @@ import ( ) func TestEventSignatures(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - if err != nil { - t.Fatal("failed to get scroll chain abi", "err", err) - } - - assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), scrollChainABI.Events["CommitBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), scrollChainABI.Events["RevertBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), scrollChainABI.Events["FinalizeBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), ScrollChainABI.Events["CommitBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), ScrollChainABI.Events["RevertBatch"].ID) + assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), ScrollChainABI.Events["FinalizeBatch"].ID) } func TestUnpackLog(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - mockBatchIndex := big.NewInt(123) mockBatchHash := 
crypto.Keccak256Hash([]byte("mockBatch")) mockStateRoot := crypto.Keccak256Hash([]byte("mockStateRoot")) @@ -39,42 +30,48 @@ func TestUnpackLog(t *testing.T) { out interface{} }{ { - "CommitBatch", + commitBatchEventName, types.Log{ - Data: []byte{}, - Topics: []common.Hash{scrollChainABI.Events["CommitBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + Data: nil, + Topics: []common.Hash{ScrollChainABI.Events[commitBatchEventName].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, }, - &CommitBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, - &CommitBatchEvent{}, + &CommitBatchEventUnpacked{ + BatchIndex: mockBatchIndex, + BatchHash: mockBatchHash, + }, + &CommitBatchEventUnpacked{}, }, { - "RevertBatch", + revertBatchEventName, types.Log{ - Data: []byte{}, - Topics: []common.Hash{scrollChainABI.Events["RevertBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + Data: nil, + Topics: []common.Hash{ScrollChainABI.Events[revertBatchEventName].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + }, + &RevertBatchEventUnpacked{ + BatchIndex: mockBatchIndex, + BatchHash: mockBatchHash, }, - &RevertBatchEvent{batchIndex: mockBatchIndex, batchHash: mockBatchHash}, - &RevertBatchEvent{}, + &RevertBatchEventUnpacked{}, }, { - "FinalizeBatch", + finalizeBatchEventName, types.Log{ Data: append(mockStateRoot.Bytes(), mockWithdrawRoot.Bytes()...), - Topics: []common.Hash{scrollChainABI.Events["FinalizeBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, + Topics: []common.Hash{ScrollChainABI.Events[finalizeBatchEventName].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, }, - &FinalizeBatchEvent{ - batchIndex: mockBatchIndex, - batchHash: mockBatchHash, - stateRoot: mockStateRoot, - withdrawRoot: mockWithdrawRoot, + &FinalizeBatchEventUnpacked{ + BatchIndex: mockBatchIndex, + BatchHash: mockBatchHash, + StateRoot: mockStateRoot, + WithdrawRoot: mockWithdrawRoot, }, - &FinalizeBatchEvent{}, + &FinalizeBatchEventUnpacked{}, }, } for _, tt := range tests { t.Run(tt.eventName, func(t *testing.T) { - err := UnpackLog(scrollChainABI, tt.out, tt.eventName, tt.mockLog) + err := UnpackLog(ScrollChainABI, tt.out, tt.eventName, tt.mockLog) assert.NoError(t, err) assert.Equal(t, tt.expected, tt.out) }) diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go index cc06296b657e..eddc77d71350 100644 --- a/rollup/l1/reader.go +++ b/rollup/l1/reader.go @@ -21,7 +21,6 @@ const ( nextUnfinalizedQueueIndex = "nextUnfinalizedQueueIndex" lastFinalizedBatchIndex = "lastFinalizedBatchIndex" - defaultL1MsgFetchBlockRange = 500 defaultRollupEventsFetchBlockRange = 100 ) From 4e6f759c79eb5aaf3277fba23d684c86380c41c0 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 12 Dec 2024 11:23:32 +0700 Subject: [PATCH 11/36] address review comments --- rollup/da_syncer/da/calldata_blob_source.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 30ac5ca7f145..5b665aa0160f 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -26,7 +26,7 @@ type CalldataBlobSource struct { ctx context.Context l1Reader *l1.Reader blobClient blob_client.BlobClient - l1height uint64 + l1Height uint64 scrollChainABI *abi.ABI db ethdb.Database @@ -42,7 +42,7 @@ func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Re ctx: ctx, l1Reader: l1Reader, 
blobClient: blobClient, - l1height: l1height, + l1Height: l1height, scrollChainABI: scrollChainABI, db: db, }, nil @@ -50,7 +50,7 @@ func NewCalldataBlobSource(ctx context.Context, l1height uint64, l1Reader *l1.Re func (ds *CalldataBlobSource) NextData() (Entries, error) { var err error - to := ds.l1height + callDataBlobSourceFetchBlockRange + to := ds.l1Height + callDataBlobSourceFetchBlockRange // If there's not enough finalized blocks to request up to, we need to query finalized block number. // Otherwise, we know that there's more finalized blocks than we want to request up to @@ -64,25 +64,25 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { to = min(to, ds.l1Finalized) } - if ds.l1height > to { + if ds.l1Height > to { return nil, ErrSourceExhausted } - rollupEvents, err := ds.l1Reader.FetchRollupEventsInRange(ds.l1height, to) + rollupEvents, err := ds.l1Reader.FetchRollupEventsInRange(ds.l1Height, to) if err != nil { - return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get rollup events, l1height: %d, error: %v", ds.l1height, err)) + return nil, serrors.NewTemporaryError(fmt.Errorf("cannot get rollup events, l1Height: %d, error: %v", ds.l1Height, err)) } da, err := ds.processRollupEventsToDA(rollupEvents) if err != nil { return nil, serrors.NewTemporaryError(fmt.Errorf("failed to process rollup events to DA, error: %v", err)) } - ds.l1height = to + 1 + ds.l1Height = to + 1 return da, nil } func (ds *CalldataBlobSource) L1Height() uint64 { - return ds.l1height + return ds.l1Height } func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { From ab3e8732f99126c143050fd132387b44a5a43144 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 26 Dec 2024 15:00:45 +0800 Subject: [PATCH 12/36] refactor rollup sync service / verifier to use CalldataBlobSource to retrieve data from L1 --- rollup/da_syncer/da/calldata_blob_source.go | 24 +- rollup/da_syncer/da/commitV0.go | 27 +- rollup/da_syncer/da/commitV1.go | 21 +- rollup/da_syncer/da/da.go | 8 + rollup/da_syncer/da/finalize.go | 20 +- rollup/da_syncer/da/revert.go | 20 +- rollup/l1/reader.go | 13 +- .../rollup_sync_service.go | 340 ++++++------------ 8 files changed, 210 insertions(+), 263 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 5b665aa0160f..bf4a2a24ef2c 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -81,10 +81,18 @@ func (ds *CalldataBlobSource) NextData() (Entries, error) { return da, nil } +func (ds *CalldataBlobSource) SetL1Height(l1Height uint64) { + ds.l1Height = l1Height +} + func (ds *CalldataBlobSource) L1Height() uint64 { return ds.l1Height } +func (ds *CalldataBlobSource) L1Finalized() uint64 { + return ds.l1Finalized +} + func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { var entries Entries var entry Entry @@ -102,10 +110,22 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven } case l1.RevertEventType: - entry = NewRevertBatch(rollupEvent.BatchIndex().Uint64()) + revertEvent, ok := rollupEvent.(*l1.RevertBatchEvent) + // this should never happen because we just check event type + if !ok { + return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) + } + + entry = NewRevertBatch(revertEvent) case l1.FinalizeEventType: - entry = NewFinalizeBatch(rollupEvent.BatchIndex().Uint64()) 
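+			// as in the revert case above, downcast to the concrete event type so the
+			// DA entry carries the full event (tx hash, block hash and number), not just the index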
+ finalizeEvent, ok := rollupEvent.(*l1.FinalizeBatchEvent) + // this should never happen because we just check event type + if !ok { + return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) + } + + entry = NewFinalizeBatch(finalizeEvent) default: return nil, fmt.Errorf("unknown rollup event, type: %v", rollupEvent.Type()) diff --git a/rollup/da_syncer/da/commitV0.go b/rollup/da_syncer/da/commitV0.go index 2c4f07869da1..960151e6cda4 100644 --- a/rollup/da_syncer/da/commitV0.go +++ b/rollup/da_syncer/da/commitV0.go @@ -6,6 +6,7 @@ import ( "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" @@ -21,7 +22,7 @@ type CommitBatchDAV0 struct { chunks []*encoding.DAChunkRawTx l1Txs []*types.L1MessageTx - l1BlockNumber uint64 + event *l1.CommitBatchEvent } func NewCommitBatchDAV0(db ethdb.Database, @@ -36,7 +37,7 @@ func NewCommitBatchDAV0(db ethdb.Database, return nil, fmt.Errorf("failed to unpack chunks: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) } - return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) + return NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) } func NewCommitBatchDAV0WithChunks(db ethdb.Database, @@ -45,7 +46,7 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database, parentBatchHeader []byte, decodedChunks []*encoding.DAChunkRawTx, skippedL1MessageBitmap []byte, - l1BlockNumber uint64, + event *l1.CommitBatchEvent, ) (*CommitBatchDAV0, error) { parentTotalL1MessagePopped := getBatchTotalL1MessagePopped(parentBatchHeader) l1Txs, err := getL1Messages(db, parentTotalL1MessagePopped, skippedL1MessageBitmap, getTotalMessagesPoppedFromChunks(decodedChunks)) @@ -60,7 +61,7 @@ func NewCommitBatchDAV0WithChunks(db ethdb.Database, skippedL1MessageBitmap: skippedL1MessageBitmap, chunks: decodedChunks, l1Txs: l1Txs, - l1BlockNumber: l1BlockNumber, + event: event, }, nil } @@ -70,12 +71,28 @@ func NewCommitBatchDAV0Empty() *CommitBatchDAV0 { } } +func (c *CommitBatchDAV0) Version() uint8 { + return c.version +} + +func (c *CommitBatchDAV0) Chunks() []*encoding.DAChunkRawTx { + return c.chunks +} + +func (c *CommitBatchDAV0) BlobVersionedHashes() []common.Hash { + return nil +} + func (c *CommitBatchDAV0) Type() Type { return CommitBatchV0Type } func (c *CommitBatchDAV0) L1BlockNumber() uint64 { - return c.l1BlockNumber + return c.event.BlockNumber() +} + +func (c *CommitBatchDAV0) Event() l1.RollupEvent { + return c.event } func (c *CommitBatchDAV0) BatchIndex() uint64 { diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 0433479c950b..29eb065ed3e8 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -17,6 +17,8 @@ import ( type CommitBatchDAV1 struct { *CommitBatchDAV0 + + versionedHashes []common.Hash } func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, @@ -33,11 +35,17 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", commitEvent.BatchIndex().Uint64(), err) } - versionedHash, err := l1Reader.FetchTxBlobHash(commitEvent.TxHash(), commitEvent.BlockHash()) + versionedHashes, err := 
l1Reader.FetchTxBlobHashes(commitEvent.TxHash(), commitEvent.BlockHash()) if err != nil { return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err) } + // with CommitBatchDAV1 we expect only one versioned hash as we commit only one blob per batch submission + if len(versionedHashes) != 1 { + return nil, fmt.Errorf("unexpected number of versioned hashes: %d", len(versionedHashes)) + } + versionedHash := versionedHashes[0] + header, err := l1Reader.FetchBlockHeaderByNumber(commitEvent.BlockNumber()) if err != nil { return nil, fmt.Errorf("failed to get header by number, err: %w", err) @@ -70,14 +78,21 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, return nil, fmt.Errorf("decodedChunks is nil after decoding") } - v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent.BlockNumber()) + v0, err := NewCommitBatchDAV0WithChunks(db, uint8(codec.Version()), commitEvent.BatchIndex().Uint64(), parentBatchHeader, decodedChunks, skippedL1MessageBitmap, commitEvent) if err != nil { return nil, err } - return &CommitBatchDAV1{v0}, nil + return &CommitBatchDAV1{ + CommitBatchDAV0: v0, + versionedHashes: versionedHashes, + }, nil } func (c *CommitBatchDAV1) Type() Type { return CommitBatchWithBlobType } + +func (c *CommitBatchDAV1) BlobVersionedHashes() []common.Hash { + return c.versionedHashes +} diff --git a/rollup/da_syncer/da/da.go b/rollup/da_syncer/da/da.go index 1ad618d7ba3d..2773da2951be 100644 --- a/rollup/da_syncer/da/da.go +++ b/rollup/da_syncer/da/da.go @@ -3,7 +3,11 @@ package da import ( "math/big" + "github.com/scroll-tech/da-codec/encoding" + + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) type Type int @@ -25,11 +29,15 @@ type Entry interface { BatchIndex() uint64 L1BlockNumber() uint64 CompareTo(Entry) int + Event() l1.RollupEvent } type EntryWithBlocks interface { Entry Blocks() []*PartialBlock + Version() uint8 + Chunks() []*encoding.DAChunkRawTx + BlobVersionedHashes() []common.Hash } type Entries []Entry diff --git a/rollup/da_syncer/da/finalize.go b/rollup/da_syncer/da/finalize.go index 14d6c2a644cb..eab805d52482 100644 --- a/rollup/da_syncer/da/finalize.go +++ b/rollup/da_syncer/da/finalize.go @@ -1,14 +1,16 @@ package da -type FinalizeBatch struct { - batchIndex uint64 +import ( + "github.com/scroll-tech/go-ethereum/rollup/l1" +) - l1BlockNumber uint64 +type FinalizeBatch struct { + event *l1.FinalizeBatchEvent } -func NewFinalizeBatch(batchIndex uint64) *FinalizeBatch { +func NewFinalizeBatch(event *l1.FinalizeBatchEvent) *FinalizeBatch { return &FinalizeBatch{ - batchIndex: batchIndex, + event: event, } } @@ -17,11 +19,15 @@ func (f *FinalizeBatch) Type() Type { } func (f *FinalizeBatch) L1BlockNumber() uint64 { - return f.l1BlockNumber + return f.event.BlockNumber() } func (f *FinalizeBatch) BatchIndex() uint64 { - return f.batchIndex + return f.event.BatchIndex().Uint64() +} + +func (f *FinalizeBatch) Event() l1.RollupEvent { + return f.event } func (f *FinalizeBatch) CompareTo(other Entry) int { diff --git a/rollup/da_syncer/da/revert.go b/rollup/da_syncer/da/revert.go index d84f22ebaa7b..f8120fd3f150 100644 --- a/rollup/da_syncer/da/revert.go +++ b/rollup/da_syncer/da/revert.go @@ -1,14 +1,16 @@ package da -type RevertBatch struct { - batchIndex uint64 +import ( + "github.com/scroll-tech/go-ethereum/rollup/l1" +) - l1BlockNumber 
uint64 +type RevertBatch struct { + event *l1.RevertBatchEvent } -func NewRevertBatch(batchIndex uint64) *RevertBatch { +func NewRevertBatch(event *l1.RevertBatchEvent) *RevertBatch { return &RevertBatch{ - batchIndex: batchIndex, + event: event, } } @@ -17,10 +19,14 @@ func (r *RevertBatch) Type() Type { } func (r *RevertBatch) L1BlockNumber() uint64 { - return r.l1BlockNumber + return r.event.BlockNumber() } func (r *RevertBatch) BatchIndex() uint64 { - return r.batchIndex + return r.event.BatchIndex().Uint64() +} + +func (r *RevertBatch) Event() l1.RollupEvent { + return r.event } func (r *RevertBatch) CompareTo(other Entry) int { diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go index eddc77d71350..2902b48caefa 100644 --- a/rollup/l1/reader.go +++ b/rollup/l1/reader.go @@ -139,20 +139,23 @@ func (r *Reader) FetchTxData(txHash, blockHash common.Hash) ([]byte, error) { if err != nil { return nil, err } + return tx.Data(), nil } -// FetchTxBlobHash fetches tx blob hash corresponding to given event log -func (r *Reader) FetchTxBlobHash(txHash, blockHash common.Hash) (common.Hash, error) { +// FetchTxBlobHashes fetches tx blob hash corresponding to given event log +func (r *Reader) FetchTxBlobHashes(txHash, blockHash common.Hash) ([]common.Hash, error) { tx, err := r.fetchTx(txHash, blockHash) if err != nil { - return common.Hash{}, err + return nil, fmt.Errorf("failed to fetch tx, tx hash: %v, block hash: %v, err: %w", txHash.Hex(), blockHash.Hex(), err) } + blobHashes := tx.BlobHashes() if len(blobHashes) == 0 { - return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", txHash.Hex()) + return nil, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", txHash.Hex()) } - return blobHashes[0], nil + + return blobHashes, nil } // FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index bbb2b4940393..58ba26bf21e4 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -3,25 +3,23 @@ package rollup_sync_service import ( "context" "encoding/json" + "errors" "fmt" "os" - "reflect" "sync" "time" "github.com/scroll-tech/da-codec/encoding" - "github.com/scroll-tech/go-ethereum/accounts/abi" - "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" + "github.com/scroll-tech/go-ethereum/rollup/l1" "github.com/scroll-tech/go-ethereum/rollup/rcfg" - "github.com/scroll-tech/go-ethereum/rollup/sync_service" "github.com/scroll-tech/go-ethereum/rollup/withdrawtrie" ) @@ -46,41 +44,21 @@ const ( // RollupSyncService collects ScrollChain batch commit/revert/finalize events and stores metadata into db. 
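+// After this refactor the service no longer queries L1 directly: fetching and
+// decoding of rollup events is delegated to the embedded da.CalldataBlobSource.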
diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go
index bbb2b4940393..58ba26bf21e4 100644
--- a/rollup/rollup_sync_service/rollup_sync_service.go
+++ b/rollup/rollup_sync_service/rollup_sync_service.go
@@ -3,25 +3,23 @@ package rollup_sync_service
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"os"
-	"reflect"
 	"sync"
 	"time"
 
 	"github.com/scroll-tech/da-codec/encoding"
 
-	"github.com/scroll-tech/go-ethereum/accounts/abi"
-	"github.com/scroll-tech/go-ethereum/common"
 	"github.com/scroll-tech/go-ethereum/core"
 	"github.com/scroll-tech/go-ethereum/core/rawdb"
-	"github.com/scroll-tech/go-ethereum/core/types"
 	"github.com/scroll-tech/go-ethereum/ethdb"
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/node"
 	"github.com/scroll-tech/go-ethereum/params"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/da"
+	"github.com/scroll-tech/go-ethereum/rollup/l1"
 	"github.com/scroll-tech/go-ethereum/rollup/rcfg"
-	"github.com/scroll-tech/go-ethereum/rollup/sync_service"
 	"github.com/scroll-tech/go-ethereum/rollup/withdrawtrie"
 )
 
@@ -46,41 +44,21 @@ const (
 
 // RollupSyncService collects ScrollChain batch commit/revert/finalize events and stores metadata into db.
 type RollupSyncService struct {
-	ctx                           context.Context
-	cancel                        context.CancelFunc
-	client                        *L1Client
-	db                            ethdb.Database
-	latestProcessedBlock          uint64
-	scrollChainABI                *abi.ABI
-	l1CommitBatchEventSignature   common.Hash
-	l1RevertBatchEventSignature   common.Hash
-	l1FinalizeBatchEventSignature common.Hash
-	bc                            *core.BlockChain
-	stack                         *node.Node
-	stateMu                       sync.Mutex
+	ctx     context.Context
+	cancel  context.CancelFunc
+	db      ethdb.Database
+	bc      *core.BlockChain
+	stack   *node.Node
+	stateMu sync.Mutex
+
+	callDataBlobSource *da.CalldataBlobSource
 }
 
-func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client sync_service.EthClient, bc *core.BlockChain, stack *node.Node) (*RollupSyncService, error) {
-	// terminate if the caller does not provide an L1 client (e.g. in tests)
-	if l1Client == nil || (reflect.ValueOf(l1Client).Kind() == reflect.Ptr && reflect.ValueOf(l1Client).IsNil()) {
-		log.Warn("No L1 client provided, L1 rollup sync service will not run")
-		return nil, nil
-	}
-
+func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client l1.Client, bc *core.BlockChain, stack *node.Node) (*RollupSyncService, error) {
 	if genesisConfig.Scroll.L1Config == nil {
 		return nil, fmt.Errorf("missing L1 config in genesis")
 	}
 
-	scrollChainABI, err := ScrollChainMetaData.GetAbi()
-	if err != nil {
-		return nil, fmt.Errorf("failed to get scroll chain abi: %w", err)
-	}
-
-	client, err := NewL1Client(ctx, l1Client, genesisConfig.Scroll.L1Config.L1ChainId, genesisConfig.Scroll.L1Config.ScrollChainAddress, scrollChainABI)
-	if err != nil {
-		return nil, fmt.Errorf("failed to initialize l1 client: %w", err)
-	}
-
 	// Initialize the latestProcessedBlock with the block just before the L1 deployment block.
 	// This serves as a default value when there's no L1 rollup events synced in the database.
 	var latestProcessedBlock uint64
@@ -94,20 +72,31 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig
 		latestProcessedBlock = *block
 	}
 
+	l1Reader, err := l1.NewReader(ctx, l1.Config{
+		ScrollChainAddress:    genesisConfig.Scroll.L1Config.ScrollChainAddress,
+		L1MessageQueueAddress: genesisConfig.Scroll.L1Config.L1MessageQueueAddress,
+	}, l1Client)
+	if err != nil {
+		return nil, fmt.Errorf("failed to initialize l1.Reader, err = %w", err)
+	}
+
+	// TODO: create blob clients based on new config parameters
+
+	calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, nil, db)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create calldata blob source: %w", err)
+	}
+
 	ctx, cancel := context.WithCancel(ctx)
 	service := RollupSyncService{
-		ctx:                           ctx,
-		cancel:                        cancel,
-		client:                        client,
-		db:                            db,
-		latestProcessedBlock:          latestProcessedBlock,
-		scrollChainABI:                scrollChainABI,
-		l1CommitBatchEventSignature:   scrollChainABI.Events["CommitBatch"].ID,
-		l1RevertBatchEventSignature:   scrollChainABI.Events["RevertBatch"].ID,
-		l1FinalizeBatchEventSignature: scrollChainABI.Events["FinalizeBatch"].ID,
-		bc:                            bc,
-		stack:                         stack,
+		ctx:    ctx,
+		cancel: cancel,
+		db:     db,
+		bc:     bc,
+		stack:  stack,
+
+		callDataBlobSource: calldataBlobSource,
 	}
 
 	return &service, nil
@@ -118,7 +107,7 @@ func (s *RollupSyncService) Start() {
 		return
 	}
 
-	log.Info("Starting rollup event sync background service", "latest processed block", s.latestProcessedBlock)
+	log.Info("Starting rollup event sync background service", "latest processed block", s.callDataBlobSource.L1Height())
 
 	go func() {
 		syncTicker := time.NewTicker(defaultSyncInterval)
@@ -132,9 +121,12 @@ func (s *RollupSyncService) Start() {
 			case <-s.ctx.Done():
 				return
 			case <-syncTicker.C:
-				s.fetchRollupEvents()
+				err := s.fetchRollupEvents()
+				if err != nil {
+					log.Error("failed to fetch rollup events", "err", err)
+				}
 			case <-logTicker.C:
-				log.Info("Sync rollup events progress update", "latestProcessedBlock", s.latestProcessedBlock)
+				log.Info("Sync rollup events progress update", "latestProcessedBlock", s.callDataBlobSource.L1Height())
 			}
 		}
 	}()
@@ -161,90 +153,79 @@ func (s *RollupSyncService) ResetStartSyncHeight(height uint64) {
 	s.stateMu.Lock()
 	defer s.stateMu.Unlock()
 
-	s.latestProcessedBlock = height
+	s.callDataBlobSource.SetL1Height(height)
 	log.Info("Reset sync service", "height", height)
 }
 
-func (s *RollupSyncService) fetchRollupEvents() {
+func (s *RollupSyncService) fetchRollupEvents() error {
 	s.stateMu.Lock()
 	defer s.stateMu.Unlock()
 
-	latestConfirmed, err := s.client.GetLatestFinalizedBlockNumber()
-	if err != nil {
-		log.Warn("failed to get latest confirmed block number", "err", err)
-		return
-	}
-
-	log.Trace("Sync service fetch rollup events", "latest processed block", s.latestProcessedBlock, "latest confirmed", latestConfirmed)
+	for {
+		prevL1Height := s.callDataBlobSource.L1Height()
 
-	// query in batches
-	for from := s.latestProcessedBlock + 1; from <= latestConfirmed; from += defaultFetchBlockRange {
-		if s.ctx.Err() != nil {
-			log.Info("Context canceled", "reason", s.ctx.Err())
-			return
-		}
+		daEntries, err := s.callDataBlobSource.NextData()
+		if err != nil {
+			if errors.Is(err, da.ErrSourceExhausted) {
+				log.Trace("Sync service exhausted data source, waiting for next data")
+				return nil
+			}
 
-		to := from + defaultFetchBlockRange - 1
-		if to > latestConfirmed {
-			to = latestConfirmed
+			return fmt.Errorf("failed to get next data: %w", err)
 		}
 
-		logs, err := s.client.FetchRollupEventsInRange(from, to)
-		if err != nil {
-			log.Error("failed to fetch rollup events in range", "from block", from, "to block", to, "err", err)
-			return
+		if err = s.updateRollupEvents(daEntries); err != nil {
+			// Reset the L1 height to the previous value to retry fetching the same data.
+			s.callDataBlobSource.SetL1Height(prevL1Height)
+			return fmt.Errorf("failed to update rollup events: %w", err)
 		}
 
-		if err := s.parseAndUpdateRollupEventLogs(logs, to); err != nil {
-			log.Error("failed to parse and update rollup event logs", "err", err)
-			return
-		}
+		log.Trace("Sync service fetched rollup events", "latest processed L1 block", s.callDataBlobSource.L1Height(), "latest finalized L1 block", s.callDataBlobSource.L1Finalized())
 
-		s.latestProcessedBlock = to
+		// note: the batch updates in updateRollupEvents are idempotent, if we crash
+		// before this line and re-execute the previous steps, we will get the same result.
+		rawdb.WriteRollupEventSyncedL1BlockNumber(s.db, s.callDataBlobSource.L1Height())
 	}
 }
 
-func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endBlockNumber uint64) error {
-	for _, vLog := range logs {
-		switch vLog.Topics[0] {
-		case s.l1CommitBatchEventSignature:
-			event := &L1CommitBatchEvent{}
-			if err := UnpackLog(s.scrollChainABI, event, "CommitBatch", vLog); err != nil {
-				return fmt.Errorf("failed to unpack commit rollup event log, err: %w", err)
+func (s *RollupSyncService) updateRollupEvents(daEntries da.Entries) error {
+	for _, entry := range daEntries {
+		switch entry.Type() {
+		case da.CommitBatchV0Type, da.CommitBatchWithBlobType:
+			log.Trace("found new CommitBatch event", "batch index", entry.BatchIndex())
+
+			entryWithBlocks, ok := entry.(da.EntryWithBlocks)
+			if !ok {
+				return fmt.Errorf("failed to cast to EntryWithBlocks, batch index: %v", entry.BatchIndex())
 			}
-			batchIndex := event.BatchIndex.Uint64()
-			log.Trace("found new CommitBatch event", "batch index", batchIndex)
 
-			committedBatchMeta, err := s.getCommittedBatchMeta(batchIndex, &vLog)
+			committedBatchMeta, err := s.getCommittedBatchMeta(entryWithBlocks)
 			if err != nil {
-				return fmt.Errorf("failed to get chunk ranges, batch index: %v, err: %w", batchIndex, err)
+				return fmt.Errorf("failed to get committed batch meta, batch index: %v, err: %w", entry.BatchIndex(), err)
 			}
-			rawdb.WriteCommittedBatchMeta(s.db, batchIndex, committedBatchMeta)
 
-		case s.l1RevertBatchEventSignature:
-			event := &L1RevertBatchEvent{}
-			if err := UnpackLog(s.scrollChainABI, event, "RevertBatch", vLog); err != nil {
-				return fmt.Errorf("failed to unpack revert rollup event log, err: %w", err)
-			}
-			batchIndex := event.BatchIndex.Uint64()
-			log.Trace("found new RevertBatch event", "batch index", batchIndex)
+			rawdb.WriteCommittedBatchMeta(s.db, entry.BatchIndex(), committedBatchMeta)
 
-			rawdb.DeleteCommittedBatchMeta(s.db, batchIndex)
+		case da.RevertBatchType:
+			log.Trace("found new RevertBatch event", "batch index", entry.BatchIndex())
+			rawdb.DeleteCommittedBatchMeta(s.db, entry.BatchIndex())
 
-		case s.l1FinalizeBatchEventSignature:
-			event := &L1FinalizeBatchEvent{}
-			if err := UnpackLog(s.scrollChainABI, event, "FinalizeBatch", vLog); err != nil {
-				return fmt.Errorf("failed to unpack finalized rollup event log, err: %w", err)
+		case da.FinalizeBatchType:
+			event, ok := entry.Event().(*l1.FinalizeBatchEvent)
+			// This should never happen because we just checked the batch type
+			if !ok {
+				return fmt.Errorf("failed to cast to FinalizeBatchEvent, batch index: %v", entry.BatchIndex())
 			}
-			batchIndex := event.BatchIndex.Uint64()
+
+			batchIndex := entry.BatchIndex()
 			log.Trace("found new FinalizeBatch event", "batch index", batchIndex)
 
 			lastFinalizedBatchIndex := rawdb.ReadLastFinalizedBatchIndex(s.db)
 
-			// After darwin, FinalizeBatch event emitted every bundle, which contains multiple batches.
-			// Therefore there are a range of finalized batches need to be saved into db.
+			// After Darwin, a FinalizeBatch event is emitted for every bundle, which contains multiple batches.
+			// Therefore, a range of finalized batches needs to be saved into db.
 			//
-			// The range logic also applies to the batches before darwin when FinalizeBatch event emitted
+			// The range logic also applies to the batches before Darwin, when the FinalizeBatch event was emitted
 			// per single batch. In this situation, `batchIndex` just equals to `*lastFinalizedBatchIndex + 1`
 			// and only one batch is processed through the for loop.
 			startBatchIndex := batchIndex
@@ -293,14 +274,10 @@
 			log.Debug("write finalized l2 block number", "batch index", batchIndex, "finalized l2 block height", highestFinalizedBlockNumber)
 
 		default:
-			return fmt.Errorf("unknown event, topic: %v, tx hash: %v", vLog.Topics[0].Hex(), vLog.TxHash.Hex())
+			return fmt.Errorf("unknown daEntry, type: %d, batch index: %d", entry.Type(), entry.BatchIndex())
 		}
 	}
 
-	// note: the batch updates above are idempotent, if we crash
-	// before this line and reexecute the previous steps, we will
-	// get the same result.
-	rawdb.WriteRollupEventSyncedL1BlockNumber(s.db, endBlockNumber)
 	return nil
 }
 
@@ -355,8 +332,8 @@ func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.Chu
 	return chunks, nil
 }
 
-func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types.Log) (*rawdb.CommittedBatchMeta, error) {
-	if batchIndex == 0 {
+func (s *RollupSyncService) getCommittedBatchMeta(commitedBatch da.EntryWithBlocks) (*rawdb.CommittedBatchMeta, error) {
+	if commitedBatch.BatchIndex() == 0 {
 		return &rawdb.CommittedBatchMeta{
 			Version:             0,
 			BlobVersionedHashes: nil,
@@ -364,111 +341,16 @@ func (s *RollupSyncService) getCommittedBatchMeta(batchIndex uint64, vLog *types
 		}, nil
 	}
 
-	tx, _, err := s.client.client.TransactionByHash(s.ctx, vLog.TxHash)
-	if err != nil {
-		log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction",
-			"tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err)
-		block, err := s.client.client.BlockByHash(s.ctx, vLog.BlockHash)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err)
-		}
-
-		if block == nil {
-			return nil, fmt.Errorf("failed to get block by hash, block not found, block number: %v, block hash: %v", vLog.BlockNumber, vLog.BlockHash.Hex())
-		}
-
-		found := false
-		for _, txInBlock := range block.Transactions() {
-			if txInBlock.Hash() == vLog.TxHash {
-				tx = txInBlock
-				found = true
-				break
-			}
-		}
-		if !found {
-			return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex())
-		}
-	}
-
-	var commitBatchMeta rawdb.CommittedBatchMeta
-
-	if tx.Type() == types.BlobTxType {
-		blobVersionedHashes := tx.BlobHashes()
-		if blobVersionedHashes == nil {
-			return nil, fmt.Errorf("invalid blob transaction, blob hashes is nil, tx hash: %v", tx.Hash().Hex())
-		}
-		commitBatchMeta.BlobVersionedHashes = blobVersionedHashes
-	}
-
-	version, ranges, err := s.decodeBatchVersionAndChunkBlockRanges(tx.Data())
-	if err != nil {
-		return nil, fmt.Errorf("failed to decode chunk block ranges, batch index: %v, err: %w", batchIndex, err)
-	}
-
-	commitBatchMeta.Version = version
-	commitBatchMeta.ChunkBlockRanges = ranges
-	return &commitBatchMeta, nil
-}
-
-// decodeBatchVersionAndChunkBlockRanges decodes version and chunks' block ranges in a batch based on the commit batch transaction's calldata.
-func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte) (uint8, []*rawdb.ChunkBlockRange, error) {
-	const methodIDLength = 4
-	if len(txData) < methodIDLength {
-		return 0, nil, fmt.Errorf("transaction data is too short, length of tx data: %v, minimum length required: %v", len(txData), methodIDLength)
-	}
-
-	method, err := s.scrollChainABI.MethodById(txData[:methodIDLength])
-	if err != nil {
-		return 0, nil, fmt.Errorf("failed to get method by ID, ID: %v, err: %w", txData[:methodIDLength], err)
-	}
-
-	values, err := method.Inputs.Unpack(txData[methodIDLength:])
+	chunkRanges, err := blockRangesFromChunks(commitedBatch.Chunks())
 	if err != nil {
-		return 0, nil, fmt.Errorf("failed to unpack transaction data using ABI, tx data: %v, err: %w", txData, err)
-	}
-
-	if method.Name == "commitBatch" {
-		type commitBatchArgs struct {
-			Version                uint8
-			ParentBatchHeader      []byte
-			Chunks                 [][]byte
-			SkippedL1MessageBitmap []byte
-		}
-
-		var args commitBatchArgs
-		if err = method.Inputs.Copy(&args, values); err != nil {
-			return 0, nil, fmt.Errorf("failed to decode calldata into commitBatch args, values: %+v, err: %w", values, err)
-		}
-
-		chunkRanges, err := decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks)
-		if err != nil {
-			return 0, nil, fmt.Errorf("failed to decode block ranges from encoded chunks, version: %v, chunks: %+v, err: %w", args.Version, args.Chunks, err)
-		}
-
-		return args.Version, chunkRanges, nil
-	} else if method.Name == "commitBatchWithBlobProof" {
-		type commitBatchWithBlobProofArgs struct {
-			Version                uint8
-			ParentBatchHeader      []byte
-			Chunks                 [][]byte
-			SkippedL1MessageBitmap []byte
-			BlobDataProof          []byte
-		}
-
-		var args commitBatchWithBlobProofArgs
-		if err = method.Inputs.Copy(&args, values); err != nil {
-			return 0, nil, fmt.Errorf("failed to decode calldata into commitBatchWithBlobProofArgs args, values: %+v, err: %w", values, err)
-		}
-
-		chunkRanges, err := decodeBlockRangesFromEncodedChunks(encoding.CodecVersion(args.Version), args.Chunks)
-		if err != nil {
-			return 0, nil, fmt.Errorf("failed to decode block ranges from encoded chunks, version: %v, chunks: %+v, err: %w", args.Version, args.Chunks, err)
-		}
-
-		return args.Version, chunkRanges, nil
+		return nil, fmt.Errorf("failed to decode block ranges from chunks, batch index: %v, err: %w", commitedBatch.BatchIndex(), err)
 	}
 
-	return 0, nil, fmt.Errorf("unexpected method name: %v", method.Name)
+	return &rawdb.CommittedBatchMeta{
+		Version:             commitedBatch.Version(),
+		ChunkBlockRanges:    chunkRanges,
+		BlobVersionedHashes: commitedBatch.BlobVersionedHashes(),
+	}, nil
 }
 
 // validateBatch verifies the consistency between the L1 contract and L2 node data.
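Aside: getCommittedBatchMeta above now derives chunk block ranges purely from the decoded DA chunks returned by Chunks(), instead of re-parsing commit-transaction calldata. The underlying idea, reduced to a standalone sketch (a hypothetical helper, assuming a non-empty Blocks slice, which blockRangesFromChunks below checks for):

    package example

    import (
    	"github.com/scroll-tech/da-codec/encoding"
    	"github.com/scroll-tech/go-ethereum/core/rawdb"
    )

    // chunkRange maps one decoded DA chunk to the contiguous block range it
    // covers: from its first block number to its last.
    func chunkRange(chunk *encoding.DAChunkRawTx) *rawdb.ChunkBlockRange {
    	return &rawdb.ChunkBlockRange{
    		StartBlockNumber: chunk.Blocks[0].Number(),
    		EndBlockNumber:   chunk.Blocks[len(chunk.Blocks)-1].Number(),
    	}
    }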
@@ -494,7 +376,7 @@ func (s *RollupSyncService) decodeBatchVersionAndChunkBlockRanges(txData []byte)
 // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods.
 // In "finalize by bundle", only the last batch of each bundle is fully verified.
 // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes.
-func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) {
+func validateBatch(batchIndex uint64, event *l1.FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) {
 	if len(chunks) == 0 {
 		return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex)
 	}
@@ -540,15 +422,15 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz
 	// Only check when batch index matches the index of the event. This is compatible with both "finalize by batch" and "finalize by bundle":
 	// - finalize by batch: check all batches
 	// - finalize by bundle: check the last batch, because only one event (containing the info of the last batch) is emitted per bundle
-	if batchIndex == event.BatchIndex.Uint64() {
-		if localStateRoot != event.StateRoot {
-			log.Error("State root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized state root", event.StateRoot.Hex(), "l2 state root", localStateRoot.Hex())
+	if batchIndex == event.BatchIndex().Uint64() {
+		if localStateRoot != event.StateRoot() {
+			log.Error("State root mismatch", "batch index", event.BatchIndex().Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized state root", event.StateRoot().Hex(), "l2 state root", localStateRoot.Hex())
 			stack.Close()
 			os.Exit(1)
 		}
 
-		if localWithdrawRoot != event.WithdrawRoot {
-			log.Error("Withdraw root mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized withdraw root", event.WithdrawRoot.Hex(), "l2 withdraw root", localWithdrawRoot.Hex())
+		if localWithdrawRoot != event.WithdrawRoot() {
+			log.Error("Withdraw root mismatch", "batch index", event.BatchIndex().Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "l1 finalized withdraw root", event.WithdrawRoot().Hex(), "l2 withdraw root", localWithdrawRoot.Hex())
 			stack.Close()
 			os.Exit(1)
 		}
@@ -556,8 +438,8 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz
 	// Verify batch hash
 	// This check ensures the correctness of all batch hashes in the bundle
 	// due to the parent-child relationship between batch hashes
-	if localBatchHash != event.BatchHash {
-		log.Error("Batch hash mismatch", "batch index", event.BatchIndex.Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "parent TotalL1MessagePopped", parentFinalizedBatchMeta.TotalL1MessagePopped, "l1 finalized batch hash", event.BatchHash.Hex(), "l2 batch hash", localBatchHash.Hex())
+	if localBatchHash != event.BatchHash() {
+		log.Error("Batch hash mismatch", "batch index", event.BatchIndex().Uint64(), "start block", startBlock.Header.Number.Uint64(), "end block", endBlock.Header.Number.Uint64(), "parent batch hash", parentFinalizedBatchMeta.BatchHash.Hex(), "parent TotalL1MessagePopped", parentFinalizedBatchMeta.TotalL1MessagePopped, "l1 finalized batch hash", event.BatchHash().Hex(), "l2 batch hash", localBatchHash.Hex())
 		chunksJson, err := json.Marshal(chunks)
 		if err != nil {
 			log.Error("marshal chunks failed", "err", err)
@@ -581,22 +463,12 @@ func validateBatch(batchIndex uint64, event *L1FinalizeBatchEvent, parentFinaliz
 	return endBlock.Header.Number.Uint64(), finalizedBatchMeta, nil
 }
 
-// decodeBlockRangesFromEncodedChunks decodes the provided chunks into a list of block ranges.
-func decodeBlockRangesFromEncodedChunks(codecVersion encoding.CodecVersion, chunks [][]byte) ([]*rawdb.ChunkBlockRange, error) {
-	codec, err := encoding.CodecFromVersion(codecVersion)
-	if err != nil {
-		return nil, fmt.Errorf("failed to get codec from version: %v, err: %w", codecVersion, err)
-	}
-
-	daChunksRawTx, err := codec.DecodeDAChunksRawTx(chunks)
-	if err != nil {
-		return nil, fmt.Errorf("failed to decode DA chunks, version: %v, err: %w", codecVersion, err)
-	}
-
+// blockRangesFromChunks decodes the provided chunks into a list of block ranges.
+func blockRangesFromChunks(chunks []*encoding.DAChunkRawTx) ([]*rawdb.ChunkBlockRange, error) {
 	var chunkBlockRanges []*rawdb.ChunkBlockRange
-	for _, daChunkRawTx := range daChunksRawTx {
+	for _, daChunkRawTx := range chunks {
 		if len(daChunkRawTx.Blocks) == 0 {
-			return nil, fmt.Errorf("no blocks found in DA chunk, version: %v", codecVersion)
+			return nil, fmt.Errorf("no blocks found in DA chunk, chunk: %+v", daChunkRawTx)
 		}
 
 		chunkBlockRanges = append(chunkBlockRanges, &rawdb.ChunkBlockRange{

From 4ced6f29f512e5ac4f8da40ebe6ebe90321d35b0 Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Fri, 27 Dec 2024 08:32:07 +0800
Subject: [PATCH 13/36] add configuration and initialize blob clients

---
 cmd/utils/flags.go                  | 18 ++++----
 eth/backend.go                      |  2 +-
 .../rollup_sync_service.go          | 41 +++++++++++++++----
 3 files changed, 43 insertions(+), 18 deletions(-)

diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index bccd6017b36e..090f16d55c27 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -1629,15 +1629,15 @@ func setEnableRollupVerify(ctx *cli.Context, cfg *ethconfig.Config) {
 func setDA(ctx *cli.Context, cfg *ethconfig.Config) {
 	if ctx.IsSet(DASyncEnabledFlag.Name) {
 		cfg.EnableDASyncing = ctx.Bool(DASyncEnabledFlag.Name)
-		if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) {
-			cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name)
-		}
-		if ctx.IsSet(DABlockNativeAPIEndpointFlag.Name) {
-			cfg.DA.BlockNativeAPIEndpoint = ctx.String(DABlockNativeAPIEndpointFlag.Name)
-		}
-		if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) {
-			cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name)
-		}
+	}
+	if ctx.IsSet(DABlobScanAPIEndpointFlag.Name) {
+		cfg.DA.BlobScanAPIEndpoint = ctx.String(DABlobScanAPIEndpointFlag.Name)
+	}
+	if ctx.IsSet(DABlockNativeAPIEndpointFlag.Name) {
+		cfg.DA.BlockNativeAPIEndpoint = ctx.String(DABlockNativeAPIEndpointFlag.Name)
+	}
+	if ctx.IsSet(DABeaconNodeAPIEndpointFlag.Name) {
+		cfg.DA.BeaconNodeAPIEndpoint = ctx.String(DABeaconNodeAPIEndpointFlag.Name)
 	}
 }
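Aside: with the endpoint flags parsed outside the da.sync branch, the same da_syncer.Config now reaches both the DA syncer and the rollup verifier. For illustration, a config equivalent to passing the --da.blob.* flags might be assembled directly as below (field names come from setDA above; the endpoint URLs are placeholders, and at least one must be set or the rollup_sync_service hunk further down rejects the config):

    package example

    import "github.com/scroll-tech/go-ethereum/rollup/da_syncer"

    // exampleDAConfig builds the kind of config setDA produces from the CLI.
    // Endpoints left empty are simply skipped when blob clients are created.
    func exampleDAConfig() da_syncer.Config {
    	return da_syncer.Config{
    		BeaconNodeAPIEndpoint:  "http://localhost:5052",    // placeholder beacon node API
    		BlobScanAPIEndpoint:    "https://api.blobscan.com", // placeholder BlobScan API
    		BlockNativeAPIEndpoint: "",                         // unset: no BlockNative client is added
    	}
    }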
diff --git a/eth/backend.go b/eth/backend.go
index a119708e52be..bd432cb7131c 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -245,7 +245,7 @@ func New(stack *node.Node, config *ethconfig.Config, l1Client l1.Client) (*Ether
 	if config.EnableRollupVerify {
 		// initialize and start rollup event sync service
-		eth.rollupSyncService, err = rollup_sync_service.NewRollupSyncService(context.Background(), chainConfig, eth.chainDb, l1Client, eth.blockchain, stack)
+		eth.rollupSyncService, err = rollup_sync_service.NewRollupSyncService(context.Background(), chainConfig, eth.chainDb, l1Client, eth.blockchain, stack, config.DA)
 		if err != nil {
 			return nil, fmt.Errorf("cannot initialize rollup event sync service: %w", err)
 		}
diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go
index 58ba26bf21e4..ec782a60f535 100644
--- a/rollup/rollup_sync_service/rollup_sync_service.go
+++ b/rollup/rollup_sync_service/rollup_sync_service.go
@@ -17,6 +17,8 @@ import (
 	"github.com/scroll-tech/go-ethereum/log"
 	"github.com/scroll-tech/go-ethereum/node"
 	"github.com/scroll-tech/go-ethereum/params"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer"
+	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client"
 	"github.com/scroll-tech/go-ethereum/rollup/da_syncer/da"
 	"github.com/scroll-tech/go-ethereum/rollup/l1"
 	"github.com/scroll-tech/go-ethereum/rollup/rcfg"
@@ -54,7 +56,7 @@ type RollupSyncService struct {
 	callDataBlobSource *da.CalldataBlobSource
 }
 
-func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client l1.Client, bc *core.BlockChain, stack *node.Node) (*RollupSyncService, error) {
+func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig, db ethdb.Database, l1Client l1.Client, bc *core.BlockChain, stack *node.Node, config da_syncer.Config) (*RollupSyncService, error) {
 	if genesisConfig.Scroll.L1Config == nil {
 		return nil, fmt.Errorf("missing L1 config in genesis")
 	}
@@ -72,6 +74,14 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig
 		latestProcessedBlock = *block
 	}
 
+	var success bool
+	ctx, cancel := context.WithCancel(ctx)
+	defer func() {
+		if !success {
+			cancel()
+		}
+	}()
+
 	l1Reader, err := l1.NewReader(ctx, l1.Config{
 		ScrollChainAddress:    genesisConfig.Scroll.L1Config.ScrollChainAddress,
 		L1MessageQueueAddress: genesisConfig.Scroll.L1Config.L1MessageQueueAddress,
@@ -80,16 +90,33 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig
 		return nil, fmt.Errorf("failed to initialize l1.Reader, err = %w", err)
 	}
 
-	// TODO: create blob clients based on new config parameters
+	blobClientList := blob_client.NewBlobClients()
+	if config.BeaconNodeAPIEndpoint != "" {
+		beaconNodeClient, err := blob_client.NewBeaconNodeClient(config.BeaconNodeAPIEndpoint)
+		if err != nil {
+			log.Warn("failed to create BeaconNodeClient", "err", err)
+		} else {
+			blobClientList.AddBlobClient(beaconNodeClient)
+		}
+	}
+	if config.BlobScanAPIEndpoint != "" {
+		blobClientList.AddBlobClient(blob_client.NewBlobScanClient(config.BlobScanAPIEndpoint))
+	}
+	if config.BlockNativeAPIEndpoint != "" {
+		blobClientList.AddBlobClient(blob_client.NewBlockNativeClient(config.BlockNativeAPIEndpoint))
+	}
+	if blobClientList.Size() == 0 {
+		return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag")
+	}
 
-	calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, nil, db)
+	calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, blobClientList, db)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create calldata blob source: %w", err)
 	}
 
-	ctx, cancel := context.WithCancel(ctx)
+	success = true
 
-	service := RollupSyncService{
+	return &RollupSyncService{
 		ctx:    ctx,
 		cancel: cancel,
 		db:     db,
@@ -97,9 +124,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig
 		stack:  stack,
 
 		callDataBlobSource: calldataBlobSource,
-	}
-
-	return &service, nil
+	}, nil
 }
 
 func (s *RollupSyncService) Start() {
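Aside: one detail of the constructor above worth spelling out is the cancellation guard. The derived context must be cancelled on every early error return but must stay alive once the service is successfully built; the success flag plus a deferred conditional cancel covers all error paths without repeating cancel() before each return. The shape of the idiom, reduced to a standalone sketch with hypothetical names:

    package example

    import (
    	"context"
    	"errors"
    )

    // buildWithGuardedCancel derives a cancellable context and guarantees it is
    // released if any later initialization step fails.
    func buildWithGuardedCancel(parent context.Context, initOK bool) (context.Context, context.CancelFunc, error) {
    	var success bool
    	ctx, cancel := context.WithCancel(parent)
    	defer func() {
    		if !success {
    			cancel() // no context leak on the error path below
    		}
    	}()

    	if !initOK { // stand-in for a fallible setup step
    		return nil, nil, errors.New("initialization failed")
    	}

    	success = true // from here on the caller owns cancel
    	return ctx, cancel, nil
    }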
Version() uint8 + Version() encoding.CodecVersion Chunks() []*encoding.DAChunkRawTx BlobVersionedHashes() []common.Hash } diff --git a/rollup/l1/abi.go b/rollup/l1/abi.go index c16123aa5e8b..dcf09f25fd13 100644 --- a/rollup/l1/abi.go +++ b/rollup/l1/abi.go @@ -158,6 +158,26 @@ type FinalizeBatchEvent struct { blockNumber uint64 } +func NewFinalizeBatchEvent( + batchIndex *big.Int, + batchHash common.Hash, + stateRoot common.Hash, + withdrawRoot common.Hash, + txHash common.Hash, + blockHash common.Hash, + blockNumber uint64, +) *FinalizeBatchEvent { + return &FinalizeBatchEvent{ + batchIndex: batchIndex, + batchHash: batchHash, + stateRoot: stateRoot, + withdrawRoot: withdrawRoot, + txHash: txHash, + blockHash: blockHash, + blockNumber: blockNumber, + } +} + func (f *FinalizeBatchEvent) TxHash() common.Hash { return f.txHash } diff --git a/rollup/l1/types.go b/rollup/l1/types.go index 8c030815ec28..0adb734bac09 100644 --- a/rollup/l1/types.go +++ b/rollup/l1/types.go @@ -20,3 +20,41 @@ type Client interface { BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) } + +type MockNopClient struct{} + +func (m *MockNopClient) BlockNumber(ctx context.Context) (uint64, error) { + return 0, nil +} + +func (m *MockNopClient) ChainID(ctx context.Context) (*big.Int, error) { + return big.NewInt(0), nil +} + +func (m *MockNopClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { + return nil, nil +} + +func (m *MockNopClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + return nil, nil +} + +func (m *MockNopClient) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + return nil, nil +} + +func (m *MockNopClient) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { + return nil, nil +} + +func (m *MockNopClient) TransactionByHash(ctx context.Context, txHash common.Hash) (tx *types.Transaction, isPending bool, err error) { + return nil, false, nil +} + +func (m *MockNopClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return nil, nil +} + +func (m *MockNopClient) CallContract(ctx context.Context, msg ethereum.CallMsg, blockNumber *big.Int) ([]byte, error) { + return nil, nil +} diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index ec782a60f535..3380b7dcff0a 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -106,7 +106,7 @@ func NewRollupSyncService(ctx context.Context, genesisConfig *params.ChainConfig blobClientList.AddBlobClient(blob_client.NewBlockNativeClient(config.BlockNativeAPIEndpoint)) } if blobClientList.Size() == 0 { - return nil, errors.New("DA syncing is enabled but no blob client is configured. Please provide at least one blob client via command line flag") + return nil, errors.New("no blob client is configured for rollup verifier. 
Please provide at least one blob client via command line flag") } calldataBlobSource, err := da.NewCalldataBlobSource(ctx, latestProcessedBlock, l1Reader, blobClientList, db) @@ -372,7 +372,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(commitedBatch da.EntryWithBloc } return &rawdb.CommittedBatchMeta{ - Version: commitedBatch.Version(), + Version: uint8(commitedBatch.Version()), ChunkBlockRanges: chunkRanges, BlobVersionedHashes: commitedBatch.BlobVersionedHashes(), }, nil diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index f1b09a37a1f2..c34f9385b515 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -2,12 +2,10 @@ package rollup_sync_service import ( "context" - "encoding/hex" "encoding/json" "math/big" "os" "testing" - "time" "github.com/scroll-tech/da-codec/encoding" "github.com/stretchr/testify/assert" @@ -16,415 +14,86 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb/memorydb" "github.com/scroll-tech/go-ethereum/node" "github.com/scroll-tech/go-ethereum/params" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/da" + "github.com/scroll-tech/go-ethereum/rollup/l1" ) -func TestRollupSyncServiceStartAndStop(t *testing.T) { - genesisConfig := ¶ms.ChainConfig{ - Scroll: params.ScrollConfig{ - L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - }, - }, - } - db := rawdb.NewDatabase(memorydb.New()) - l1Client := &mockEthClient{} - bc := &core.BlockChain{} - stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } - defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) - } - - assert.NotNil(t, service) - service.Start() - time.Sleep(10 * time.Millisecond) - service.Stop() -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv0(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatch_input_codecv0.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 4435142, EndBlockNumber: 4435142}, - {StartBlockNumber: 4435143, EndBlockNumber: 4435144}, - {StartBlockNumber: 4435145, EndBlockNumber: 4435145}, - {StartBlockNumber: 4435146, EndBlockNumber: 4435146}, - {StartBlockNumber: 4435147, EndBlockNumber: 4435147}, - {StartBlockNumber: 
4435148, EndBlockNumber: 4435148}, - {StartBlockNumber: 4435149, EndBlockNumber: 4435150}, - {StartBlockNumber: 4435151, EndBlockNumber: 4435151}, - {StartBlockNumber: 4435152, EndBlockNumber: 4435152}, - {StartBlockNumber: 4435153, EndBlockNumber: 4435153}, - {StartBlockNumber: 4435154, EndBlockNumber: 4435154}, - {StartBlockNumber: 4435155, EndBlockNumber: 4435155}, - {StartBlockNumber: 4435156, EndBlockNumber: 4435156}, - {StartBlockNumber: 4435157, EndBlockNumber: 4435157}, - {StartBlockNumber: 4435158, EndBlockNumber: 4435158}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv1(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatch_input_codecv1.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 1690, EndBlockNumber: 1780}, - {StartBlockNumber: 1781, EndBlockNumber: 1871}, - {StartBlockNumber: 1872, EndBlockNumber: 1962}, - {StartBlockNumber: 1963, EndBlockNumber: 2053}, - {StartBlockNumber: 2054, EndBlockNumber: 2144}, - {StartBlockNumber: 2145, EndBlockNumber: 2235}, - {StartBlockNumber: 2236, EndBlockNumber: 2326}, - {StartBlockNumber: 2327, EndBlockNumber: 2417}, - {StartBlockNumber: 2418, EndBlockNumber: 2508}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv2(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatch_input_codecv2.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 200, EndBlockNumber: 290}, - 
{StartBlockNumber: 291, EndBlockNumber: 381}, - {StartBlockNumber: 382, EndBlockNumber: 472}, - {StartBlockNumber: 473, EndBlockNumber: 563}, - {StartBlockNumber: 564, EndBlockNumber: 654}, - {StartBlockNumber: 655, EndBlockNumber: 745}, - {StartBlockNumber: 746, EndBlockNumber: 836}, - {StartBlockNumber: 837, EndBlockNumber: 927}, - {StartBlockNumber: 928, EndBlockNumber: 1018}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - -func TestDecodeBatchVersionAndChunkBlockRangesCodecv3(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - service := &RollupSyncService{ - scrollChainABI: scrollChainABI, - } - - data, err := os.ReadFile("./testdata/commitBatchWithBlobProof_input_codecv3.json") - require.NoError(t, err, "Failed to read json file") - - type tx struct { - Input string `json:"input"` - } - var commitBatch tx - err = json.Unmarshal(data, &commitBatch) - require.NoError(t, err, "Failed to unmarshal transaction json") - - testTxData, err := hex.DecodeString(commitBatch.Input[2:]) - if err != nil { - t.Fatalf("Failed to decode string: %v", err) - } - - version, ranges, err := service.decodeBatchVersionAndChunkBlockRanges(testTxData) - if err != nil { - t.Fatalf("Failed to decode chunk ranges: %v", err) - } - - assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 1, EndBlockNumber: 9}, - {StartBlockNumber: 10, EndBlockNumber: 20}, - {StartBlockNumber: 21, EndBlockNumber: 21}, - {StartBlockNumber: 22, EndBlockNumber: 22}, - {StartBlockNumber: 23, EndBlockNumber: 23}, - {StartBlockNumber: 24, EndBlockNumber: 24}, - {StartBlockNumber: 25, EndBlockNumber: 25}, - {StartBlockNumber: 26, EndBlockNumber: 26}, - {StartBlockNumber: 27, EndBlockNumber: 27}, - {StartBlockNumber: 28, EndBlockNumber: 28}, - {StartBlockNumber: 29, EndBlockNumber: 29}, - {StartBlockNumber: 30, EndBlockNumber: 30}, - {StartBlockNumber: 31, EndBlockNumber: 31}, - {StartBlockNumber: 32, EndBlockNumber: 32}, - {StartBlockNumber: 33, EndBlockNumber: 33}, - {StartBlockNumber: 34, EndBlockNumber: 34}, - {StartBlockNumber: 35, EndBlockNumber: 35}, - {StartBlockNumber: 36, EndBlockNumber: 36}, - {StartBlockNumber: 37, EndBlockNumber: 37}, - {StartBlockNumber: 38, EndBlockNumber: 38}, - {StartBlockNumber: 39, EndBlockNumber: 39}, - {StartBlockNumber: 40, EndBlockNumber: 40}, - } - - if len(expectedRanges) != len(ranges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(ranges)) - } - - for i := range ranges { - if *expectedRanges[i] != *ranges[i] { - t.Errorf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *ranges[i]) - } - } -} - func TestGetCommittedBatchMetaCodecv0(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1ChainId: 11155111, + ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"), }, }, } db := rawdb.NewDatabase(memorydb.New()) - rlpData, err := os.ReadFile("./testdata/commitBatch_codecv0.rlp") - if err != nil { 
- t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } + require.NoError(t, err, "Failed to create new P2P node") defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) - } - vLog := &types.Log{ - TxHash: common.HexToHash("0x0"), - } - metadata, err := service.getCommittedBatchMeta(1, vLog) + service, err := NewRollupSyncService(context.Background(), genesisConfig, db, &l1.MockNopClient{}, &core.BlockChain{}, stack, da_syncer.Config{ + BlobScanAPIEndpoint: "http://localhost:8080", + }) require.NoError(t, err) - assert.Equal(t, encoding.CodecV0, encoding.CodecVersion(metadata.Version)) - expectedRanges := []*rawdb.ChunkBlockRange{ {StartBlockNumber: 911145, EndBlockNumber: 911151}, {StartBlockNumber: 911152, EndBlockNumber: 911155}, {StartBlockNumber: 911156, EndBlockNumber: 911159}, } - if len(expectedRanges) != len(metadata.ChunkBlockRanges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) - } - - for i := range metadata.ChunkBlockRanges { - if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) + var chunks []*encoding.DAChunkRawTx + for _, r := range expectedRanges { + var blocks []encoding.DABlock + for i := r.StartBlockNumber; i <= r.EndBlockNumber; i++ { + blocks = append(blocks, &mockDABlock{number: i}) } + chunks = append(chunks, &encoding.DAChunkRawTx{Blocks: blocks}) } -} -func TestGetCommittedBatchMetaCodecv1(t *testing.T) { - genesisConfig := ¶ms.ChainConfig{ - Scroll: params.ScrollConfig{ - L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - }, - }, - } - db := rawdb.NewDatabase(memorydb.New()) - - rlpData, err := os.ReadFile("./testdata/commitBatch_codecv1.rlp") - if err != nil { - t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} - stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } - defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) + committedBatch := mockEntryWithBlocks{ + batchIndex: 1, + version: encoding.CodecV0, + chunks: chunks, } - vLog := &types.Log{ - TxHash: common.HexToHash("0x1"), - } - metadata, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(committedBatch) require.NoError(t, err) - assert.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) - - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 1, EndBlockNumber: 11}, - } - - if len(expectedRanges) != len(metadata.ChunkBlockRanges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) - } - - for i := range metadata.ChunkBlockRanges { - if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) - } - } + require.Equal(t, 
encoding.CodecV0, encoding.CodecVersion(metadata.Version)) + require.EqualValues(t, expectedRanges, metadata.ChunkBlockRanges) } -func TestGetCommittedBatchMetaCodecv2(t *testing.T) { +func TestGetCommittedBatchMetaCodecV1(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1ChainId: 11155111, + ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), + L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"), }, }, } db := rawdb.NewDatabase(memorydb.New()) - rlpData, err := os.ReadFile("./testdata/commitBatch_codecv2.rlp") - if err != nil { - t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } + require.NoError(t, err, "Failed to create new P2P node") defer stack.Close() - service, err := NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) - } - vLog := &types.Log{ - TxHash: common.HexToHash("0x2"), - } - metadata, err := service.getCommittedBatchMeta(1, vLog) + service, err := NewRollupSyncService(context.Background(), genesisConfig, db, &l1.MockNopClient{}, &core.BlockChain{}, stack, da_syncer.Config{ + BlobScanAPIEndpoint: "http://localhost:8080", + }) require.NoError(t, err) - assert.Equal(t, encoding.CodecV2, encoding.CodecVersion(metadata.Version)) - expectedRanges := []*rawdb.ChunkBlockRange{ + {StartBlockNumber: 100, EndBlockNumber: 142}, {StartBlockNumber: 143, EndBlockNumber: 143}, {StartBlockNumber: 144, EndBlockNumber: 144}, {StartBlockNumber: 145, EndBlockNumber: 145}, @@ -456,96 +125,112 @@ func TestGetCommittedBatchMetaCodecv2(t *testing.T) { {StartBlockNumber: 174, EndBlockNumber: 174}, } - if len(expectedRanges) != len(metadata.ChunkBlockRanges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) - } - - for i := range metadata.ChunkBlockRanges { - if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) + var chunks []*encoding.DAChunkRawTx + for _, r := range expectedRanges { + var blocks []encoding.DABlock + for i := r.StartBlockNumber; i <= r.EndBlockNumber; i++ { + blocks = append(blocks, &mockDABlock{number: i}) } + chunks = append(chunks, &encoding.DAChunkRawTx{Blocks: blocks}) } -} -func TestGetCommittedBatchMetaCodecv3(t *testing.T) { - genesisConfig := ¶ms.ChainConfig{ - Scroll: params.ScrollConfig{ - L1Config: ¶ms.L1Config{ - L1ChainId: 11155111, - ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - }, - }, + expectedVersionedHashes := []common.Hash{ + common.HexToHash("0x1"), + common.HexToHash("0x2"), } - db := rawdb.NewDatabase(memorydb.New()) - rlpData, err := os.ReadFile("./testdata/commitBatchWithBlobProof_codecv3.rlp") - if err != nil { - t.Fatalf("Failed to read RLP data: %v", err) - } - l1Client := &mockEthClient{ - txRLP: rlpData, - } - bc := &core.BlockChain{} - stack, err := node.New(&node.DefaultConfig) - if err != nil { - t.Fatalf("Failed to new P2P node: %v", err) - } - defer stack.Close() - service, err := 
NewRollupSyncService(context.Background(), genesisConfig, db, l1Client, bc, stack) - if err != nil { - t.Fatalf("Failed to new rollup sync service: %v", err) + committedBatch := mockEntryWithBlocks{ + batchIndex: 1, + version: encoding.CodecV1, + chunks: chunks, + versionedHashes: expectedVersionedHashes, } - vLog := &types.Log{ - TxHash: common.HexToHash("0x3"), - } - metadata, err := service.getCommittedBatchMeta(1, vLog) + metadata, err := service.getCommittedBatchMeta(committedBatch) require.NoError(t, err) - assert.Equal(t, encoding.CodecV3, encoding.CodecVersion(metadata.Version)) + require.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) + require.EqualValues(t, expectedRanges, metadata.ChunkBlockRanges) + require.EqualValues(t, expectedVersionedHashes, metadata.BlobVersionedHashes) +} - expectedRanges := []*rawdb.ChunkBlockRange{ - {StartBlockNumber: 41, EndBlockNumber: 41}, - {StartBlockNumber: 42, EndBlockNumber: 42}, - {StartBlockNumber: 43, EndBlockNumber: 43}, - {StartBlockNumber: 44, EndBlockNumber: 44}, - {StartBlockNumber: 45, EndBlockNumber: 45}, - {StartBlockNumber: 46, EndBlockNumber: 46}, - {StartBlockNumber: 47, EndBlockNumber: 47}, - {StartBlockNumber: 48, EndBlockNumber: 48}, - {StartBlockNumber: 49, EndBlockNumber: 49}, - {StartBlockNumber: 50, EndBlockNumber: 50}, - {StartBlockNumber: 51, EndBlockNumber: 51}, - {StartBlockNumber: 52, EndBlockNumber: 52}, - {StartBlockNumber: 53, EndBlockNumber: 53}, - {StartBlockNumber: 54, EndBlockNumber: 54}, - {StartBlockNumber: 55, EndBlockNumber: 55}, - {StartBlockNumber: 56, EndBlockNumber: 56}, - {StartBlockNumber: 57, EndBlockNumber: 57}, - {StartBlockNumber: 58, EndBlockNumber: 58}, - {StartBlockNumber: 59, EndBlockNumber: 59}, - {StartBlockNumber: 60, EndBlockNumber: 60}, - {StartBlockNumber: 61, EndBlockNumber: 61}, - {StartBlockNumber: 62, EndBlockNumber: 62}, - {StartBlockNumber: 63, EndBlockNumber: 63}, - {StartBlockNumber: 64, EndBlockNumber: 64}, - {StartBlockNumber: 65, EndBlockNumber: 65}, - {StartBlockNumber: 66, EndBlockNumber: 66}, - {StartBlockNumber: 67, EndBlockNumber: 67}, - {StartBlockNumber: 68, EndBlockNumber: 68}, - {StartBlockNumber: 69, EndBlockNumber: 69}, - {StartBlockNumber: 70, EndBlockNumber: 70}, - } - - if len(expectedRanges) != len(metadata.ChunkBlockRanges) { - t.Fatalf("Expected range length %v, got %v", len(expectedRanges), len(metadata.ChunkBlockRanges)) - } - - for i := range metadata.ChunkBlockRanges { - if *expectedRanges[i] != *metadata.ChunkBlockRanges[i] { - t.Fatalf("Mismatch at index %d: expected %v, got %v", i, *expectedRanges[i], *metadata.ChunkBlockRanges[i]) - } - } +type mockEntryWithBlocks struct { + batchIndex uint64 + version encoding.CodecVersion + chunks []*encoding.DAChunkRawTx + versionedHashes []common.Hash +} + +func (m mockEntryWithBlocks) Type() da.Type { + panic("implement me") +} + +func (m mockEntryWithBlocks) BatchIndex() uint64 { + return m.batchIndex +} + +func (m mockEntryWithBlocks) L1BlockNumber() uint64 { + panic("implement me") +} + +func (m mockEntryWithBlocks) CompareTo(entry da.Entry) int { + panic("implement me") +} + +func (m mockEntryWithBlocks) Event() l1.RollupEvent { + panic("implement me") +} + +func (m mockEntryWithBlocks) Blocks() []*da.PartialBlock { + panic("implement me") +} + +func (m mockEntryWithBlocks) Version() encoding.CodecVersion { + return m.version +} + +func (m mockEntryWithBlocks) Chunks() []*encoding.DAChunkRawTx { + return m.chunks +} + +func (m mockEntryWithBlocks) BlobVersionedHashes() []common.Hash { + 
return m.versionedHashes +} + +type mockDABlock struct { + number uint64 +} + +func (b *mockDABlock) Encode() []byte { + panic("implement me") +} + +func (b *mockDABlock) Decode(bytes []byte) error { + panic("implement me") +} + +func (b *mockDABlock) NumTransactions() uint16 { + panic("implement me") +} + +func (b *mockDABlock) NumL1Messages() uint16 { + panic("implement me") +} + +func (b *mockDABlock) Timestamp() uint64 { + panic("implement me") +} + +func (b *mockDABlock) BaseFee() *big.Int { + panic("implement me") +} + +func (b *mockDABlock) GasLimit() uint64 { + panic("implement me") +} + +func (b *mockDABlock) Number() uint64 { + return b.number } func TestValidateBatchCodecv0(t *testing.T) { @@ -559,18 +244,21 @@ func TestValidateBatchCodecv0(t *testing.T) { chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} - event1 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(0), - BatchHash: common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"), - StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, - WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, - } + event1 := l1.NewFinalizeBatchEvent( + big.NewInt(0), + common.HexToHash("0xfd3ecf106ce993adc6db68e42ce701bfe638434395abdeeb871f7bd395ae2368"), + chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, + chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV0), BlobVersionedHashes: nil, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -578,32 +266,36 @@ func TestValidateBatchCodecv0(t *testing.T) { chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ - BatchHash: event1.BatchHash, + BatchHash: event1.BatchHash(), TotalL1MessagePopped: 11, - StateRoot: event1.StateRoot, - WithdrawRoot: event1.WithdrawRoot, + StateRoot: event1.StateRoot(), + WithdrawRoot: event1.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) - event2 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(1), - BatchHash: common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"), - StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, - WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, - } + + event2 := l1.NewFinalizeBatchEvent( + big.NewInt(1), + common.HexToHash("0xadb8e526c3fdc2045614158300789cd66e7a945efe5a484db00b5ef9a26016d7"), + chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, + chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 2, + ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV0), BlobVersionedHashes: nil, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, 
committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ - BatchHash: event2.BatchHash, + BatchHash: event2.BatchHash(), TotalL1MessagePopped: 42, - StateRoot: event2.StateRoot, - WithdrawRoot: event2.WithdrawRoot, + StateRoot: event2.StateRoot(), + WithdrawRoot: event2.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } @@ -619,18 +311,21 @@ func TestValidateBatchCodecv1(t *testing.T) { chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} - event1 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(0), - BatchHash: common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"), - StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, - WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, - } + event1 := l1.NewFinalizeBatchEvent( + big.NewInt(0), + common.HexToHash("0x73cb3310646716cb782702a0ec4ad33cf55633c85daf96b641953c5defe58031"), + chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, + chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV1), BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")}, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -638,31 +333,34 @@ func TestValidateBatchCodecv1(t *testing.T) { chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ - BatchHash: event1.BatchHash, + BatchHash: event1.BatchHash(), TotalL1MessagePopped: 11, - StateRoot: event1.StateRoot, - WithdrawRoot: event1.WithdrawRoot, + StateRoot: event1.StateRoot(), + WithdrawRoot: event1.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) - event2 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(1), - BatchHash: common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"), - StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, - WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, - } + event2 := l1.NewFinalizeBatchEvent( + big.NewInt(1), + common.HexToHash("0x7f230ce84b4bf86f8ee22ffb5c145e3ef3ddf2a76da4936a33f33cebdb63a48a"), + chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, + chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV1), BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")}, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) 
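An aside on the pattern these hunks apply throughout the test file: the hand-rolled L1FinalizeBatchEvent struct literals become calls to the constructor in the rollup/l1 package, and direct field reads become accessor calls. A minimal before/after sketch, reusing the test file's existing imports; judging purely from the call sites, the three trailing constructor arguments are assumed to be the L1 tx hash, L1 block hash, and L1 block number, for which the tests pass dummy values:

// Placeholder fixture values for the sketch.
var (
	batchHash    = common.HexToHash("0x2")
	stateRoot    = common.HexToHash("0x3")
	withdrawRoot = common.HexToHash("0x4")
)

// Before (type deleted in PATCH 15): exported fields, read directly.
oldEvent := &L1FinalizeBatchEvent{
	BatchIndex:   big.NewInt(1),
	BatchHash:    batchHash,
	StateRoot:    stateRoot,
	WithdrawRoot: withdrawRoot,
}
_ = oldEvent.BatchIndex.Uint64()

// After: construction and reads go through the l1 package.
newEvent := l1.NewFinalizeBatchEvent(
	big.NewInt(1),           // batch index
	batchHash,               // batch hash
	stateRoot,               // post state root
	withdrawRoot,            // withdraw trie root
	common.HexToHash("0x1"), // assumed: L1 tx hash (dummy, as in the tests)
	common.HexToHash("0x1"), // assumed: L1 block hash (dummy, as in the tests)
	1,                       // assumed: L1 block number
)
_ = newEvent.BatchIndex().Uint64()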
assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ - BatchHash: event2.BatchHash, + BatchHash: event2.BatchHash(), TotalL1MessagePopped: 42, - StateRoot: event2.StateRoot, - WithdrawRoot: event2.WithdrawRoot, + StateRoot: event2.StateRoot(), + WithdrawRoot: event2.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } @@ -678,18 +376,21 @@ func TestValidateBatchCodecv2(t *testing.T) { chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} - event1 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(0), - BatchHash: common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"), - StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, - WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, - } + event1 := l1.NewFinalizeBatchEvent( + big.NewInt(0), + common.HexToHash("0xaccf37a0b974f2058692d366b2ea85502c99db4a0bcb9b77903b49bf866a463b"), + chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, + chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV2), BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -697,31 +398,34 @@ func TestValidateBatchCodecv2(t *testing.T) { chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ - BatchHash: event1.BatchHash, + BatchHash: event1.BatchHash(), TotalL1MessagePopped: 11, - StateRoot: event1.StateRoot, - WithdrawRoot: event1.WithdrawRoot, + StateRoot: event1.StateRoot(), + WithdrawRoot: event1.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) - event2 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(1), - BatchHash: common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"), - StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, - WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, - } + event2 := l1.NewFinalizeBatchEvent( + big.NewInt(1), + common.HexToHash("0x62ec61e1fdb334868ffd471df601f6858e692af01d42b5077c805a9fd4558c91"), + chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, + chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV2), BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), 
endBlock2) parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ - BatchHash: event2.BatchHash, + BatchHash: event2.BatchHash(), TotalL1MessagePopped: 42, - StateRoot: event2.StateRoot, - WithdrawRoot: event2.WithdrawRoot, + StateRoot: event2.StateRoot(), + WithdrawRoot: event2.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } @@ -737,19 +441,22 @@ func TestValidateBatchCodecv3(t *testing.T) { chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} - event1 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(0), - BatchHash: common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"), - StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, - WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, - } + event1 := l1.NewFinalizeBatchEvent( + big.NewInt(0), + common.HexToHash("0x015eb56fb95bf9a06157cfb8389ba7c2b6b08373e22581ac2ba387003708265d"), + chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, + chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV3), BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -757,31 +464,34 @@ func TestValidateBatchCodecv3(t *testing.T) { chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ - BatchHash: event1.BatchHash, + BatchHash: event1.BatchHash(), TotalL1MessagePopped: 11, - StateRoot: event1.StateRoot, - WithdrawRoot: event1.WithdrawRoot, + StateRoot: event1.StateRoot(), + WithdrawRoot: event1.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) - event2 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(1), - BatchHash: common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"), - StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, - WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, - } + event2 := l1.NewFinalizeBatchEvent( + big.NewInt(1), + common.HexToHash("0x382cb0d507e3d7507f556c52e05f76b05e364ad26205e7f62c95967a19c2f35d"), + chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, + chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV3), BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) parentFinalizedBatchMeta3 := 
&rawdb.FinalizedBatchMeta{ - BatchHash: event2.BatchHash, + BatchHash: event2.BatchHash(), TotalL1MessagePopped: 42, - StateRoot: event2.StateRoot, - WithdrawRoot: event2.WithdrawRoot, + StateRoot: event2.StateRoot(), + WithdrawRoot: event2.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } @@ -791,19 +501,22 @@ func TestValidateBatchUpgrades(t *testing.T) { chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} parentFinalizedBatchMeta1 := &rawdb.FinalizedBatchMeta{} - event1 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(0), - BatchHash: common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"), - StateRoot: chunk1.Blocks[len(chunk1.Blocks)-1].Header.Root, - WithdrawRoot: chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot, - } + event1 := l1.NewFinalizeBatchEvent( + big.NewInt(0), + common.HexToHash("0x4605465b7470c8565b123330d7186805caf9a7f2656d8e9e744b62e14ca22c3d"), + chunk1.Blocks[len(chunk1.Blocks)-1].Header.Root, + chunk1.Blocks[len(chunk1.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV0), BlobVersionedHashes: nil, } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex.Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) @@ -811,23 +524,26 @@ func TestValidateBatchUpgrades(t *testing.T) { chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} parentFinalizedBatchMeta2 := &rawdb.FinalizedBatchMeta{ - BatchHash: event1.BatchHash, + BatchHash: event1.BatchHash(), TotalL1MessagePopped: 0, - StateRoot: event1.StateRoot, - WithdrawRoot: event1.WithdrawRoot, + StateRoot: event1.StateRoot(), + WithdrawRoot: event1.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta2, finalizedBatchMeta1) - event2 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(1), - BatchHash: common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"), - StateRoot: chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root, - WithdrawRoot: chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot, - } + event2 := l1.NewFinalizeBatchEvent( + big.NewInt(1), + common.HexToHash("0xc4af33bce87aa702edc3ad4b7d34730d25719427704e250787f99e0f55049252"), + chunk2.Blocks[len(chunk2.Blocks)-1].Header.Root, + chunk2.Blocks[len(chunk2.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV1), BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")}, } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex.Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) @@ -835,23 +551,26 @@ func TestValidateBatchUpgrades(t *testing.T) { chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} parentFinalizedBatchMeta3 := &rawdb.FinalizedBatchMeta{ - BatchHash: 
event2.BatchHash, + BatchHash: event2.BatchHash(), TotalL1MessagePopped: 0, - StateRoot: event2.StateRoot, - WithdrawRoot: event2.WithdrawRoot, + StateRoot: event2.StateRoot(), + WithdrawRoot: event2.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) - event3 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(2), - BatchHash: common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"), - StateRoot: chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, - WithdrawRoot: chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, - } + event3 := l1.NewFinalizeBatchEvent( + big.NewInt(2), + common.HexToHash("0x9f87f2de2019ed635f867b1e61be6a607c3174ced096f370fd18556c38833c62"), + chunk3.Blocks[len(chunk3.Blocks)-1].Header.Root, + chunk3.Blocks[len(chunk3.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta3 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV1), BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")}, } - endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex.Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex().Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) @@ -859,31 +578,34 @@ func TestValidateBatchUpgrades(t *testing.T) { chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} parentFinalizedBatchMeta4 := &rawdb.FinalizedBatchMeta{ - BatchHash: event3.BatchHash, + BatchHash: event3.BatchHash(), TotalL1MessagePopped: 11, - StateRoot: event3.StateRoot, - WithdrawRoot: event3.WithdrawRoot, + StateRoot: event3.StateRoot(), + WithdrawRoot: event3.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta4, finalizedBatchMeta3) - event4 := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(3), - BatchHash: common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"), - StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, - WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, - } + event4 := l1.NewFinalizeBatchEvent( + big.NewInt(3), + common.HexToHash("0xd33332aef8efbc9a0be4c4694088ac0dd052d2d3ad3ffda5e4c2010825e476bc"), + chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, + chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta4 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV3), BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, } - endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex.Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex().Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ - BatchHash: event4.BatchHash, + BatchHash: event4.BatchHash(), TotalL1MessagePopped: 42, - StateRoot: event4.StateRoot, - WithdrawRoot: event4.WithdrawRoot, + StateRoot: event4.StateRoot(), + WithdrawRoot: event4.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta5, 
finalizedBatchMeta4) } @@ -898,13 +620,15 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { chunk2 := &encoding.Chunk{Blocks: []*encoding.Block{block2}} chunk3 := &encoding.Chunk{Blocks: []*encoding.Block{block3}} chunk4 := &encoding.Chunk{Blocks: []*encoding.Block{block4}} - - event := &L1FinalizeBatchEvent{ - BatchIndex: big.NewInt(3), - BatchHash: common.HexToHash("0xaa6dc7cc432c8d46a9373e1e96d829a1e24e52fe0468012ff062793ea8f5b55e"), - StateRoot: chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, - WithdrawRoot: chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, - } + event := l1.NewFinalizeBatchEvent( + big.NewInt(3), + common.HexToHash("0xaa6dc7cc432c8d46a9373e1e96d829a1e24e52fe0468012ff062793ea8f5b55e"), + chunk4.Blocks[len(chunk4.Blocks)-1].Header.Root, + chunk4.Blocks[len(chunk4.Blocks)-1].WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ Version: uint8(encoding.CodecV3), @@ -943,10 +667,10 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { assert.Equal(t, uint64(17), endBlock4) parentFinalizedBatchMeta5 := &rawdb.FinalizedBatchMeta{ - BatchHash: event.BatchHash, + BatchHash: event.BatchHash(), TotalL1MessagePopped: 42, - StateRoot: event.StateRoot, - WithdrawRoot: event.WithdrawRoot, + StateRoot: event.StateRoot(), + WithdrawRoot: event.WithdrawRoot(), } assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } From da81a2ea6cb6b9183190be63181f74dd7d1fe57b Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Fri, 27 Dec 2024 11:19:35 +0800 Subject: [PATCH 15/36] remove unused code --- rollup/rollup_sync_service/abi.go | 55 ------ rollup/rollup_sync_service/abi_test.go | 82 --------- rollup/rollup_sync_service/l1client.go | 158 ------------------ rollup/rollup_sync_service/l1client_test.go | 74 -------- .../rollup_sync_service.go | 3 - 5 files changed, 372 deletions(-) delete mode 100644 rollup/rollup_sync_service/abi.go delete mode 100644 rollup/rollup_sync_service/abi_test.go delete mode 100644 rollup/rollup_sync_service/l1client.go delete mode 100644 rollup/rollup_sync_service/l1client_test.go diff --git a/rollup/rollup_sync_service/abi.go b/rollup/rollup_sync_service/abi.go deleted file mode 100644 index 428413dec9c2..000000000000 --- a/rollup/rollup_sync_service/abi.go +++ /dev/null @@ -1,55 +0,0 @@ -package rollup_sync_service - -import ( - "fmt" - "math/big" - - "github.com/scroll-tech/go-ethereum/accounts/abi" - "github.com/scroll-tech/go-ethereum/accounts/abi/bind" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" -) - -// ScrollChainMetaData contains ABI of the ScrollChain contract. 
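// Annotation, not part of the deleted file: the escaped JSON string below
// declares the three events that the deleted TestEventSignatures pins down
// (CommitBatch(uint256,bytes32), RevertBatch(uint256,bytes32) and
// FinalizeBatch(uint256,bytes32,bytes32,bytes32)), the UpdateMaxNumTxInChunk,
// UpdateProver and UpdateSequencer events, the commitBatch,
// commitBatchWithBlobProof, finalizeBatch*, finalizeBundle* entry points,
// and view functions such as committedBatches, finalizedStateRoots,
// isBatchFinalized, lastFinalizedBatchIndex and withdrawRoots.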
-var ScrollChainMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"CommitBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"stateRoot\",\"type\": \"bytes32\"},{\"indexed\": false,\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"FinalizeBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"},{\"indexed\": true,\"internalType\": \"bytes32\",\"name\": \"batchHash\",\"type\": \"bytes32\"}],\"name\": \"RevertBatch\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"oldMaxNumTxInChunk\",\"type\": \"uint256\"},{\"indexed\": false,\"internalType\": \"uint256\",\"name\": \"newMaxNumTxInChunk\",\"type\": \"uint256\"}],\"name\": \"UpdateMaxNumTxInChunk\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateProver\",\"type\": \"event\"},{\"anonymous\": false,\"inputs\": [{\"indexed\": true,\"internalType\": \"address\",\"name\": \"account\",\"type\": \"address\"},{\"indexed\": false,\"internalType\": \"bool\",\"name\": \"status\",\"type\": \"bool\"}],\"name\": \"UpdateSequencer\",\"type\": \"event\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"}],\"name\": \"commitBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint8\",\"name\": \"version\",\"type\": \"uint8\"},{\"internalType\": \"bytes\",\"name\": \"parentBatchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes[]\",\"name\": \"chunks\",\"type\": \"bytes[]\"},{\"internalType\": \"bytes\",\"name\": \"skippedL1MessageBitmap\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"commitBatchWithBlobProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"committedBatches\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBatch\",\"outputs\": [],\"stateMutability\": 
\"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatch4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"prevStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"blobDataProof\",\"type\": \"bytes\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBatchWithProof4844\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"}],\"name\": \"finalizeBundle\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"postStateRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes32\",\"name\": \"withdrawRoot\",\"type\": \"bytes32\"},{\"internalType\": \"bytes\",\"name\": \"aggrProof\",\"type\": \"bytes\"}],\"name\": \"finalizeBundleWithProof\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"finalizedStateRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": \"_batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"bytes32\",\"name\": \"_stateRoot\",\"type\": \"bytes32\"}],\"name\": \"importGenesisBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"isBatchFinalized\",\"outputs\": [{\"internalType\": \"bool\",\"name\": \"\",\"type\": \"bool\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [],\"name\": \"lastFinalizedBatchIndex\",\"outputs\": [{\"internalType\": \"uint256\",\"name\": \"\",\"type\": \"uint256\"}],\"stateMutability\": \"view\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"bytes\",\"name\": 
\"batchHeader\",\"type\": \"bytes\"},{\"internalType\": \"uint256\",\"name\": \"count\",\"type\": \"uint256\"}],\"name\": \"revertBatch\",\"outputs\": [],\"stateMutability\": \"nonpayable\",\"type\": \"function\"},{\"inputs\": [{\"internalType\": \"uint256\",\"name\": \"batchIndex\",\"type\": \"uint256\"}],\"name\": \"withdrawRoots\",\"outputs\": [{\"internalType\": \"bytes32\",\"name\": \"\",\"type\": \"bytes32\"}],\"stateMutability\": \"view\",\"type\": \"function\"}]", -} - -// L1CommitBatchEvent represents a CommitBatch event raised by the ScrollChain contract. -type L1CommitBatchEvent struct { - BatchIndex *big.Int - BatchHash common.Hash -} - -// L1RevertBatchEvent represents a RevertBatch event raised by the ScrollChain contract. -type L1RevertBatchEvent struct { - BatchIndex *big.Int - BatchHash common.Hash -} - -// L1FinalizeBatchEvent represents a FinalizeBatch event raised by the ScrollChain contract. -type L1FinalizeBatchEvent struct { - BatchIndex *big.Int - BatchHash common.Hash - StateRoot common.Hash - WithdrawRoot common.Hash -} - -// UnpackLog unpacks a retrieved log into the provided output structure. -func UnpackLog(c *abi.ABI, out interface{}, event string, log types.Log) error { - if log.Topics[0] != c.Events[event].ID { - return fmt.Errorf("event signature mismatch") - } - if len(log.Data) > 0 { - if err := c.UnpackIntoInterface(out, event, log.Data); err != nil { - return err - } - } - var indexed abi.Arguments - for _, arg := range c.Events[event].Inputs { - if arg.Indexed { - indexed = append(indexed, arg) - } - } - return abi.ParseTopics(out, indexed, log.Topics[1:]) -} diff --git a/rollup/rollup_sync_service/abi_test.go b/rollup/rollup_sync_service/abi_test.go deleted file mode 100644 index 550c950bb337..000000000000 --- a/rollup/rollup_sync_service/abi_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package rollup_sync_service - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/crypto" -) - -func TestEventSignatures(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - if err != nil { - t.Fatal("failed to get scroll chain abi", "err", err) - } - - assert.Equal(t, crypto.Keccak256Hash([]byte("CommitBatch(uint256,bytes32)")), scrollChainABI.Events["CommitBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("RevertBatch(uint256,bytes32)")), scrollChainABI.Events["RevertBatch"].ID) - assert.Equal(t, crypto.Keccak256Hash([]byte("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")), scrollChainABI.Events["FinalizeBatch"].ID) -} - -func TestUnpackLog(t *testing.T) { - scrollChainABI, err := ScrollChainMetaData.GetAbi() - require.NoError(t, err) - - mockBatchIndex := big.NewInt(123) - mockBatchHash := crypto.Keccak256Hash([]byte("mockBatch")) - mockStateRoot := crypto.Keccak256Hash([]byte("mockStateRoot")) - mockWithdrawRoot := crypto.Keccak256Hash([]byte("mockWithdrawRoot")) - - tests := []struct { - eventName string - mockLog types.Log - expected interface{} - out interface{} - }{ - { - "CommitBatch", - types.Log{ - Data: []byte{}, - Topics: []common.Hash{scrollChainABI.Events["CommitBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, - }, - &L1CommitBatchEvent{BatchIndex: mockBatchIndex, BatchHash: mockBatchHash}, - &L1CommitBatchEvent{}, - }, - { - "RevertBatch", - types.Log{ - Data: []byte{}, - Topics: 
[]common.Hash{scrollChainABI.Events["RevertBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, - }, - &L1RevertBatchEvent{BatchIndex: mockBatchIndex, BatchHash: mockBatchHash}, - &L1RevertBatchEvent{}, - }, - { - "FinalizeBatch", - types.Log{ - Data: append(mockStateRoot.Bytes(), mockWithdrawRoot.Bytes()...), - Topics: []common.Hash{scrollChainABI.Events["FinalizeBatch"].ID, common.BigToHash(mockBatchIndex), mockBatchHash}, - }, - &L1FinalizeBatchEvent{ - BatchIndex: mockBatchIndex, - BatchHash: mockBatchHash, - StateRoot: mockStateRoot, - WithdrawRoot: mockWithdrawRoot, - }, - &L1FinalizeBatchEvent{}, - }, - } - - for _, tt := range tests { - t.Run(tt.eventName, func(t *testing.T) { - err := UnpackLog(scrollChainABI, tt.out, tt.eventName, tt.mockLog) - assert.NoError(t, err) - assert.Equal(t, tt.expected, tt.out) - }) - } -} diff --git a/rollup/rollup_sync_service/l1client.go b/rollup/rollup_sync_service/l1client.go deleted file mode 100644 index b6be3e0bc611..000000000000 --- a/rollup/rollup_sync_service/l1client.go +++ /dev/null @@ -1,158 +0,0 @@ -package rollup_sync_service - -import ( - "context" - "errors" - "fmt" - "math/big" - - "github.com/scroll-tech/go-ethereum" - "github.com/scroll-tech/go-ethereum/accounts/abi" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/log" - "github.com/scroll-tech/go-ethereum/rpc" - - "github.com/scroll-tech/go-ethereum/rollup/sync_service" -) - -// L1Client is a wrapper around EthClient that adds -// methods for conveniently collecting rollup events of ScrollChain contract. -type L1Client struct { - ctx context.Context - client sync_service.EthClient - scrollChainAddress common.Address - l1CommitBatchEventSignature common.Hash - l1RevertBatchEventSignature common.Hash - l1FinalizeBatchEventSignature common.Hash -} - -// NewL1Client initializes a new L1Client instance with the provided configuration. -// It checks for a valid scrollChainAddress and verifies the chain ID. -func NewL1Client(ctx context.Context, l1Client sync_service.EthClient, l1ChainId uint64, scrollChainAddress common.Address, scrollChainABI *abi.ABI) (*L1Client, error) { - if scrollChainAddress == (common.Address{}) { - return nil, errors.New("must pass non-zero scrollChainAddress to L1Client") - } - - // sanity check: compare chain IDs - got, err := l1Client.ChainID(ctx) - if err != nil { - return nil, fmt.Errorf("failed to query L1 chain ID, err: %w", err) - } - if got.Cmp(big.NewInt(0).SetUint64(l1ChainId)) != 0 { - return nil, fmt.Errorf("unexpected chain ID, expected: %v, got: %v", l1ChainId, got) - } - - client := L1Client{ - ctx: ctx, - client: l1Client, - scrollChainAddress: scrollChainAddress, - l1CommitBatchEventSignature: scrollChainABI.Events["CommitBatch"].ID, - l1RevertBatchEventSignature: scrollChainABI.Events["RevertBatch"].ID, - l1FinalizeBatchEventSignature: scrollChainABI.Events["FinalizeBatch"].ID, - } - - return &client, nil -} - -// FetchRollupEventsInRange retrieves and parses commit/revert/finalize rollup events between block numbers: [from, to]. 
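Because the first topic position of the filter holds all three signatures, eth_getLogs treats them as an OR, and a single query returns commit, revert and finalize events interleaved in block order. A hypothetical driver over this (deleted) API, not part of the patch: client and start are assumed inputs, and the window size mirrors the defaultFetchBlockRange constant removed elsewhere in this patch:

func scanRollupEvents(client *L1Client, start uint64) error {
	latest, err := client.GetLatestFinalizedBlockNumber()
	if err != nil {
		return err
	}
	const window = uint64(100) // same value as the removed defaultFetchBlockRange
	for from := start; from <= latest; from += window {
		to := from + window - 1
		if to > latest {
			to = latest
		}
		logs, err := client.FetchRollupEventsInRange(from, to)
		if err != nil {
			return fmt.Errorf("failed to fetch rollup events, err: %w", err)
		}
		for _, vLog := range logs {
			switch vLog.Topics[0] {
			// dispatch on the CommitBatch / RevertBatch / FinalizeBatch IDs
			}
		}
	}
	return nil
}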
-func (c *L1Client) FetchRollupEventsInRange(from, to uint64) ([]types.Log, error) { - log.Trace("L1Client FetchRollupEventsInRange", "fromBlock", from, "toBlock", to) - - query := ethereum.FilterQuery{ - FromBlock: big.NewInt(int64(from)), // inclusive - ToBlock: big.NewInt(int64(to)), // inclusive - Addresses: []common.Address{ - c.scrollChainAddress, - }, - Topics: make([][]common.Hash, 1), - } - query.Topics[0] = make([]common.Hash, 3) - query.Topics[0][0] = c.l1CommitBatchEventSignature - query.Topics[0][1] = c.l1RevertBatchEventSignature - query.Topics[0][2] = c.l1FinalizeBatchEventSignature - - logs, err := c.client.FilterLogs(c.ctx, query) - if err != nil { - return nil, fmt.Errorf("failed to filter logs, err: %w", err) - } - return logs, nil -} - -// GetLatestFinalizedBlockNumber fetches the block number of the latest finalized block from the L1 chain. -func (c *L1Client) GetLatestFinalizedBlockNumber() (uint64, error) { - header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(int64(rpc.FinalizedBlockNumber))) - if err != nil { - return 0, err - } - if !header.Number.IsInt64() { - return 0, fmt.Errorf("received unexpected block number in L1Client: %v", header.Number) - } - return header.Number.Uint64(), nil -} - -// FetchTxData fetches tx data corresponding to given event log -func (c *L1Client) FetchTxData(vLog *types.Log) ([]byte, error) { - tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) - if err != nil { - log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", - "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) - block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) - if err != nil { - return nil, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) - } - - found := false - for _, txInBlock := range block.Transactions() { - if txInBlock.Hash() == vLog.TxHash { - tx = txInBlock - found = true - break - } - } - if !found { - return nil, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) - } - } - - return tx.Data(), nil -} - -// FetchTxBlobHash fetches tx blob hash corresponding to given event log -func (c *L1Client) FetchTxBlobHash(vLog *types.Log) (common.Hash, error) { - tx, _, err := c.client.TransactionByHash(c.ctx, vLog.TxHash) - if err != nil { - log.Debug("failed to get transaction by hash, probably an unindexed transaction, fetching the whole block to get the transaction", - "tx hash", vLog.TxHash.Hex(), "block number", vLog.BlockNumber, "block hash", vLog.BlockHash.Hex(), "err", err) - block, err := c.client.BlockByHash(c.ctx, vLog.BlockHash) - if err != nil { - return common.Hash{}, fmt.Errorf("failed to get block by hash, block number: %v, block hash: %v, err: %w", vLog.BlockNumber, vLog.BlockHash.Hex(), err) - } - - found := false - for _, txInBlock := range block.Transactions() { - if txInBlock.Hash() == vLog.TxHash { - tx = txInBlock - found = true - break - } - } - if !found { - return common.Hash{}, fmt.Errorf("transaction not found in the block, tx hash: %v, block number: %v, block hash: %v", vLog.TxHash.Hex(), vLog.BlockNumber, vLog.BlockHash.Hex()) - } - } - blobHashes := tx.BlobHashes() - if len(blobHashes) == 0 { - return common.Hash{}, fmt.Errorf("transaction does not contain any blobs, tx hash: %v", vLog.TxHash.Hex()) - } - 
return blobHashes[0], nil -} - -// GetHeaderByNumber fetches the block header by number -func (c *L1Client) GetHeaderByNumber(blockNumber uint64) (*types.Header, error) { - header, err := c.client.HeaderByNumber(c.ctx, big.NewInt(0).SetUint64(blockNumber)) - if err != nil { - return nil, err - } - return header, nil -} diff --git a/rollup/rollup_sync_service/l1client_test.go b/rollup/rollup_sync_service/l1client_test.go deleted file mode 100644 index 394f455b80c5..000000000000 --- a/rollup/rollup_sync_service/l1client_test.go +++ /dev/null @@ -1,74 +0,0 @@ -package rollup_sync_service - -import ( - "context" - "math/big" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/scroll-tech/go-ethereum" - "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/types" - "github.com/scroll-tech/go-ethereum/rlp" -) - -func TestL1Client(t *testing.T) { - ctx := context.Background() - mockClient := &mockEthClient{} - - scrollChainABI, err := ScrollChainMetaData.GetAbi() - if err != nil { - t.Fatal("failed to get scroll chain abi", "err", err) - } - scrollChainAddress := common.HexToAddress("0x0123456789abcdef") - l1Client, err := NewL1Client(ctx, mockClient, 11155111, scrollChainAddress, scrollChainABI) - require.NoError(t, err, "Failed to initialize L1Client") - - blockNumber, err := l1Client.GetLatestFinalizedBlockNumber() - assert.NoError(t, err, "Error getting latest confirmed block number") - assert.Equal(t, uint64(36), blockNumber, "Unexpected block number") - - logs, err := l1Client.FetchRollupEventsInRange(0, blockNumber) - assert.NoError(t, err, "Error fetching rollup events in range") - assert.Empty(t, logs, "Expected no logs from FetchRollupEventsInRange") -} - -type mockEthClient struct { - txRLP []byte -} - -func (m *mockEthClient) BlockNumber(ctx context.Context) (uint64, error) { - return 11155111, nil -} - -func (m *mockEthClient) ChainID(ctx context.Context) (*big.Int, error) { - return big.NewInt(11155111), nil -} - -func (m *mockEthClient) FilterLogs(ctx context.Context, q ethereum.FilterQuery) ([]types.Log, error) { - return []types.Log{}, nil -} - -func (m *mockEthClient) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { - return &types.Header{ - Number: big.NewInt(100 - 64), - }, nil -} - -func (m *mockEthClient) SubscribeFilterLogs(ctx context.Context, query ethereum.FilterQuery, ch chan<- types.Log) (ethereum.Subscription, error) { - return nil, nil -} - -func (m *mockEthClient) TransactionByHash(ctx context.Context, txHash common.Hash) (*types.Transaction, bool, error) { - var tx types.Transaction - if err := rlp.DecodeBytes(m.txRLP, &tx); err != nil { - return nil, false, err - } - return &tx, false, nil -} - -func (m *mockEthClient) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { - return nil, nil -} diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 3380b7dcff0a..15b9c55ad633 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -26,9 +26,6 @@ import ( ) const ( - // defaultFetchBlockRange is the number of blocks that we collect in a single eth_getLogs query. - defaultFetchBlockRange = uint64(100) - // defaultSyncInterval is the frequency at which we query for new rollup event. 
defaultSyncInterval = 60 * time.Second From 875004525ff705af02b55818a884567c6e28913a Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 2 Jan 2025 14:03:22 +0800 Subject: [PATCH 16/36] address review comments --- .../rollup_sync_service.go | 6 ++-- .../rollup_sync_service_test.go | 16 ++++----- .../commitBatchWithBlobProof_codecv3.rlp | Bin 4693 -> 0 bytes ...ommitBatchWithBlobProof_input_codecv3.json | 31 ------------------ .../testdata/commitBatch_codecv0.rlp | Bin 88636 -> 0 bytes .../testdata/commitBatch_codecv1.rlp | Bin 1237 -> 0 bytes .../testdata/commitBatch_codecv2.rlp | Bin 4437 -> 0 bytes .../testdata/commitBatch_input_codecv0.json | 27 --------------- .../testdata/commitBatch_input_codecv1.json | 31 ------------------ .../testdata/commitBatch_input_codecv2.json | 31 ------------------ 10 files changed, 11 insertions(+), 131 deletions(-) delete mode 100644 rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_codecv3.rlp delete mode 100644 rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_codecv0.rlp delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_codecv1.rlp delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_codecv2.rlp delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_input_codecv0.json delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_input_codecv1.json delete mode 100644 rollup/rollup_sync_service/testdata/commitBatch_input_codecv2.json diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 15b9c55ad633..406895be1120 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -27,7 +27,7 @@ import ( const ( // defaultSyncInterval is the frequency at which we query for new rollup event. - defaultSyncInterval = 60 * time.Second + defaultSyncInterval = 30 * time.Second // defaultMaxRetries is the maximum number of retries allowed when the local node is not synced up to the required block height. defaultMaxRetries = 20 @@ -37,7 +37,7 @@ const ( // of a specific L1 batch finalize event. defaultGetBlockInRangeRetryDelay = 60 * time.Second - // defaultLogInterval is the frequency at which we print the latestProcessedBlock. + // defaultLogInterval is the frequency at which we print the latest processed block. 
defaultLogInterval = 5 * time.Minute ) @@ -148,7 +148,7 @@ func (s *RollupSyncService) Start() { log.Error("failed to fetch rollup events", "err", err) } case <-logTicker.C: - log.Info("Sync rollup events progress update", "latestProcessedBlock", s.callDataBlobSource.L1Height()) + log.Info("Sync rollup events progress update", "latest processed block", s.callDataBlobSource.L1Height()) } } }() diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index c34f9385b515..dca18285d2c0 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -22,13 +22,13 @@ import ( "github.com/scroll-tech/go-ethereum/rollup/l1" ) -func TestGetCommittedBatchMetaCodecv0(t *testing.T) { +func TestGetCommittedBatchMetaCodecV0(t *testing.T) { genesisConfig := ¶ms.ChainConfig{ Scroll: params.ScrollConfig{ L1Config: ¶ms.L1Config{ L1ChainId: 11155111, ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"), + L1MessageQueueAddress: common.HexToAddress("0x0000000000000000000000000000000000000001"), }, }, } @@ -39,7 +39,7 @@ func TestGetCommittedBatchMetaCodecv0(t *testing.T) { defer stack.Close() service, err := NewRollupSyncService(context.Background(), genesisConfig, db, &l1.MockNopClient{}, &core.BlockChain{}, stack, da_syncer.Config{ - BlobScanAPIEndpoint: "http://localhost:8080", + BlobScanAPIEndpoint: "http://dummy-endpoint:1234", }) require.NoError(t, err) @@ -77,7 +77,7 @@ func TestGetCommittedBatchMetaCodecV1(t *testing.T) { L1Config: ¶ms.L1Config{ L1ChainId: 11155111, ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), - L1MessageQueueAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a1"), + L1MessageQueueAddress: common.HexToAddress("0x0000000000000000000000000000000000000001"), }, }, } @@ -233,7 +233,7 @@ func (b *mockDABlock) Number() uint64 { return b.number } -func TestValidateBatchCodecv0(t *testing.T) { +func TestValidateBatchCodecV0(t *testing.T) { block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -300,7 +300,7 @@ func TestValidateBatchCodecv0(t *testing.T) { assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } -func TestValidateBatchCodecv1(t *testing.T) { +func TestValidateBatchCodecV1(t *testing.T) { block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -365,7 +365,7 @@ func TestValidateBatchCodecv1(t *testing.T) { assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } -func TestValidateBatchCodecv2(t *testing.T) { +func TestValidateBatchCodecV2(t *testing.T) { block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} @@ -430,7 +430,7 @@ func TestValidateBatchCodecv2(t *testing.T) { assert.Equal(t, parentFinalizedBatchMeta3, finalizedBatchMeta2) } -func TestValidateBatchCodecv3(t *testing.T) { +func TestValidateBatchCodecV3(t *testing.T) { block1 := readBlockFromJSON(t, "./testdata/blockTrace_02.json") chunk1 := &encoding.Chunk{Blocks: []*encoding.Block{block1}} diff --git a/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_codecv3.rlp b/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_codecv3.rlp deleted file mode 100644 
index 4640f84687416f77ecd3e0d0207f1bc46d75edbb..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4693 zcmcJSe@qi+7{{;eC|00Qx@ct66`003eu!+A7GcM*5(cRWC1q@qbWWKV2aXNm9Dxql z+}OZhT@?_$i38e-#WEHM8|kXf%waN+A<1y*7BgfzwL7}$H4yUX3>c#lgn{@{Jz z=kx7{hd0k%(t*f4*~3U*szjj>~3^ zm%ol^pLlmy&DP?=i+^<4Jl~8sXWwr1Cw+zgz2YX{DH;?Aq! zahVs+AJYhMs{r@Ic@QeVF*ujQF(S9X19C5%2WtelRe<~9d$y(mndKMv{voCr^nZZqk)AOH#Q{ZplOHnnJ zUtIshv;lu3<>Ffd@au6O@E_vyQndj5siglX<>Fge{tUhZw?^MT@w9;c?c4ce zF_ZG`X+JRyz~9W5qG~Mv3se=h(tctZfd2$6&GL(zPof*(KecGTxcfnIFYupUv|n8R z#54f^7ypz0MY4bB_c7M}i+g@ENxy5+{iCoX#pwCjj90+@oTCWe8bJRB(tn=MOVwC@ z;)$ooLMz?>7CZs^|8k)uUT66i^?bo>IKx(#uZZnG`Pt~QlcPQB4Z5i{-E$4ty z4hSn8M9AbX*<+1kJfbr4Gk68Z#Fw`%y};4 zs$#IlUc=t_C1^st@y@-?Im1ZCtK~iOrkWk{n)XYcw;J3tir$}ckC+Vg+io|mNi*in zAe$!C-Y9gs`gl>*quu{Zm)a&({gK1VO3m7z^P8`a+tOOr-Y5!GUmWO@3@*uWVx!Z9@(jGS`omW;ro-|RJIJn&Y{r$doPMsZ# I*)@{*A9z%^Qvd(} diff --git a/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json b/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json deleted file mode 100644 index ca13d9a749fc..000000000000 --- a/rollup/rollup_sync_service/testdata/commitBatchWithBlobProof_input_codecv3.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "blockHash": "0xb7f00b3235ef6066d61e5e5be5472cdd1edc0a64537b84f110f860e6614a4759", - "blockNumber": "0x187", - "from": "0xf472086186382fca55cd182de196520abd76f69d", - "gas": "0x3e052", - "gasPrice": "0x3b9aca07", - "maxFeePerGas": "0x3b9aca0e", - "maxPriorityFeePerGas": "0x3b9aca00", - "maxFeePerBlobGas": "0x2", - "hash": "0x8de0573f1f72ced727838df60088d9084333d27384a77bf331d520cac0e6a298", - "input": "0x86b053a9000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000108000000000000000000000000000000000000000000000000000000000000010a0000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000e5a938b077b60c939e58eeede33d4385228b532cf73e54aca76731a27acc86b7000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000002c0000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000007c00000000000000000000000000000000000000000000000000000000000000820000000000000000000000000000000000000000000000000000000000000088000000000000000000000000000000000000000000000000000000000000008e0000000000000000000000000000000000000000000000000000000000000094000000000000000000000000000000000000000000000000000000000000009a00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a600000000000000000000000000000000000000000000000000000000000000ac00000000000000000000000000000000000000000000000000000000000000b200000000000000000000000000000000000000000000000000000000000000b800000000000000000000000000000000000000000000000000000000000000be00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000ca00000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000d600000000000000000000000000000000000000000000000000000000000000dc000000000000000000
00000000000000000000000000000000000000000000e200000000000000000000000000000000000000000000000000000000000000e800000000000000000000000000000000000000000000000000000000000000ee0000000000000000000000000000000000000000000000000000000000000021d09000000000000000100000000668ebf810000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000200000000668ec0650000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000300000000668ec0680000000000000000000000000000000000000000000000000000000002e71a60000000000098968000070000000000000000000400000000668ec06b0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000500000000668ec08e0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000600000000668ec0910000000000000000000000000000000000000000000000000000000002e71a60000000000098968000080000000000000000000700000000668ec0940000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000800000000668ec0970000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000900000000668ec0b50000000000000000000000000000000000000000000000000000000002e71a6000000000009896800001000000000000000000000000000000000000000000000000000000000000000000000002950b000000000000000a00000000668ec0b80000000000000000000000000000000000000000000000000000000002e71a60000000000098968000020000000000000000000b00000000668ec0bb0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000c00000000668ec1630000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000d00000000668ec1660000000000000000000000000000000000000000000000000000000002e71a60000000000098968000120000000000000000000e00000000668ec1690000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000000f00000000668ec16e0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001000000000668ec1710000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001100000000668ec1740000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001200000000668ec17c0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001300000000668ec21b0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000010000000000000000001400000000668ec21e0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003400000000000000000000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001500000000668ec2210000000000000000000000000000000000000000000000000000000002e71a60000000000098968000400000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001600000000668ec2240000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001700000000668ec2270000000000000000000000000000000000000000000000000000000002e71a600000000000989680003f0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001800000000668ec22a0000000000000000000000000000000000000000000000000000000002e71a600000000000989680004000000000000000000000000000000000000000000000000000000000000000
00000000003d01000000000000001900000000668ec22d0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001a00000000668ec2300000000000000000000000000000000000000000000000000000000002e71a600000000000989680003e0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001b00000000668ec2330000000000000000000000000000000000000000000000000000000002e71a60000000000098968000400000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001c00000000668ec2360000000000000000000000000000000000000000000000000000000002e71a600000000000989680003f0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001d00000000668ec2390000000000000000000000000000000000000000000000000000000002e71a60000000000098968000410000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001e00000000668ec23c0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003e0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000001f00000000668ec23f0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003f0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002000000000668ec2420000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002100000000668ec2450000000000000000000000000000000000000000000000000000000002e71a600000000000989680003a0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002200000000668ec2480000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002300000000668ec24b0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003e0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002400000000668ec24e0000000000000000000000000000000000000000000000000000000002e71a600000000000989680003c0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002500000000668ec2510000000000000000000000000000000000000000000000000000000002e71a600000000000989680003b0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002600000000668ec2540000000000000000000000000000000000000000000000000000000002e71a600000000000989680003d0000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002700000000668ec2570000000000000000000000000000000000000000000000000000000002e71a60000000000098968000370000000000000000000000000000000000000000000000000000000000000000000000003d01000000000000002800000000668ec25a0000000000000000000000000000000000000000000000000000000002e71a60000000000098968000390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00b3a20d1a749d0917ef837a9973e549be83321ecafc3b8388f25c5c247078e383e4b062167826f53032aa9bb0fc2a8ef610e1c4e5d0f70ee133a76b5020224a8b2428ba7ec725c61950716b2758aa21f1b0cc3f0bf1551474537093924829c170e3d74228be4c07acf24b5c1adb1adac92d25a32f79ba51731343fc0f52cb11e32bbcedb1969853cf854f61862e915744fb5fbc3d4ec8701fc0f626f5c97211b", - "nonce": "0x1", - "to": "0x475652655309fa7cb1397537bee9a7fbafdc11ca", - 
"transactionIndex": "0x0", - "value": "0x0", - "type": "0x3", - "accessList": [ - { - "address": "0x3d9a28f7692cb94740bb64f08cb5cd19aa5cd3dd", - "storageKeys": [] - } - ], - "chainId": "0x1b207", - "blobVersionedHashes": [ - "0x0132d1238782d359051322a61d997a57f5b2f86d6f36c2aad4eda0118e3a213a" - ], - "v": "0x0", - "r": "0xe8f6ca4ef76a295dc6aa0099e0e9c4a2902cb0ffaeff0c4bae258575d00cae94", - "s": "0x20682493a94948fb97b8cb67e8ae86f444a96e39c5945f10d1008cf1a0508851", - "yParity": "0x0" -} diff --git a/rollup/rollup_sync_service/testdata/commitBatch_codecv0.rlp b/rollup/rollup_sync_service/testdata/commitBatch_codecv0.rlp deleted file mode 100644 index e0b42dc0a5815a67dbf48d3d7d6d6c55b368ba24..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 88636 zcmd>n2Ut@}({KvCgH&k|dj|w*(gj68Lyd^#EKN}a1-l|CG<#4KyRl)v_Flnq?Y&p* zy;p4fv*#osfsnxUeZTL1&!2lu_Uvq#nVp@Po!ztSq4#lTe5d#6Bv{*d6+gc_Up<09 zxRJn;wWYXy?8LLFi`PBgc*fN3*0^5n_0yh-xO?bvM;bL>FQWa~hpW6gT?IH!q`Uwv zP7KezC|6Lov%}zFE?!k5`Z?13Is48SOT16vrni1kyhF{Ue>Q9K*{1_|MA^Qfe{Wsb z_eiitMZuu~E;?yh+vCPnJ-!go_-vC$o^tKg`M|6bLIDWn@D*I;@Nz;qeEEA#vVi{6 z*7!GB=dVS|D~O?U=WuBf-s?YYBd#l|cU=kD`cK=6>n{HXb=!&Sme#wjy11nN)Ar)J zYwBIM8s`q;y6fv*x0+WS#dSBY0`583Ov&xdNl(6#{j=e#NKu^;_9tStfsM ztp3RvJr7MAz@L}L-L0;+!|}4Sinjff!BnG-+tF- z)Vy|ojaAyddg&9Ta2lPZ94?i76~*cV8)PMBW~8OH&q^Mak(P?;EcZ_V6{`4i{>I|_ zmyh)bod2}{Df*t0^|r(8jyFvlr15x6+3UW?%G8v0tX}$5DV#=UDTn{-`S+*tt%7`& z^sC~ZzoSlO@%?@p-*xgzw8!t%zuIYyCQE+_{nYOdV?G-_JLBBEOMks1+wTQFJrdj7 z@b83&eTjw3I^MtRyFt@!+n4US%WYI)s2ncQW_6WPiNCPN$RSTA_tV?ac!H*0#lFCo zKQym6IPhj&wQh9hTJ!yhk>4-m%H)7x!$t-<5`YrMG30# z7Z~*{7x7-CR=v3IadqXvlT-KQy15En9zMUf@W6v<6E>~fZ#-;^h{>7K-ff&6|CihP z@NBOI>gr+Rw@(^Qf8J!F_3{O;U4c3VyOI&k$n~0kk|r<=@GD;M+VA0>A#sAeOL`pp zWN@&3yCH%4+}-p?5q)m+>SBgCpOuj4=ZYaeiDKQ~A zrQC0YkH__wMu*NN`|mYMe_lTSU98rm%qLw=#ZHbJwBl0B=)mX8{qo<&kIcOLX72ob zm*ZCG>W!=lX`N0Nd@jlDqoRMCc8h-Z(OeY2F!_1Xms9R-XB@ZMG`DipndH;W*^7e> z+iCl7Kf9m!J=E&Zmv6(~{J5I5`SA45w7!4m{g_s=qN~P;#j4t`T65-dsKt&CP5P&d z_6cX+h-92&M18hOMU4~Nuk`*wJ)o4dm5CM?XAkl)>(OZ5{B?~zv)P4HEek9c3vb%?CMK4aG&vq3@>{rm zbitg;mOb}5ocHZnR1h~OZGF_G=)kNmi+jCa#z>*j#$2f!b5Vyh;}Q0WT<-2M6A_&t z{32brW(hQ!35TF@f5o#2zX&ejM~8eaq0NgVSoEC0jP$JJ^sKR2a(Gog@FLkzrMeoO zs-}uCs3u#h{+-FzVv13WimJv(K??ff5lkTu2>n$gOrV*@#Kr^CYB+~(D9vHB;n5lX zwMd{b!U>i&!Lo^shjff2P0$E!!W7YII=pBeQN^$_7YfYG7#6}9Ty+qkBS@ezp_+99 z&C=6CSR^D{Ky(%lP{2i|5W0NDMf}JjJZ=Qkp|gkz@{uckgdWM~U_vBm=_E8WMv51g z%y!JGQt&Y+4)GKNFjDXV3cGcJFI#BxinK``$Z3mtV}2T#0IoMYd*NpbTg(*ZT}jCV zn(cwuc!JhR$QI&XU=d9iEyP?BswwVR2;%{z#Eeu}hUBe6h&jpcx=2!7C1*;7=E5lR zIMYU5&)A<|@gdhLB&Wn-`OS?Fw^?QkN#az1RAIp?VhWeD*(4P#L4*-Z4W>T_zC7^2 zR!S%q0?U}0vGI5)djmu*1t2iw6D`Q`CTMglSkYpuKNTr}m0mewZ1BsE9 z8HEvKmn6_M7D%`|UV#itv&^CNQ7C;-QW{gBuFb&&XdNCaQZW0aSz-dzX@F>Bx(xo% zLMy@uT0KD`)_8~o464R`Nf&sIV6qg87GH5T6B-pY2(1!~R+cxa@1r?sF7#YR6r1BJ!-vo_CHcJ<`RmbeAiw8M(K16P? 
zesFw2YWb!?Ki#ISo!xfT`@|zl&0Br$6aKmB*0%x}A7JTt66Nz!8ka*}iI~Jr5)vt9 zE0qF?q%)$$DIPFMNw;tf^N(&~ur-1g7zgv45Zy#la9C;8?PRW69Kljl1Pm?*XbN^v9YfzlF`T}Ys5|BQ{#uO3gZTiP)2T0aya z4|(q>BwtZT%5W9xx#riRLOBY+X(5-O^P=foehe2>AVC*0g#-;eDVSnds*3P`n6dN- z%sHtMBs(UFq);G@9Kn(l`;YR7&5mcYXR&9I)(9vBrK%e&8CQ7jAizpT7i;7MnzgUQ zaDWm7U?>UHE2e;7Wq?2wP|Bl7t)7EWCp$2KruT@fVdFs65KmmC?BtsBv3l2e)(}0a za}U&_b)rwc1u?aN&E)xC$!n*Ss)U*(qwiznZ zH0HDJR+7*9Q54rkQ(UV{0SgpscqXvj5T>JUC750P-VzicR~w%9$~@12?i+02^oBs& zdNCj?a&0HqXnQ)QM{h#SV$P%A*eb-Je+=?|4i1uhD@8tC|S} zq}OAZN&4-!Eu~_#3>0u(#Ma?Xf(0`gOc85jjET)xOqSx@n#5K+y0wU{gW2^Z)@Xnt zVkNQ&?pPh|*9UL#Dj+ZR6fXr$Z@gl_B%IVpC{BY?SNI?R7 zC;o(5B+-nZ{fe(waR+H}Snf0UYT#be`Q|VNCjB@>4&S^8AYOrM>ny$j_2{P z&^FouOl!0j94h!34T#ctbSxE(ig7u3wkI7UQ*~VI9JFRKmTc`Nv}FS3mPaR8d1ON` zIDjT}WXgb&l`z{eSU|_^&cba;q?pgQspd5lZ$r?matSS{WvUY!Z^aeBm`cY2GJ~*( zXlfA~U&Q7U3z8F97@o%w2q5*NU#~fI80LXk7hypx+ z7ONQYnPRKC0f%5<{+e4shvvP2VZa=59il21qh;O!gW5v_7#9J-&;eUW3Z8j)4Cy9@ zh-(2;1xAS8Nnm77^vpFPbaRc&EpRiFIDEd2kS!nyG#B8i7)#tJBmqbGo|~JSOVA;A z431}jcF3jpVZH?y-z+Z9!}P~XXD?C3H<$7hirs)_@pLW17(D?2H9v@3gOH_}O0|Kf zq1h#Q1#Zzsra=ogJjtQ;nQT6v0c~~z)>6Ik`D(eQ0y7K9M1ZaZAx2j^VX9SyMK;v3 z7z8RI5{nU}r;D4RGU{*(eY0hj?@UiZJ@DnI*+`dFN z#|y}rW;qbV6qdEQbbwRy%d!WqkPA7~v;oZQt+1At9XHD&pa&}q=yMSd`nT*H8;?T~ zmK%9_1S_8oDNvB`y$8s&gb-OSmQ_jb6fQg-6O$SV1@7|zY0F&Mv6NavQsq-M#B&jX zmL_}%PT@hpEB5N_R)Ch}b27|p<$|01gqwu&{(y#+8kPiX7HU=du(_`a7&|~AK&?8_~caK>9 z8;41IBxvxs3~EXya^sCM^A6hKk{YP-N#J#h_p&{<@0@V$MoSVl5d#CFIwT}FJFVAx12 zXgdkS*B10LX$8d+S|3NpfVZ%)v z&bBNyMszO@H*v0Yv-8G?whNM1)YqSb4q^SAWe1jzzOuHckkgXB&~u2EmO7m`mQdHBpq z7Sy$n)Sb5SR6L+i1t6R>Me2tkIX!CeR7l>3WYL`spP;TWBxhe=;19`;kSsAC-UpK5 zl4dY{P{9{KV^0qhLNWxB6VD!?K{6SVxv{6>AUPW+H(8}aa*3pFz?`%}z?%~Q>RZxh zFC?dllcq=w;bhhGyO6|OK+ZAiAc?t zhGfqNXC^}OF(i9WxN8S>%^=y=YE55A21)96?)uow7%_|hgp*J~6Otb6SrL$I4oT18 z&8s0914-Y|)t->dgQQ=Xp%o-&NSe|3b(d+17~ufY?!Rg)B!@uK(p>)zBo{!k>8?zi z+zm;mE{_L7@;L`AKL`(E#2`&7aEcfSMMC7s!xNC)YyAk&JVK4znF;O!VsHWK=_SmUkDIpMZ1ZXkku6 z+Taw?;t_LAb}WOY5Wu=D0W5Tw6zm+*fkh0aj(Hnk@| zRbEXG=g8rs)=^qstqtx8&IKS{SoPxe)w)2Aw-)(;H-%wj)dp0T?;|Zgk;F_b73t0t z*Bpg=k{e^SdDS(Gal3#A(p@7i50R0YDqo89`@wY}BtqTk>P0{-e6@FSu_kHAb+a z#N{=#YW1yQg^bRNp)COF*Fv^04L4*23u&@$SAfdPnMMpUhSdjfLu0tKSB*4Dug0Wj z0=-VG-Rsio@(Xc!EOEv=#6A60yZnXf@~1HrbKB&Axcpt5BPVB?u=gm|JsLkH<1;6M!{q}7`P zd%9dfv>HPYSp?;Z00avudXY48DU64}CY)ZO(}e)%3XG9f3WqOb687d$jEb{}_XxR2 zYc8?@#Ux}St(CmIJY=&0Xp>t5N>2hn;iJ;R7xVae$ZiyHL+ef+)U#)Z>q)&ef;ub% zK9bf?)C7VL(mu8IB+Y2I$9-zs;1B#O{v!l+Z>$we%g&X_T3VjKGd7iY0{+p$&+E23#ela zM1?i5UyY9RG9`8^2wVaP{evV3n6N2O(C0mD4kEoGjyL#^KJW$SQsDyyyvg7ZpAH%V zFHzXoY1G>elQHOREcp6*5a+D7DC)fuqF{)H<9^9QY`{ysD%1kh6cDd(QRtXE3F(_i zW;#|Pu>LgVBK=6bX<3J4Db9v-F-U(n2kEC33JQ__KuM!`ULpMg{58I?fShJXKfjQl zODZbRa9H|g7h~%2^rO{rzO$|^L!^HZR&2;X19xzfbemO1sE~m=U>E!s#Ac--8)VQ2 z>!3QeK^2oMKgaN%7Ha$ovsN5<`p@hHB7@D8?lRa5gtjOo^_jsrJ~B8#&L07#{0(kk z&4!g2$wp+@g3$XTdXPhh+2Fq|Oq1(VZ!H&?OH9?czb3uleImqx53VwjM62N$K`Eab8g-to1RJmrzh{1vL zI$+}Y3~pzE8yD}8fO?H*=NKwQm`06A+COTRpM3A){M?$i@K=8t#-J)WC5udRu=d$Jkoxj#^41}7D_s) z{(PFk-W!c)icDs6{&3F2b{iA#T_ck|9ApxQx`C~c*|^4M-|BmM5}C}!k`k}|d!VP$FVyIB&Lcr<_3^)$SNex+@7Ab; z$GKk%Em<4JFA9s&Of_wOq0hZZlO_SqFt<^oo8-xH)ZK?{N5R2y)Czom>>szfKw3Xj zEgow16Lv;Tk!eT+ur>7qY|qJHi%g?sWHeye_rK^ZjC+_bOv>$tTAU~>EJUVfr;=1i_+a`-;&FsvC5@S* z;0Qf1*h~{9A#jA09y1GmK|z6-Gn48tc8ar2ky$|l=ro%sL_Mk_C=fX_n>l&%WKz&5 z?y2wt>Nr|situJ{y&WTSbwC4L;%bREw-g9a>pqwvlpKS4hl8ts;156;T`|iP=gK=1 z;7f?tGUgkR$5z1f&$J$&vb&_Nve4il3-wyMO1erBf`baSv-SmF)_^9hJ3y1A3YtXLJ>Up4vVky5 zT~o0R^~Xfk#T;Z^z(?)ggV0;gK<(jt6tezMcT07OpY>g6=>iB4+0qShOXR~-N$xwU zBk|d5WWylwo&a6i*diN)Tr#|mY%HjR0JSHhL*zV{JywluG8(`)n-n2pyDQ)uInBg# 
z4WzDmcp)i68~LLpi8v<9s-l&y=EDU$-7Lg=nD4sKqSMeG>n@X$~1aP&~${ z=^}DRS1}Y&{n!AN(DVh&z!!j?WCp^RHvNU1tTD#;%_i|HneulWHp?7H&V+MZG4FHL^&Xj9f&)Xblx0E>IGp1@I=UFW6HMY9C(=Pe;ji=`bk z8wO8KNfJj8!dsM($dkkM?j5Jktzkb#<&BPVd9@oq?SwT86x16nGVX$fSvxlLXk z$R~1|B@>*5AQI%^fHsebntOosac8o@9Y@V!r2}sj;}=NCrDXF64r
xs;413xfpD z6q0TfYCb|hdRb(!p*b&?+Ec1CyXDRLhq_y7NJHJNqNq@Jt5&r_ z-L0Zjg}PgHt{v)@-hhEx4JEWtCmmo54Ye9gPFf*5hwJ#vOYQpjw^7{LhA)gxtp$piXdLLb_JKvGi6C zAd**rIzfCL@AslsZ+XZi5Eum0mt9OMVvG*=4>yN^?X5$2p z`;x&t7=w1N$mg$;=l1@8_`)`7-$g*38*yC&jY1dgV`1a2eFU{thciLZC^W|Abu23J zJJP6qF+Od8Uy()~!@${SSPQ5_CU#Iz2WuQ7uj8DC;wI%|?#PChN5(?`_7}v& z908y<{5qZ%p9qavMlxC9J{M^(xqK$-SP7g2o8@>5<4Mqwqed?3+zf1|4ZKBY4Sp$| zm&-$rEDmzCM=`L-idTd8Ti|6zF~9==?ur1B0Ekco7y^L7iU7F)5GVpH0>DB=fE@tX zE(bu4*LeIqK==ZH&Wl0FO&uG3>ATW=MHiL8o6NbE$VmglGZyIQ=b9s@M(}`_g_0N~ za%xJRTt-d_4Om?`#X*y>01`|5FVF?ZDIHcnZm_qDoF>3#gAGJHWj7m;({ylY&$UYh{7 z#0gX%UQ;{a^PkdnuhTYMjt_VKAm`>BHn@@Yu=dSE&W)iIUb{+ArJjj|Qf@P)+j8~_H zFr);NbXZ+H@$e9I?8f4`4rd{;hvOFs52#Zawaw3I4~6h?I+*rMAvc#P%#~JaN`w z$V#QMm}bbe6r@NDf%)$ukS|;US#!jNk;}CPh>*)!5TVyHA_Ti=6izRp%@^|B{D!Pw zl5SiZa5WHebrquCakAYoscu|B|G7rUb>o^0SxM4vAVOB}ZF$#)&<&o(k_kbb5kjt8 z#cL-naxKRfU?7ivvGEWY>ue9J3fh11;tLpbau#xPg;-EK2Xo+kVWADY6wAdg|6)n? z6N3;CN`d?&AeI<3fdf0nWG(WQVO+~d3@YKkIW7`pB&nq(25sTMeGnu_O9E08gW$dZ zH43CA0WriNxJQaXffy1HT@3mOLP+M}wWN4|i3hK5*L0~PI>JP5 z(;6_&Zn;7fmn}2S$ZZ~sqfE5Ejv%z+$tv&0fvF&)_3ri!z{uTPi24}HU_?dh>xlZ) z2MsD(&mbf2Owb|txh}5O<&&wpagYqTyI%y;BoirEj@+MsUX{k&fhc%O0ek^IC_@?x zq!xLo@sJ02>$19okvOl6zYt+Un7ie1P?8pq$0Wi)G|WeD@3T$f7K{ z#|ki+;+tQPN1oW4l0ig|IUM9MyH*epW|u@pB@qtjM{n1WS%QNAZxSo6?v1k>>)4 zjCyvj_QPP@e&NBxUkXJ|=Eel%d7Y0uH{i-7*O2Ek=nkMNL zc^#rKtXcAwxa3YrNgx^YERmZ}V)z{}tdwHdMJpffSs^imkQiXt#Y&oiQT9V!+K?I5 zD6?l`?#bN7fx7hIp)M}9aPE>!VbaCF8WSmhyG(>SLu%KVC$2NHx(=`)7o*-~I|p^y zR*m}MTz(!MNe}~ml6Xl9KY+sbs|&v)3*(WH&UNI?fWZgJsKG&RTQLrm)i@yU4j@%f zRx(cBK0M^zl#~x|I5jCRRo?LcbgiykExlAqK-lNfO^|m!2YKfqZv@i92WgaZaJ`oU za2fJW!J-6jp2Hn7;D0j&bkIrO2LW~fc?18dHy2=#@i=qy3o*02fqw65`n}&3@+Z-e zH$Dr_xDT2-de zK<21fMCk*cZ}Rg7`Rc$5$_HL5!EBZ+{eY9OI1rCEOl4mff1kskz|?;18ZS;v+uQEcRpZkRL3hB`g+eB0n!` z;QTBk1Bd*?^B;7jUrf!p;0NRHC)EJR5A;8t_qZ&tR@qrlcBrIXQl3evK2$IX%d{V? 
z|NXX0=d|CEN&Lx_?wSMKWg)*SWCj*tEaZO3?};=63ps)OzLFV!aIy&tIT!hBQ5m_^ z2#Jutl{5n@3M|*~KV}d$zwi{RD4dMZ@rU>B{kuxh@sFC!pF*Jnj0JT3lcX6KKR@I@ zlFX3cMv(hp$hKw90 zR;rj&)OA59@E2hPkrnXIp!u#^d=&UM)_yYjj(Q+{f2=jSw&S6|cT(#o*H=Pj#*pN6 zRw90hlZD^sg#Kju>Y7T}R`)f6kGghJ(brra>I$dg#JZR2D>w~h4GM>Qi9A_oXvRpm zpD1}uVsdg)7HcTkR}LCO775Eh>0)~2e=tE}>bw3R+Db7B(g2K>07iawQy1E3Ceikh;AK1+d`tbltQAReJN1| z-QoOyw;$5Jz#MW(=cRiTmfr%>+d$n3EWctur29}Tzj>60(tRA3Us%Y1yCQwrRSsc^ zh^1!$2gNLsMR>Q2?&^MokGg|BP>L|D|GKZ2B24wgPR<;f3d%&x6Bs;{l$M%aLwAKV zmtjtbBlJajiFIp=5ZM0@v6S`&%sE6dCuEp-a)T>|Lb7EhcL=;LO|lJpY#}AplN*Js zz>{0dwvf%(m!sGwGsESHk#J5rZ|9GZnC1q%IzLO(-iS0Vh88;wH23zoa97KNFhF!1?%2F9Z>e2|A)%!I*8f?Pk~J8DrFENH_Zgc{lcH$p%YUf}6d*VCe~4Dxnb6ed7n zPgom07+X;Ii3g1VR%g-xk-FD(iyhjm-nTx_o7Z>2?Hx2QOj+yXZ2 z6Cn4I+A%_5zu^TNeCG)Y*M{3@QCJX=1WP_4ME$7CXi<0<@PTv9PU6H6~8ZcGb*JwWK*%|s$VqMb;T)#5nObt^do~&L@?nX zm7|CVz=%9%UAG)XjO3vRk6IX|re}>zNl8siO@@_?)O`djNQ@{?EnE`FKQT@ld2&-5Avz z5DFuF^Q^LuEJjy}rqc0Fp_#gi69r08Nyrf?wBOy_j!LgKRwWxd0 z*J9-o63v9ZVCgUEi=;j(9SzGGlmX5$tna;&CPrmtu}!d%){bSNK1W2ZogPLIyu zp=dC;b@T<^e)5>qtPywuq$WX^vVEC#?o5*Qem0ljh#Z6N<}Gl4j;wFC?DR~JvZ_= z0ezJz?~Bs{`U0gc6(#X9??X~ z{G6b`u2KFl9mTO1ndKcdHD&1ITq7*2>3hIOoTbX?>#e~gw5R(qu%-|`pV>?-r)L4RX`ndv07%e#P@cSbmb@yEy3u5(h*1YdS8(-$Gs~6 zqp^SyImyJcpPKs8xMm#e|MxyFgAu-%-IJAoV_P-NBQ}f_M#{~R97b^ii6+wVh|A=o zI9Q@7#R&F)N4q4;rXo1$<5W>qqAMD3cN1Or_tyxm2@X<@(&v`JG&KsdNU#PPT-gAkF{B$ZvAJQ zJTlfZ%V*mCoNm8h*O8Cv*Q}`dL4Cj7>uz+!CC7hgn`Up^HZ2GqlD^(d7A}|8Sv~$u z6jv@w?m=h%wK1!Na#`{Z%*MZNcAg-c7roUbYJlq&HV9govx6+WW;$7C zYcTl-p&O6vHE7)o7uTankKWm>+??aL`h?rWl=G}ZJI&CwuP4kz<87SW-~U`%?sMO& z@eyWgoki7uD~|x(R9d=NeUIb7-hL^5-v_Nxg_YL+FPVXvb z{?5?+#9bj8-hPm5=L*tvQm{5vwEyrR>4eenPp7^*9vm`1D9&rmahs8zRWX%oKbj=Y z61Cg#HL6Fxh=1+d6^C899b3*Cu{UPp!S1QX&v)}3d}As>u!dLC)A_EkBua+xXV?{q z7W^4@HTTyR9tscfD?E=*pMr+|>_`zVIpt?hNwQ2Qxq-dw5MM!cIvEJ~m|Ng1mMXoasTlQV8adhRD&~jkT##rs5@uAU$FIy?8N@{ zd8JGLff|vix}|6ASBJIKsMvOE-M}l?D<(P52<5cd^sqF@y~Wc_?vD?z8ZqV38f)3s z6r{-^X)4fQs-4c-xNJdYtEJO#wW#{Y`8jFYw8}L)2hQ~07o08{m$mcJHT|-A+TFhx zet#n{U-dUVySG+b-!sXPshj(Bo(2t1`8J8vulzG-ir3pO*>Jeap|K0UK2*0GFr?Lq zNhxo*E1pgfc2ah)CRf|NY+tlB=`F9J-U!S<`+3wBWoD;ns?OYw#l2JLw>4Nq5 zqqjx<&J~?9o7^X~%e1Ll+pb*S9apgQ%4OS%nGtWE51#$3!{5;4wr^W>DRqA+r+EIQ zqiEvEm9bavr;Hg_qGhFNVd7X|t?lo;Uqsv0_3tw;+@wg%&z%N;yv+nY-GFg zxE5oK&u}c?yg0Bd&|wv)Z$!+`1s=C!8GjKXvvybCUz%vAd8@=G$bA2sfrj?0pIrDj z_~p`_>?fWRd?6aHxusHbqkz739*MhV+hfJ2_VruYVOb`BY^?st89fh88^E8J$K9>j zb%*0+=bBj5c=oIDT;oxVTO*_9BeP~kjc209bB#y3s76MOM@B`BjGB)X=Wi^&fB9IC z!1+)6pQ7(6S#LYc?s(J0K^l+8l)dhItV}I<@de+BWmm7iQa9c8J#*VhW7~I3k(SmzD|uK(S}JOAeN`1I zRPjd@TxkdDrO%bZX>^uyIPL#D|MK^;`ySZIvMD&Udr{e8>m^OU&zy61_{gEcB}4Ds zZ=L&4DgElDFObr5A+4*MpmPnYa9nnbz>mO|2J|p0iPb zpbD_6`Jod3qmjFJJ6hf9Zk+1+Xmhvyrh)E<7$&8QPrRG+wtvvGVV;RkPmMf2!tqd5 zjOTdY(~DU*L;bVf=Re;{-_`$^Q{p&;J_<{G=#}Ci!o6Mg4vbVvI z$$AT|))%K+Tej&tW7?q1;wgnYwe|*#Qh$BT*TV0|mqFcF+HOAg1_$e?!XK=5xkxAK z#=Qg+ojD#~({80#{J4D9?DnCjZ8Cco&fNHF(I@MLB6e7tT@P*t_h`THcjy=EqtE7A zcWJNTW45pG{m|x@pSgvJ{ZTV&RdOlhpZlZCkV09JxZzV^+QX0e!)?;bvFsn?3)}VG zvqlzPGp#r3VUL=5a?dr|mxECcA_aM}S^gI{N11JCzM=Uw-=AYkjk_d`ig9WjK3Sh((R)|%!mVp#1u=eh9Qq+Yj7njM|-aHByKbE9{Lj`QwL z9S50r{2Br+r+-U>)hR5uBeja1A}2H}j7XYt_Z&m#%tCjUbIdQIf~NT&GRkN)E-AeM z<_`xfHeY(7N0Tv+y6;`&dgDymKBCtUJqttDj~J)pUqtRbY5VdLk7avxTQ+RFZ&%y1 z=6@UZ-o4U$V+$V3Lew9>o09QusI!&BKPdmPxUdW6KGGT}N ziM!moc4sF)lZ!jrmyi8&t1Pecokg$vy!O|)(`w=4Wv*HsI*S~f`k(LF*>~R4MDuLp z()ZJL^RgY%9y8CL(phak%h9iGwVOL%?B;&?|Juz}*DR!Tu@)Er!w>;5%?V9HYe-5(Vd~e~+Uy9Kd z=c<>!QVOThS<2!6dN-{?6@LVYKeu|6UCtW!mqoc~Kf~gSNbQ|*(UGUFlg9tHGE{4! zlz#Qn*Gl0uI!ihHU(dh)^KNdq?W(4_X5I8FcN9E|n>uZ8-j19twC=C`14A8G9wgj! 
z6UKB=pLQ>y)#`cK@5Zj1#u?r1-nH>hb-b6IHp1|84x;MIBcSv2FJ3{xi)4;$+v`di;f>J^fmw`yV8NcEogj;B#~k zDxa2nXK5Dql>Ja%iQvo89HM);laqh5bFr<)&Qpay=_ou}pMB+8knVoH6W6VEY&Lbe zqLWh9Mt#hF;y7<*Rb!uQk)Q6Bbt~p_w$Gs_-1o36p8GJ*=I7F!Ve_1>KmQUKlRpgH z++LORFwI=A`6p=tdO%U}ytm<((h4UhT|JxibHXoMuSF-7?$PP zv1399CuheFaH?T+N4W|lOVx>Bx|{8G*6MdBhh%u0Bo4lOc(2u&jJr1;EWF@da;L2* z=(rL4)9b&QuQ-0$x7*?*Vua)Li+vae44lvE?_XqkBc(XuS|nial#V*F0(fZ_MV#UX<9ESzcB0d#lCNstX|f_#&gwetE>xW z_TKNbFaOP9UrYbrPiLML4O-l&P16~-BR9U%`D>JBJLAWV_deWq`p}#~F6C3M?3=*@ zl!tvw=bN7tm^SmnF1Xf|EuVMNHF@fmrB80P`|M}6eOCv07u@!C>X$`(y0x1->-f8+ zefsTt{!ZfAf-x@dX1G`AoYyAjCTzZWCNgmw_ej-+0isP& zUq=T)pRlXdg<3z_oGd<)b^QGgozNYb8%LbiI`rCMfUC2Ay>_)2q*%5t6}))!GmNlX zXKLYiRqsvgjM$dM=l2Wuf4dy)di2p@(a_@j{kq$Z931U#ki(7Ta!cG7G;L9oknNCd z8MMpC_ZW!f#!C8nXzh4~fc~g)U1HfaeEKe>JCQV|5nj=hpCVZ^k&;iF8+c`l>RH z9~4&m)d-2P>x8p^gq~$TrFE) zLHa+j5m_$kv~lC4WxL-l_{Qje);shn+e>3qpE-s84p)x9S#)aZOok{Vs-Pvy`Jt`T zOuhSC9-O(eDZ6b}|I(2yZn(6ssx-RP8kpX*k{(K~odtULl8Pl-vr1x_gyZ5<_uaO6uW2Mx$Kam*wDsxorE>oz@{`={j9jMT}^c6rCA;c)!uX zCG#9E`TL%`S9z*%*rd#EqSmgPlC})k=>IG><%>~J&R+X3;c;l?!nJ2L^fWvoj>Cxp z@??!3sdbnwrYb!Ckl*K{(KtU^?UOY(Qaf67-}r|1{cQW?kpceM=lkT>d9p_Lz~I>Y zMT>6K#V*~A39Oj#(eLVl(`bDrmbR21sU?@*9P77YXWxbUt_@C0Z+VY z`{lmB+mE@D&~eYIT_zLOzbJS#T@>wE9RgcGvQD!d*PPH+Aq zrHvmqJj!5{?YNDcOP8nGIDDAY%)Rb01BiO5iuDGarL;Z>xeB-{@P9o7riwq|qlF7c zMF0NrY3XLx!TzqtH(fb$`=#@Q$0g4UTIrl-)j8`(!qiLODkqT6QV#$3^G_9jRKb`!xUCJJY5O8Ms6c-Oh6w zYCop@JEz!C&%%4Z{rk7$x;{!jJu3U6bL)V)s~-l<{cGojSI&*@p6EB;@*@2Raelpx z)z`DC+@t?C{_A`2CqMEz`7Xtz^3uB3afazTm+$c74)k=o?%VkO`%8w5X*ZLf2QDvr z;BsW|-m)Fqgh`Quc=z4{I~JH!^s>iO0WE3p8UIIo5IBrGg48S z$IPRa^8|v&%6!Mh-9<>t)$-Z)E=LUX9F}(r^Ln*9 zyz<4EPqrxsQwOA+*f#t&aAC){ZM+Sn$yPTzP(1I(Xu;ej27O%9oQm6>+u+)D$D{{^ z^u(Ys{U+03U-854Uvdt|SGHABV+(8R(JL*ro}0e@JIciPXkDC@v_h^Az1|MXGizz5 zR zKOY*c$wf!w%?u4pidUVh+HmmRlEGmYol8fM;?lxz1Z(Q2iPxSbY`5v>Gjwy`9VS^P z+F2Id-)-`JwdkmmsC)05SM6pU9^o-}MF~-OgteE*&v{j5s$Eg|WBu}@AK`>78AoYG zvYh_%$Nyp+$CNU+nUml9xn|oc{sEtB zbE3=6eg8Ps>ig!52YW>07MUA;c%^-8_xVvwt>t+(rmB5;oL$)*QNQF(C8KqN+PcWXYo=kvmq3nV!m6lFTfZGDVk{lj zAz}GTW2-W=DSLEgweFz5TZqQr7K~Qv-2f~XI7L^QBt6Z;jGb;q@>gmN68_&1!PSxIHKY#lA)SOXKO_P7jE@=Pui2DZ? 
zZ8v>IpGS0TaO(Ud>s%?T6G#F4RKQial`IwYAE`a+BR2q`@@jtMUJ7W)f}d)L{z775DBpD>y@z);NTMtjKTr{Ap^#s?e zt$Ti}Y9uI5DIi)7n9*(IwK31wzWb(bdFJULVm;axXFvPS-A!Yo?5$orYu&`>)Sl6P zdPfFi(>`?ZwZ&oqr=qAA8ekL1=lS0DhnBuDYyI+luI2&r$6aXqjSFp?ZMnBTk~)Z0 zCfcLEHphCB=V$k64rc4Mf`6XM?f3yX$2xTEpY`s@pvi|s-RNOSBe))wS9NbLU1Q?3 zj`p*#xV``KJ$eOgR-`WfxgEM(|81=Xspt=#C{{F8xVyS)_g}SDR#NwFFqmZJuzJO+ z=Z6=C9f~Qkpl8%|UZolPruL^6vpxJkYRg5>ysAQuPR#vz+IT1P`;+tYuD5J5|Kj5P zi?ts7RhWJtZJdbF=HYQmlw3NtJo99~y_2-E%Cxsy?^-kJ%A_Ng=O5|@y@28YI%w0WL(u(i4<{#<9rcRaWEP67TLI|gTGiI1@s}O{h!moh&0>u?8{(T&U_qu zG5oCVPVa(CA&Yw19M91`uWc_2ua&0fudbCRpCOWeto2k;D@Q&}JkFiM@kdLn?hZNiRFu#Zy0{|DBAZ{94h zZ}Ii_FZ3uYAg7Ur9r1~Xn6Tc#Z--SAtuf7po~F6Yo$-j{u=T`>Q~Dp-5loF)BOZ)y z{A^~=ir>vjt1g+~B{p z*|wLNj#J8J6!W{7LZ+1E**Cw1)|ve@K)E6pQQ-l5N3 zZ#^oQzPoFbr{i}KbJFez!s-0iF&npQv~bQE&}jMFE$pcU&5Bn2y{cvUM2xVi&+PY- z{atd2279FkR8=A2c&}7lTvGq3D^5Y(GI5UbA6!?mV$pxvSzPys@=9W8^&TyarvH?D zr4;(bFQcmdNc&}!R5FT3lA1zoYiRN4Mr{)VN-S@+9en*pz?()(=YCliQm9|CW9zCU zFd~d*>*uhqt7x620xsv-C@AZvAWtTXE|Pgrkfb*od3NIy1pwJBmh6Lqv`9A3X|==P z!HnDNBLT$S^g%!Mx(YkYk%h~qC9IOolSv0mFN;gLGNk7WnY>!bWSz~uYGul2wEaHV zZ$ene3)|q>g^fec4L=+>{&5&h^L&hP$rD%Y&Lu;ub_{9fwYlG|OlP)y%|CxqGihP~ z(dN28q~X1h;cL$N(r3w$;rEh6I_~KinO(6F8&yfqqIUYRvwwU0^cMcUT3IIL2ePMHPcf?0 zO&d_q(sa0K0Tr|%)~KRRhEg~C(U~vaElq1vW``7e_z0r%B#T#%N<7i3LEyazk1Nxh)i_W$qKWmN6O_8R^;_El;5 z#_lI>G#%P{W?Q{}&8C;0Ugg`%`KX)SoqpXF54Jd0z4X0OIE~Iy4p-@PtEf)E7Ob=VYffYCMP`Du2-@XyzyV>jvxRyLRdJW}qnh?W%1b zJ%1c~!E0-hrZIKErrrtc=GKo@tziCed{xPZ|H5f0dT!Hv-8J@==zdP#*rBn&wW-H4 ztsgJ!QV&dTd@3ZVU;2Q}SGTn{?f>aYK#Qu!qgERp@eeBE+K!sD)@>~NfBmg+?E7_;a@r24{8!pP<{k)wI%3vH{XpZRv@q)q{rPq?#Y z&n+>ITcx(M-Hft&MUYp{et*F7Ue?cJs7J4tC(pK6)$Z`Cyxj(43po`lLtd?&AhI}~ z%)9g0zN2T_l5J+EMs|!^awW`HcZSR1E@|&`H#Kh$qX-dX7Pi>$G6}sj`Rfj|Hy(ZCIS_{+qvc&)R}Phl2NO zS`Qi{>U_BDVaCj~KAARmxvXwkZ_kFUdM0pfmpo=U&**f^_tuMohe|mXk_p8uC|F6350F1I)_TSAWB&3mqR1#pNhE75cB&5(n0;Dex z(QiTmgpy!FQ-N$t0W3?mphi%!V8IG_1q8iU!3KzkTq%MD6-2~>h`ir9U)gVy-A(ei z@BiLEN^-ta=ggUzGiOejTHl`C-|o2sDq8WT7kjar((Zmvs}l=Xc?T4jdwEZ*G8{Pm z>ae8PyLKDY=?%l$GXsnb;~c-WdiA%n`$X#WmbR}h>mux1Trsn zTxa4c;FFtvn6dl7PtSj#Z@;Ai((fVFbs>214vp6&a|w_i+*VwWJW3Cs;hH~qfSzWO=8t=!dN!GkY`Kec&R zc!kfmqvL+{_8xs($OHHNQsO)EdY}67A5VI4=a&Os*}utjag1%@9Mc{3RdrW>%M8A+ z{zC3|?|z`sEIE30$zkz+eRjv`M&r&U%SkK1$Lv63Sdm5(Vi33&Am-Ol_BYs?JDIFX zD=(`oDXYBG6~0Y5Stw#JrWK3vW7@9K3?H{}lw>c}{KnL!bP|ZUxo%q0u(nKs&SpYK zudlWhX+rVrD1i24G1@jtOjZ^?xa9M(Iz8n@bLTe$Ev>IQyaTIQN zM1|WdVPPH-aCC`cvjksDkw%AN(M6ibgb3Se8?S-1diWsbwXYKV%+9kU^DHJ~Hp*!A z!fF0#ls?fS{~jaM%QYQ9HA@t@j6Ax(QKRb@K@Z0cz_0eG@~Bd!FJgKz z+=PDsB*5N*@8Rq_qCG>3?xdWvNYj2d9+uP$fWvC`7bw!$@@x!CHc#ge!CxuMB_Yyj z9*$e5Dd8O7uDayBNY$#--kWm6r-Zg0^AL* zgd2h;8s0r+HL(g=B85LIbpW6KycWlHRpCWrQ>sI8u914c8>xIT_ zbl1~>=%{E$0WlHiE__wlA7bVqKEfcxeD=JFN{E^b@iikaNYo_rxsSC@&{jL*U85B|rFp2f&5VfQiWBN8&uP%7w$m-rfo)e%*oRl z&H3z^c|mK_*<@!zjH*_%{}^Jm*$i{aB*+`)ofJXflN@`*TFmwz#S!ojcPEG* zLa6$Uk;fI^eJxDr_%VrGq=_CWPaF_}NX+1g{_DiV-&Kj|6nbiseQwK>9m*_AE7JI% z<0WD`i6tb)b&F18ID8GEI2^n;U%dOCf>?~6uE?0{g|dGniD#r*{gPTztKT42E4-mG z!s3&jTq_V&P^1YMA>$7K`#spAs%@w`W}d2QhG)N5IGzJ?MXW6rvF1tuyKtwj1KQhw z9sRCi+6|a2gNWGH_UKTB`OiV!{`sI4hq_A|)O{c8kN?t!+3t0K2@W)w z;77)7V#iO39bYPT9Py{{?`4!jI*K%Hma%m!{&o@C2%1{w#hu4C#CPy0k={%Z)j+AD`>oDeo#jHQsyexE*N2BM z6;3%h@Qa6Y5^tbicClDMHUO?-4O2|ocUQ=0GX$`!&br_> z%wR#Y#m)u=v(DBy@0Rn}hMl!5x{W&P0@=;&Y|!n_Ix9PUgWBSv{^r09Jx*Ft02x^S z2Hz@k1e6;@ov~sAG+Vy~kHqRmqct^aTFy5MTCKTxS>--4)iXGck8mFIonW8# zf)@%tNc&JEtc?!yp3%(IQXP`lU{u+HG8o<|OoGyATq7(ELHOPUQ5(DyWCTfF zXxAu^xlmqap__ITDSpzMKN_i$zrOWs^>LFsp>X8xnq>uffo;@cL%BS(K-9K~tn@I~f1^<6lqA zE^V<=#BX@>5W@-)$|Vq;gNE9ofA~Ec{|GR&P2|280l;$&oG&5`%jLGPAK>?W@V|&Z 
zVXIa3xSPijp3_D;j)_9tDa5JzJ_Bj5H z0G5y*&eC5t7|kB5vkHwS4<;PK_5oR8a|#OAu@5u*yKar7?f@@gUmLLOKvMV)kOIDc zl|&5x1}$MS2@f;~9y}0*2E!*Y8p8;SWrQ1621|HfmedCoFjy>thcD(rPI%$tSjt!_ zMJk20v?YkNZ~+r8Otc0Ge%7Gy2v*GsgW2qB%d+qehFe%s1|_Q$LBNRL4_K{ML5tWW z%$@=JpjE_2_{%K&b}74w^}o>WEbE5iFrKk(_;FOr*mX67=o=7d_#RdpY-V9iiVenw zVMiPVctlTCg%Lv-lcRR#o!tX~e#l%=Bawfs6~3g5AX1 zj~vf9(TW(5C-hc8)-j4X(hXO50p(VsKTKdC_=pLl9SPS_%oGv%qN!TY*Ju%ffW+@z z=w>8LWL8G2vNUq^^n?s&Oc={Zt9mSz*^o~?VhKf;hzc=7O=E+iatsGsW-P#pJkQO% zQ3F|(S6G!uNC6m8o{SPu6YF`D6~i7?)g+NwL`ij!ZIhFcyW-!R9T2xXo1AfL!;f;9_%iGY}$vP%_$O1U$;rA6l1DA{+ zqJBefqE=xDl3hBjgsmoqbq$W;rQvCT^H&sx5LUI=ZWdZ>*BLDWDrLCE57{plGBEmY zm#MVK7`feMl$05{-HR%|Lp??=f!ZAa#@oFwF`6SOAmLj^_82lo$zoztj$GqK495@D z7nLq(gjfa9Y79qbz_+X&!_kvjf4P~?O@E`8qrcIReKLldaCtjGg3CQvD&J_R{VY6D zs^Gc=xT33@82OlR2G^%{xa@xYn)0;=>qS1C?b%z5DME`e*d}s2@-cO2F=mXt#U?N@ zP{hR07GNUgf&(UE{$y}5Q-?bqXs>5*omAk0%;MRhdnYlnf`AeQWP6wz6e?3DC1m6J zpEM7(FH@1P4ig)n1>3Ix`L>5#=Chzgq0Lb#;_aHZF_G6S*S3!lTq*@a$$J2T$B*B0 z4#7zXepz%O3Bf7^e-2sxIf5@Dc=`I-RS2FkpaqCvUSlY=G9c-pmnXi2U@d}A81@}Q z@C^i?@!Iqzg5M&z|HCKsC>tS{z4yt3g#h6Ol2|a5y!In_U+$y#Ab1AB)n7h;1!aQ~ zT>tsQDG2sMaAWWtQxTjXSHt>;0wa-h=La)w2&N;r{G;6(1WORK795;{;CdE(DXI*? z$KaA5geKEbOM6a2qwSXCJMp(!zEvxnD#OGViCi7 zr~5b395JuEVK4coT&pkKNVEK&{w&4;9?yPCe$Sq)C7cT&KdgFr{t<{t^x+@OK8}&~ zyxpFEs*-;>hs?8-{MJY%7qONc#@O>gd&zaIUcf)~oIfx?%m_1hg0(uj%%Zx3PST zMgrdw)2i%gcPX`gDb;#U<60k~HhsjBTdB=RDc`P*8^n_VG}7b%^FaziV{B}^K1W@S zW8-zr-u7>-ZALXgSW&Y4t$Z4_-71QLK9N$pAvL+1x~*12CNh}^zT5Q-gf%;X7rze1sH|vrTm=!9rN4i-<#2Y>Ohu%tCXM7f2t_tBzpF? z)4#Vu{}FF6SOr1Q8S}`;*G6drFdGXUVM;Qt_VmE!F`%`X@H@u{v5W&ph=LW9dCK#3sonQXwGuF;a`>0!xPjeM(-goTuLwPLxn6~eHI(<@r74e3TpF!{_dm~A?t zV>q%gvs4z$05kic`_#ja0SYR{_=Cj6dQ0r9A zDDvjGIfz=XG5~SSJDftT%{Cg1Nmc#uQD|Jzu8G6zx&B= zg@pR9r6TJ2!NW$|jX;3ktJTD&I#}$;@3$6=h2I4mjSg0ig@*h+YHElHCUBa6U~LU2 zhpM#I<1W4VPxArqN5_2j)xJw_pWUJVdGqR*N+8f-)(y5v;M0FtXNUA zawUejRy@`qoQ?+iJ+-)pb*=$nu|ItQcU<+sM^8I|^(*Fi^gP20+10w4B zTUSs(h|K9sh+zF0YNUW1wrS~tvYbcbTnq)=VW5Dyb(T5`m?>Av_=*B**{{3nYB`%x zKuw+5$^{i99HTzPnY^HJalX^lmLUZk#)^#sTd@Yd;A(S|7Agw#0(4_ufNfSW*-+qA zCWBmTgF5fBBag8?Eo%28L#JPb@Pv zf&3zAx2ESY}zf%s%qmNVvVSDZpc#qXIs5y?Yi|4M(BBS4gvGooq`rROevT$ zgLb!ok%L}qIAsRC4{MY(>q+EBK_}8ErV|@9)~FzOLU9TDsrgDyaI~W21kVzcoZ!Sp zN>1>srj(rE45vyC1>YqSQLvd}G5c^j1p!g;daKY%HAMwKXQ1GxI4@H$v>kNe)GuLp%jcq{%g;Cp>7Eo8)wl*63GFq3X32NKJTvKD!NxGj}KQL0;WLaly zJAvtpioPi-UzSu%(rq7ZN%L!LG_HfHdGx34b!z)-)j3NKue-17j^393+Ss;zhkbl~ zQ>6a+yC2ES^)3zVdT8pY6)RQ%X7Dz({bJo(W=9#9#OrZ#aGYYH`!oGG_7I=_LVZjW z`xkabLn$=91<-~jqm_?U&{AlwsgAe~!=`X1emM?8A8Sdi8*G$5Tve-}sVckU!V4kW zcUDd4C=6-yQ&7n3 zsRy_?S`$iav$xrbQMebtfJ$tq`0z-Jg?db72odQR;vEN9Z$clq*<(P`S|x`_8n=I7sPN`cmB?}t2RL{@yQoD04z;C+~ zHkuTw!mmtY>O?N@w>FhPdcyPFvqRE){6ccFW=6#BZ z5CazK!>vP{&(+7ODW<#yc#D}~qpYD$@W$Cp;x$l3%>AGWwi2~d1*ESQOG!!1lP?@P z=S!C;Zr>99uRZj?EC?P&+PNB0m*ddBCHBAeu>WPlXsk4c%Op2t{ky4Ynu(Z}o$Ic; zr|`_zKm53?@X(|?QkQkG{_>?Rbnovk_vlcl_kHhQ`*(J=pl8$pHzOUSf;8yV!ECik zs(O=_3M^7O{ADmPjlRRh7LZ?uuYqD1)D;v{hif=|CTa9d2z!@A)$xuNfY5Q4jdGw* zDIm0%s5+i*Npr{09E>kYbJXz%#^`Kn;bw}Pur7le`4H$^(tkbrpUdv-J|cSnOqi2G;3G zW_;|FYo&hQ!aPl#*#4&|&a0`@oH`5jXM2V)131^FSruyyIFU!4wjiI@M4jqU1KW35 z0|QlIp%F*#sIwPuWf#b~KejV$S*ZJ8&8qt+vF;mE-E99)uA4fsX`PutI%gQDbGsTG z%hxI3Hdn2u&aehHX|-4j*(dAoovXO{!1)+c=STRCuJcrY`b!H$Lg(+nf!_l>nFBGV zov%||G=mtnJLhj4R88A?(Ko&D@;5g&t=?+MjDF(!;ZaY1_w2;nyN|5={@hEI&r+8Z z-UCMNT_ysfs$J*D(L|Rb=<|^<(=uwOF7xpfS1c=nFn3vlsmrbwbfe358|4+KxJ}GXk$~X%n65^Oa+oYqe!$5t;HCa%{sVi1G zxK+$9knp8s*K7lI&1$%m%;T9s!E0<>jiRm#EL_Xt#)hsYE8l#wWtrKvt(y>t0AGBi z!YR2l^|*oy@#iAih0v{v8~EMkZ;mft^U(4V-TPddQ*j&-n9UI%o)E%4Kl&L#*aAQ-q@&X3M&J%oSfG^OZq1eToX>D0m 
z#{S<8_x$|spK`|qcIy^Dy$$)A3r$L#3W1X(?Eb~QTaGKw#~_f zVh>R4JA5DCLIZGMLRl*?TYBtQu;kU!0NB^relNxTWTJs-KoHnBYaJKF3>@Q4ZVmzM zJcP%`s7~1M=QpUwn>NZ1Vc<7- ze8cTRTHFm`J5}me5~$~^2I?79%ic&3XTAU1_Alyr2_08pL~kUE9y_7Zgef5$1~gC! zw#Ax$8-aRt#nT94-Td%Be;t8(O)_LruLOrpEVfX!KZ-!T_8O?yG#ENenD%t-ageP%NRD02{R z>wn{I1L}Q;Fvd10ISjIfC3Mg~2DO3_500&Lv% z$riWjSR@3E)5$p8#iEkkk)}TD*l7cHMVk6$LfL3p4b-=SDJax8n%T%*oYQb#rQ-%Z z#h9wOE%c`ELsQ?GY%?mKUC|Sj&;QPCXzG_CcI*o4*iGn3F!I_v=3*e4clVk4B|GUD zvnMdA+~kW-O)kBf%zz^R;)Y+p52O>J`I|YCo$hn-`O;|CQNOFeB<8ICCm5W9mio7{ z(%>$bJ7aK*FdBNP)?_tNf4zbFccgqQvI;$MzXg{WrG#Nf80st`2MO8E66PRbwzC8) z5-iRV9znt;X9=$(;WdW@>VM2+t_Hw|ko1X^L_@sTq_5nS#+C3K9DEQodfX_Ir)1n@ zwnkE1E0AXa`fs*|Q(Rkouw|i~i{d)-lgkuW)PmJT+!R!a1&~DX*C7iit_-W6A=ulc zxTV-^h=IjZz1cu<_d!7t&zXp0#n;$6ihD*JulO-+yo^@16u~;2uZH8UBaPzTQ&z)P ziaT0sp`kypyxKghisEo>0%}PdL?2sI$FcLD%DOl1RhG|;H-9L;t04<&WJj!ht0}%6 za!wovD z=;X5mT5%#%nT9w!1FWhjt@|S~tTqqSW)?c(w~%Vmg8*e(GgdV;a85InRT`eiYJci{ zzcI6-_4Qb0#$db#Y{HNVIu$2K%#DIdzC9Z2#7m)>aftLmv z3^cHS3>6R?CMjw*qDsXmT^J2|3REPefd2BfI>|C^cD1B6L(tN|uxKQqocbc8qx`er1@I3X+mG8gPz_Ck4r+e59n84T6uD z_b8gCK`ghVY4y? zxMCL<))5^Ixwi%5Jj80FDa%yjOhX>TII67mE{xF5nblE^!&Jeo^+P+h0HUGcHk#T- z1(9g2cVYEu4j7`f-h*4*b&w(K+Z9$_>10j4ILJ-zLl0wWQdtVN(9mxnR~4H(I)z&b z&;`u0E{BXH<&OE2eo%(GHqBM$KEU1z5F$9et>#U2xsYdv5Kyww_j*j zr8tuhdjeCY^zsW0tCnU{ZbTfm!9c^-H!>oEyJQ-bEgWb^K1kof7kSWt2w{o%DQ421U-1T z7ZOj&*TxYkmm1!|M8k0@0HucaXjE!A5^+#PncpGss&ye@G#po9h8rjWdl+behR>)r z*Jvr>H)cS<^8qw`F^?FIT@^-hvR&8|7N)ej0ae3rAU_ZxUaF?y+aVRSRvLb<+$s&< zD?->X=Lsq28*)x`8FE&z)^G^_1cX-=2uJwTm}^A{VG;v`BchZD268fu=*c7OAZM*% z*c0rpanOkICK@rY5zHe>L`X)Y*dbByJ7PJ?%xPTaL8;6_dl{g>%0M6Snt?{VY6pG4 z)m*J588G0J!zE|_3YpK?GhgDF*+?k7PKh2Ed{9OM3?{ahV7O|BffDzlzXd#ml_aJRIB?5o;%>@iH8)sup9Yac7QamL*jW#K1q*&d2^pWX>Yu4j`+gd&X-2S65 zBqhx>Q4;pX8X!4|O>-dVq}%Lhs=DDuz_KJ&t#6@LY%R&Px|3+2IgyJ*d64vnBj3aZB8O4$$N**o_TylgwaFi_ZKV)TcenVbJ z#`q^I5`dB+{~6!2yrxEZ*COvcxn54soT~TMu3$8sjP-x=YYI;%zq7);Qj}dAfL%Q$ zALS8@gc*@1Q}Q=T1S4{Rl7HtB$vD}>h}=jiK4OGb9HDwjiBck%pkQ>3{~3Zr{$eb4 zx^5){Ck6NJQ${J^q~xwNuM)ulVgXJ{u@b@HOQw{CJVJ&~pp+UVg288`l#M(>hOeGd zo)#k{`1VrDP9=iF_rU6P8ySougwiyWg69=dPTC>6&|n%$`CEx#m59?2r3QwFFD&y@NJHZ@?KNL>KlVuXsQvx1&SJ;%+y96{pK9+djGNW!VRFSW8p#e z6cxk-dlHAz-bNF9DXo~z02sr9LWf6%W-MH=u%bjix4eAL!ugU=WtbA53!=2^LKG}3 z(9cl)C?7MW{fkLImAn%_xbDxS#;D#VO1q@YehzIFRg}*uDbr79mpJw8J}25!wKb|# zXm4+8ftf}PXriqLOf(9o;w0HCwl!uhRnD4FTDBlTPxHzbmMzeiESX+XQe3H@$J?r+ zB|J-JIh9HMb^OCRB=vsOHKCUR(dbry=rKT)?7H`(djg`13Pfno@l7nV$ua~K1w=`* zltxdpRMY4=u}AL(UlfS`L$))FD2J=;XisUBgz$`=pJ3qgnym%@jR3I zl7t_IMt`AFks2Ytv}_U0EiKkplz_m>^ou0q&t1qy-Q|x8`D2y{T@>Vxv6yL0jC9@tB&H2yXr zT4`&1shP&dHqq9-CK}J>7gy4&CM_Dz3MR;?g>EFP3m*UN~SW-~Nb;|e^jeQ35i#3ljpaq-r*<1lkXl%%k8A6%R_$jkJ9;&4b zoY>@v{GD3LOfgXAU?83{*+Cw0F_Vdv1TW9tsHIFSXft6#&FID|5m3c&X!^KtTn!U- zW;uVImNG4rd3QBW!qc>r*@@L4@0>ckOiP*12;#%El)0+WyR?+K59_wf2iSCF7Cb!6 z5i;f}^Mq)Ja#+A-UIB2wh&x7<`L{Wbzj;CvTH`fZ${fvbpni;&CX9d%Yz?CcJ>{op zX~Mv20Q@?Ch?XX7LJ{^3Eln6-!@G=CpqZUepHSiQ5n7rsL41RjCfv_6(S%j}1zO$_ z*ALK+&Pj{!{~vrYk|t{KV5H0s6aCCI;R$rd4L?lmXQBzeDD)*tg9QCN=wy%qD%O;k zQY8&0&IOYwZ7nv_#KCAw@0#}}K5U|ix7*ul;;q0Izvf62|HfmEGDMSF10ue8?i!*= zeE<y9CSmGpCPXcMPm!`ZsUXVg z1Bm|V9-^#l6J>?jA>uR(eSQA?itntGJ>Jp+xWGh1CZbo5qjS8adF+wMWj5kF?_nvSv;nkai%BZx}NDi_X} zQ98Y}1l**kkE|_?();X}Rmw>ATV~2$>Yj_T&!DX*8@08dg32mqU_xy;AY$*Em{Qff z&l#oCQ*$N(qVL>~M^2@Qa)!8usNsN!5V@V=9mgZ*GoiCG9yw>slyjGRh;m{7&%wy+7JS_cPv;}RyWJzh|0>%KP4b~`m+fmw4?m?Xyq&DN0i^)N_iJpU6db>YOga(A!@)p zpI>31{CQ2PcNXy-Xs+6+pMi$Hk<26o<=@9v2a=|r|1et}h?;&w8$tdXEF;VUY#EXN z4$CMlxbhFNWds&nJmn-?M#%O3z?KnKZc4Df>)ot1ZiuoZh5@{wpR(#MNKjVYR50E^ 
z1?j|3is5B+Sp}=IKPb$oweh3EDpD#~C4?%ZRItHJ1^MoWw_ulv3YIIwt7@xpJB)bM zZT-$(i;*l}1`o9Lw)?gW-Ov_SW*4~3yrT<)?D z`*OEA_J~^)^c9d){s^y)?nyUU8jtP~-@PEJ#)b(1HWX>vdlZB>|PGE_fB1i0vzzI$bNJ?u=pUsdi+OAgcVCPon=H!Ds)AS&@cWu|b8 zG9FVZ%rpf{G`A38|98q%=MXiVjMy|JLga2T;v4|MEGbb_uBvb})e{gs;vS-@v4H4D z8KS>87njbTQ8~F&h<@Jvlf!L*b9^-I(1 zY6r}C;$OdvI(Sb&f~HMs!dDf)ADg>+YuD+W&%80~S&hcyv#TCQyhRV%N#=ELJ|DOu zs?*a?Y(2mKk<7jMt0T1KKh}EYEQ|m2aLP`P;%ytUJ{LK}Dtpx zq!-uWN}M`RuKGg^d#a<3m-dSo_u@s(pC9$vl9QUU?31ZAVe0h8VeOTM_6CRV%Zu7I zE~APGSn-YO%P}aS+A&i3iX8OK_6g;I-Fhi84FjHW8Mhv}&)hIt`i{8!w~r4WxWl$e zoxaVh8=DQdtd#B zpLPlx|N5eWDV?V$-l6gD8{OvT${6di=ZC#X|D|U~ zOu1+O@b&Awf4TeNH=kMZ%g0)bp!SZd+A{N?DV(<3q$aPsa&c+KmubJf_H}E0#*o|A zhnx%c4>Ru6dJer1G0ZhUAyZa;-}!#m>CH;1SLgcH_tZn@j{bI^@#(ML{eJlBd7o~Y zSF28M7@iyVkGC4eHTZU5s{anbb8(XTcjcF_@9N&G-H*pcg?2yk(!y`k2i)KNw8<~1 zJts5-+xtmPg{<1 z?UAYLd_s==b!hgVbw^gsduQciezEH&{dpHq-lgw^-R@DUORrOR@pw{g==fyawIM5h zxHIe8-F3Zh-R0KTvj3uU|5TT66mC#|qjZg?-btJ~DrIcy#U;1C_<~R7sUgLm{kAyT zko3)G_YPgX!jRbyum5q~ z!E-U{+MI;>kWAaYyztVND=|A$x=q+WzxSVudSAV!v7A^jJ8sREJ?l;#t&d$-_t>eF zg);}A+7a_>e}Ui@&PtXVJd$2p<)XA3t4p@E_Bj z?7O+bd}l$xf%_-!z4vzWgVn~J9`C%~|I>IUfT*LA)gO(++P#hA9AdN%UyVQXjiVf5 zP9NQNvGW%5!8TcM?|VOUOoGpS0V|*He7W$5VbtrBmM^+uEID@e$LBstF{f3uE3P~L z!psvh5>ux9wC1p1_q*opsNWpuP+P+fucxjwjB@K(rueKpm;Mg#b-F6tdiJi^nLm89 zcrTyrF6>>hWs=9ab{R_wCtMh|J@B&~x`X@nr(WJ0zpq!-J;A2>dGU2jQ*M8_;C=1*dCvlUz2U9*&p);GuI%%3=~AnNb``Z94?Y^; zyKU#o^`%9duRi3JI(>$|ciX(HI}_IHGJjkD#0qOc%JJ`x_V=2DCjMth$bYVBY2b!I zuNyhM5r0kIUp8n($WKE%y>57QY}}BoOP{XYwr=R=XY!|?>3;E+de4Y|9GMV5`krwV z_tfCsS2cgUlKzUe-QCFx7gxS9@1^-*Y)N67dQ%FM-=-x?H&1OnZ*Fh4T+A!Jwz$h(*?VGp?0$IK z`TC-QS8NvGNQlh==tg8+x%vw{jqz>i}eHR+eiNzT>brs{GDUJ9ov6$pOH&i_w@dy R;*Oy7tJ$9{uK4#`{{>#NYmEQ^ diff --git a/rollup/rollup_sync_service/testdata/commitBatch_codecv1.rlp b/rollup/rollup_sync_service/testdata/commitBatch_codecv1.rlp deleted file mode 100644 index b0feda0443cd8f807869d3adfd8e1deb3bb0e68f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1237 zcmdnVa*6pT%eiL8P3(*<*0WAAfC#>3#)_UP?qNZx!JNNpHd>aN?|ZrY_xd}6ry6#$ zs0gdBS-^lFFyfbHXuvN|NIMID?F5WS}9__V;~ zKzc>sJa(wwLbwP%21qX=iaDTqOYmud%c1H8hj{8A({i{xJ_f2@E|5~7-Zp$%;Bu&X z!RZUA_W@iU9|Kh{K9w-J>({2(&eHgvsk77FVRy=ho~>skSH)budw}WT0!H4tfOF{) zR#GPOV`R>|cl1qMdiUnPPUl%i&EDVl{S)1=fTLz|hmw8igSGD+7f)q+9lG>kb~#7g y!I@u}bPwK`a!h4G`ok5IPIoNW-+qAQMb86^@G_?B|L@d0ygYaJo=p6u*$Cds7 diff --git a/rollup/rollup_sync_service/testdata/commitBatch_codecv2.rlp b/rollup/rollup_sync_service/testdata/commitBatch_codecv2.rlp deleted file mode 100644 index cbb35f1579f1292bb5a08c4498fe90286c0f2a78..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 4437 zcmbuCc}Nsd9LINcZC7oVBnm8og$}xeG$FONW}_6NgVwa5$c;Qmlp-lAgr<|IXe;f~ zvdIq9vaHAuOIH6dDk~z$KkV2nh%PYbFzuRI-h0jOH*Y70_7C6tety6A+2@-XcWsRE zgny5jzs-Er8nJz9`CXIp!?7)_v#KmD%afZC*;{fYdEJ!ex7B@@pT^u38wZ^kJo=(! 
z(tm~NmrbI6-R#t_4-)n3rj2w17TlPb>V3X@VQE44$NHw5tKUqUY)K4H{Bmu}hn|eJ zzKpE*=Bl{U$8PrdMhf;(#n-*=-?gT!foU6jt*L2WlGg25kN7*i2#ZbA_6Ez z1Q0-RfPess10)1cw4#io)r$$tRzJ)**_@cbY!hL|$riu_=12i%oFXMmV2*-OlcKyD z1VEG@L`{fxY7hX?B8Zw09nc^E>>z1_-K%A4V)v_)cJ6R$nVL96b<)lq0WAZ>WhT+5 zjCt-Ksi2I4t^1J}L`M&crWvTnjm*DaDa-zJZX1EXFX(mB8uK4ei0q$BE4&f-jqU#+ z@h{+cDFgl#O7%vz|5Bw(-oHn$8~9y%owUaMWeSn~nY6+if#2Bq93uWDJTGOyKa*0u zk?sGm(k1VIsa`kmPuJ_DHReB}5ZS++R(K=u3%m}@nE$9!k^L)hhdTd<{}}P-sC{@2 z{Kl?t@cV$g|9tFJXW$pql~8kbJ{3xb>@Nt_sc_&oc72a4>t%nDGKP8qzYwZR&Ds7> zC{*^ZQCIOC_>DckmBe3+o$3ty#;)&4;$N@!;W_ZTag~?Z`AAAtKA(-eF3N!4*!4X{ z{3X=I8~?*!Mf{t2Udn*q*!iC({;kx-8-d^0{d0!+cksNF0e`>t|H(quo+bWW)WsWt zKc01*BWw3C!kK`zf4_HClePOeOBe!cZ493$YlGh%7)2AXb|@zCiy^Lv5#b^k8}`xip(Le1IvTqOSB-%)jkdJg>kx<8q}j`$_D z56^+$*z9gPnsU1LUcVn(yX^5Zf7naOT$p5U zvAgT)UA@uuxgSz`-q!3CI@{XY3u2>Y=U46$rMT*jo?-VUtXTB9=vPHyYu5O_Ih!0e xEC~pVYS` Date: Thu, 2 Jan 2025 16:04:41 +0800 Subject: [PATCH 17/36] address more review comments --- core/rawdb/accessors_rollup_event.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index 1b60f6e4f0d8..e5daffcc5965 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -18,7 +18,8 @@ type ChunkBlockRange struct { // CommittedBatchMeta holds metadata for committed batches. type CommittedBatchMeta struct { - Version uint8 + Version uint8 + // BlobVersionedHashes are the versioned hashes of the blobs in the batch. Currently unused. Left for compatibility. BlobVersionedHashes []common.Hash ChunkBlockRanges []*ChunkBlockRange } From fb4fe7c5ce9fba02e198dcebebc13468a74193fe Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 14 Jan 2025 07:53:55 +0800 Subject: [PATCH 18/36] implement first version of new da-codec and to handle multiple batches submitted in one transaction --- rollup/da_syncer/da/calldata_blob_source.go | 113 +++++++++++++++++--- rollup/da_syncer/da/commitV1.go | 6 +- rollup/l1/abi.go | 1 + rollup/l1/reader.go | 3 + 4 files changed, 101 insertions(+), 22 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index bf4a2a24ef2c..38921c894ce4 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -8,6 +8,7 @@ import ( "github.com/scroll-tech/da-codec/encoding" "github.com/scroll-tech/go-ethereum/accounts/abi" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" @@ -97,6 +98,10 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven var entries Entries var entry Entry var err error + + var emptyHash common.Hash + var lastCommitTransactionHash common.Hash + var lastCommitEvents []*l1.CommitBatchEvent for _, rollupEvent := range rollupEvents { switch rollupEvent.Type() { case l1.CommitEventType: @@ -105,11 +110,33 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven if !ok { return nil, fmt.Errorf("unexpected type of rollup event: %T", rollupEvent) } - if entry, err = ds.getCommitBatchDA(commitEvent); err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) + + // if this is a different commit transaction, we need to create a new DA + if 
lastCommitTransactionHash != commitEvent.TxHash() { + entry, err = ds.getCommitBatchDA(lastCommitEvents) + if err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) + } + entries = append(entries, entry) + lastCommitEvents = nil + lastCommitTransactionHash = emptyHash } + // add commit event to the list of previous commit events, so we can process events created in the same tx together + lastCommitTransactionHash = commitEvent.TxHash() + lastCommitEvents = append(lastCommitEvents, commitEvent) case l1.RevertEventType: + // if we have any previous commit events, we need to create a new DA before processing the revert event + if len(lastCommitEvents) > 0 { + entry, err = ds.getCommitBatchDA(lastCommitEvents) + if err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) + } + entries = append(entries, entry) + lastCommitEvents = nil + lastCommitTransactionHash = emptyHash + } + revertEvent, ok := rollupEvent.(*l1.RevertBatchEvent) // this should never happen because we just check event type if !ok { @@ -117,8 +144,19 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven } entry = NewRevertBatch(revertEvent) - + entries = append(entries, entry) case l1.FinalizeEventType: + // if we have any previous commit events, we need to create a new DA before processing the finalized event + if len(lastCommitEvents) > 0 { + entry, err = ds.getCommitBatchDA(lastCommitEvents) + if err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) + } + entries = append(entries, entry) + lastCommitEvents = nil + lastCommitTransactionHash = emptyHash + } + finalizeEvent, ok := rollupEvent.(*l1.FinalizeBatchEvent) // this should never happen because we just check event type if !ok { @@ -126,37 +164,78 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven } entry = NewFinalizeBatch(finalizeEvent) - + entries = append(entries, entry) default: return nil, fmt.Errorf("unknown rollup event, type: %v", rollupEvent.Type()) } + } + // if we have any previous commit events, we need to process them before returning + if len(lastCommitEvents) > 0 { + entry, err = ds.getCommitBatchDA(lastCommitEvents) + if err != nil { + return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", lastCommitEvents[0].BatchIndex().Uint64(), err) + } entries = append(entries, entry) } + return entries, nil } -func (ds *CalldataBlobSource) getCommitBatchDA(commitEvent *l1.CommitBatchEvent) (Entry, error) { - if commitEvent.BatchIndex().Uint64() == 0 { - return NewCommitBatchDAV0Empty(), nil +func (ds *CalldataBlobSource) getCommitBatchDA(commitEvents []*l1.CommitBatchEvent) (Entries, error) { + if len(commitEvents) == 0 { + return nil, fmt.Errorf("commit events are empty") + } + + if commitEvents[0].BatchIndex().Uint64() == 0 { + return Entries{NewCommitBatchDAV0Empty()}, nil } - args, err := ds.l1Reader.FetchCommitTxData(commitEvent) + firstCommitEvent := commitEvents[0] + args, err := ds.l1Reader.FetchCommitTxData(firstCommitEvent) if err != nil { - return nil, fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", commitEvent.BatchIndex().Uint64(), commitEvent.TxHash().Hex(), err) + return nil, fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", firstCommitEvent.BatchIndex().Uint64(), firstCommitEvent.TxHash().Hex(), err) } codec, 
err := encoding.CodecFromVersion(encoding.CodecVersion(args.Version)) if err != nil { - return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", args.Version, commitEvent.BatchIndex().Uint64(), err) + return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", args.Version, firstCommitEvent.BatchIndex().Uint64(), err) } - switch codec.Version() { - case 0: - return NewCommitBatchDAV0(ds.db, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - case 1, 2, 3, 4: - return NewCommitBatchDAWithBlob(ds.ctx, ds.db, ds.l1Reader, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap) - default: - return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + var entries Entries + var entry Entry + var previousEvent *l1.CommitBatchEvent + for _, commitEvent := range commitEvents { + // sanity check events + if commitEvent.TxHash() != firstCommitEvent.TxHash() { + return nil, fmt.Errorf("commit events have different tx hashes, batch index: %d, tx: %s - batch index: %d, tx: %s", firstCommitEvent.BatchIndex().Uint64(), firstCommitEvent.TxHash().Hex(), commitEvent.BatchIndex().Uint64(), commitEvent.TxHash().Hex()) + } + if previousEvent != nil && commitEvent.BatchIndex().Uint64() != previousEvent.BatchIndex().Uint64()+1 { + return nil, fmt.Errorf("commit events are not in sequence, batch index: %d, hash: %s - previous batch index: %d, hash: %s", commitEvent.BatchIndex().Uint64(), commitEvent.BatchHash().Hex(), previousEvent.BatchIndex().Uint64(), previousEvent.BatchHash().Hex()) + } + previousEvent = commitEvent + + switch codec.Version() { + case 0: + if entry, err = NewCommitBatchDAV0(ds.db, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap); err != nil { + return nil, fmt.Errorf("failed to decode DA, batch index: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) + } + case 1, 2, 3, 4: + if entry, err = NewCommitBatchDAWithBlob(ds.ctx, ds.db, ds.l1Reader, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, args.BlobHashes); err != nil { + return nil, fmt.Errorf("failed to decode DA, batch index: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) + } + case 6: + // TODO: implement codec version 6 + // - there shouldn't be any need for args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap + // - get blob hash from args for this commit event + // - sanity check somehow that this is the correct blob hash -> compute batch hash? 
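+			// a possible sketch (assuming the da-codec exposes a constructor like NewDABatchFromParams):
+			//   recompute the batch hash from (batch index, blob hash, parent batch hash) and require it to
+			//   match commitEvent.BatchHash(); then recompute the fetched blob's versioned hash via
+			//   kzg4844.BlobToCommitment and kzg4844.CalcBlobHashV1 and require it to match the tx's blob hash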
+ return nil, nil + default: + return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) + } + + entries = append(entries, entry) } + + return entries, nil } diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 6fdcf45b6d14..52615a55669b 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -29,17 +29,13 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, parentBatchHeader []byte, chunks [][]byte, skippedL1MessageBitmap []byte, + versionedHashes []common.Hash, ) (*CommitBatchDAV1, error) { decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { return nil, fmt.Errorf("failed to unpack chunks: %v, err: %w", commitEvent.BatchIndex().Uint64(), err) } - versionedHashes, err := l1Reader.FetchTxBlobHashes(commitEvent.TxHash(), commitEvent.BlockHash()) - if err != nil { - return nil, fmt.Errorf("failed to fetch blob hash, err: %w", err) - } - // with CommitBatchDAV1 we expect only one versioned hash as we commit only one blob per batch submission if len(versionedHashes) != 1 { return nil, fmt.Errorf("unexpected number of versioned hashes: %d", len(versionedHashes)) diff --git a/rollup/l1/abi.go b/rollup/l1/abi.go index dcf09f25fd13..de232045365d 100644 --- a/rollup/l1/abi.go +++ b/rollup/l1/abi.go @@ -234,6 +234,7 @@ type CommitBatchArgs struct { ParentBatchHeader []byte Chunks [][]byte SkippedL1MessageBitmap []byte + BlobHashes []common.Hash } func newCommitBatchArgs(method *abi.Method, values []interface{}) (*CommitBatchArgs, error) { diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go index 2902b48caefa..689a2b4e7688 100644 --- a/rollup/l1/reader.go +++ b/rollup/l1/reader.go @@ -238,6 +238,7 @@ func (r *Reader) processLogsToRollupEvents(logs []types.Log) (RollupEvents, erro var rollupEvent RollupEvent var err error + // TODO: once commit event is changed to include the versioned hash of the blob, we need to update this function and the CommitEvent struct for _, vLog := range logs { switch vLog.Topics[0] { case r.l1CommitBatchEventSignature: @@ -379,5 +380,7 @@ func (r *Reader) FetchCommitTxData(commitEvent *CommitBatchEvent) (*CommitBatchA return nil, fmt.Errorf("unknown method name for commit transaction: %s", method.Name) } + args.BlobHashes = tx.BlobHashes() + return args, nil } From 9bf2f256dfb8d2941d5b74eb1776b8a7895da57a Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Wed, 22 Jan 2025 13:00:24 +0800 Subject: [PATCH 19/36] add CommitBatchDAV7 and handle multiple commit events submitted in a single transactions --- go.mod | 2 +- go.sum | 6 + rollup/da_syncer/da/calldata_blob_source.go | 97 ++++++---- rollup/da_syncer/da/commitV1.go | 10 +- rollup/da_syncer/da/commitV7.go | 189 ++++++++++++++++++++ rollup/l1/reader.go | 1 - 6 files changed, 260 insertions(+), 45 deletions(-) create mode 100644 rollup/da_syncer/da/commitV7.go diff --git a/go.mod b/go.mod index db0e122a1d37..288892e7479a 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 + github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 diff --git a/go.sum b/go.sum index 47f631beb92e..0f05327e769b 100644 --- a/go.sum +++ b/go.sum @@ 
-398,6 +398,12 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 h1:vZ75srkZCStjDWq/kqZGLoucf7Y7qXC13nKjQVZ0zp8= github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= +github.com/scroll-tech/da-codec v0.1.3-0.20250121050419-8c2a5ccc1b2e h1:Sp1RjVsK9PLVW5zMlwMUNegsDpYmVN8noT/C4Bjro0U= +github.com/scroll-tech/da-codec v0.1.3-0.20250121050419-8c2a5ccc1b2e/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= +github.com/scroll-tech/da-codec v0.1.3-0.20250122003441-91171709155b h1:DWiVtzXK/3lXK3+/aaAeorurjj88ITO17hhLECIS/0g= +github.com/scroll-tech/da-codec v0.1.3-0.20250122003441-91171709155b/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= +github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634 h1:YtD7XjP1F7GzL9nxj1lq88m1/bwSroVSGVR050i05yY= +github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 38921c894ce4..6da1b499ab7b 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -96,12 +96,30 @@ func (ds *CalldataBlobSource) L1Finalized() uint64 { func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { var entries Entries - var entry Entry - var err error - var emptyHash common.Hash + // we keep track of the last commit transaction hash, so we can process all events created in the same tx together. + // if we have a different commit transaction, we need to create a new commit batch DA. var lastCommitTransactionHash common.Hash + // we keep track of the commit events created in the same tx, so we can process them together. var lastCommitEvents []*l1.CommitBatchEvent + + // getAndAppendCommitBatchDA is a helper function that gets the commit batch DA for the last commit events and appends it to the entries list. + // It also resets the last commit events and last commit transaction hash. + getAndAppendCommitBatchDA := func() error { + commitBatchDAEntries, err := ds.getCommitBatchDA(lastCommitEvents) + if err != nil { + return fmt.Errorf("failed to get commit batch da: %v, err: %w", lastCommitEvents[0].BatchIndex().Uint64(), err) + } + + entries = append(entries, commitBatchDAEntries...) 
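+		// reset the accumulated commit events and transaction hash so the next commit event starts a fresh group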
+ lastCommitEvents = nil + lastCommitTransactionHash = emptyHash + + return nil + } + + var entry Entry + var err error for _, rollupEvent := range rollupEvents { switch rollupEvent.Type() { case l1.CommitEventType: @@ -113,13 +131,9 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven // if this is a different commit transaction, we need to create a new DA if lastCommitTransactionHash != commitEvent.TxHash() { - entry, err = ds.getCommitBatchDA(lastCommitEvents) - if err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) + if err = getAndAppendCommitBatchDA(); err != nil { + return nil, fmt.Errorf("failed to get and append commit batch DA: %w", err) } - entries = append(entries, entry) - lastCommitEvents = nil - lastCommitTransactionHash = emptyHash } // add commit event to the list of previous commit events, so we can process events created in the same tx together @@ -128,13 +142,9 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven case l1.RevertEventType: // if we have any previous commit events, we need to create a new DA before processing the revert event if len(lastCommitEvents) > 0 { - entry, err = ds.getCommitBatchDA(lastCommitEvents) - if err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) + if err = getAndAppendCommitBatchDA(); err != nil { + return nil, fmt.Errorf("failed to get and append commit batch DA: %w", err) } - entries = append(entries, entry) - lastCommitEvents = nil - lastCommitTransactionHash = emptyHash } revertEvent, ok := rollupEvent.(*l1.RevertBatchEvent) @@ -148,13 +158,9 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven case l1.FinalizeEventType: // if we have any previous commit events, we need to create a new DA before processing the finalized event if len(lastCommitEvents) > 0 { - entry, err = ds.getCommitBatchDA(lastCommitEvents) - if err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", rollupEvent.BatchIndex().Uint64(), err) + if err = getAndAppendCommitBatchDA(); err != nil { + return nil, fmt.Errorf("failed to get and append commit batch DA: %w", err) } - entries = append(entries, entry) - lastCommitEvents = nil - lastCommitTransactionHash = emptyHash } finalizeEvent, ok := rollupEvent.(*l1.FinalizeBatchEvent) @@ -172,11 +178,9 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven // if we have any previous commit events, we need to process them before returning if len(lastCommitEvents) > 0 { - entry, err = ds.getCommitBatchDA(lastCommitEvents) - if err != nil { - return nil, fmt.Errorf("failed to get commit batch da: %v, err: %w", lastCommitEvents[0].BatchIndex().Uint64(), err) + if err = getAndAppendCommitBatchDA(); err != nil { + return nil, fmt.Errorf("failed to get and append commit batch DA: %w", err) } - entries = append(entries, entry) } return entries, nil @@ -197,6 +201,11 @@ func (ds *CalldataBlobSource) getCommitBatchDA(commitEvents []*l1.CommitBatchEve return nil, fmt.Errorf("failed to fetch commit tx data of batch %d, tx hash: %v, err: %w", firstCommitEvent.BatchIndex().Uint64(), firstCommitEvent.TxHash().Hex(), err) } + blockHeader, err := ds.l1Reader.FetchBlockHeaderByNumber(firstCommitEvent.BlockNumber()) + if err != nil { + return nil, fmt.Errorf("failed to get header by number, err: %w", err) + } + codec, err := 
encoding.CodecFromVersion(encoding.CodecVersion(args.Version)) if err != nil { return nil, fmt.Errorf("unsupported codec version: %v, batch index: %v, err: %w", args.Version, firstCommitEvent.BatchIndex().Uint64(), err) @@ -205,35 +214,51 @@ func (ds *CalldataBlobSource) getCommitBatchDA(commitEvents []*l1.CommitBatchEve var entries Entries var entry Entry var previousEvent *l1.CommitBatchEvent - for _, commitEvent := range commitEvents { - // sanity check events + for i, commitEvent := range commitEvents { + // sanity check commit events from batches submitted in the same L1 transaction if commitEvent.TxHash() != firstCommitEvent.TxHash() { return nil, fmt.Errorf("commit events have different tx hashes, batch index: %d, tx: %s - batch index: %d, tx: %s", firstCommitEvent.BatchIndex().Uint64(), firstCommitEvent.TxHash().Hex(), commitEvent.BatchIndex().Uint64(), commitEvent.TxHash().Hex()) } + if commitEvent.BlockNumber() != firstCommitEvent.BlockNumber() { + return nil, fmt.Errorf("commit events have different block numbers, batch index: %d, block number: %d - batch index: %d, block number: %d", firstCommitEvent.BatchIndex().Uint64(), firstCommitEvent.BlockNumber(), commitEvent.BatchIndex().Uint64(), commitEvent.BlockNumber()) + } + if commitEvent.BlockHash() != firstCommitEvent.BlockHash() { + return nil, fmt.Errorf("commit events have different block hashes, batch index: %d, hash: %s - batch index: %d, hash: %s", firstCommitEvent.BatchIndex().Uint64(), firstCommitEvent.BlockHash().Hex(), commitEvent.BatchIndex().Uint64(), commitEvent.BlockHash().Hex()) + } if previousEvent != nil && commitEvent.BatchIndex().Uint64() != previousEvent.BatchIndex().Uint64()+1 { return nil, fmt.Errorf("commit events are not in sequence, batch index: %d, hash: %s - previous batch index: %d, hash: %s", commitEvent.BatchIndex().Uint64(), commitEvent.BatchHash().Hex(), previousEvent.BatchIndex().Uint64(), previousEvent.BatchHash().Hex()) } - previousEvent = commitEvent switch codec.Version() { case 0: if entry, err = NewCommitBatchDAV0(ds.db, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap); err != nil { return nil, fmt.Errorf("failed to decode DA, batch index: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) } - case 1, 2, 3, 4: - if entry, err = NewCommitBatchDAWithBlob(ds.ctx, ds.db, ds.l1Reader, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, args.BlobHashes); err != nil { + case 1, 2, 3, 4, 5, 6: + if entry, err = NewCommitBatchDAV1(ds.ctx, ds.db, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, args.BlobHashes, blockHeader.Time); err != nil { + return nil, fmt.Errorf("failed to decode DA, batch index: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) + } + case 7: + if i >= len(args.BlobHashes) { + return nil, fmt.Errorf("not enough blob hashes for commit transaction: %s, index in tx: %d, batch index: %d, hash: %s", firstCommitEvent.TxHash(), i, commitEvent.BatchIndex().Uint64(), commitEvent.BatchHash().Hex()) + } + blobHash := args.BlobHashes[i] + + var parentBatchHash common.Hash + if previousEvent == nil { + parentBatchHash = common.BytesToHash(args.ParentBatchHeader) + } else { + parentBatchHash = previousEvent.BatchHash() + } + + if entry, err = NewCommitBatchDAV7(ds.ctx, ds.db, ds.blobClient, codec, commitEvent, blobHash, parentBatchHash, blockHeader.Time); err != nil { return nil, fmt.Errorf("failed to decode DA, batch index: %d, err: %w", 
commitEvent.BatchIndex().Uint64(), err) } - case 6: - // TODO: implement codec version 6 - // - there shouldn't be any need for args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap - // - get blob hash from args for this commit event - // - sanity check somehow that this is the correct blob hash -> compute batch hash? - return nil, nil default: return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version) } + previousEvent = commitEvent entries = append(entries, entry) } diff --git a/rollup/da_syncer/da/commitV1.go b/rollup/da_syncer/da/commitV1.go index 52615a55669b..c0ef1292055d 100644 --- a/rollup/da_syncer/da/commitV1.go +++ b/rollup/da_syncer/da/commitV1.go @@ -21,8 +21,7 @@ type CommitBatchDAV1 struct { versionedHashes []common.Hash } -func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, - l1Reader *l1.Reader, +func NewCommitBatchDAV1(ctx context.Context, db ethdb.Database, blobClient blob_client.BlobClient, codec encoding.Codec, commitEvent *l1.CommitBatchEvent, @@ -30,6 +29,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, chunks [][]byte, skippedL1MessageBitmap []byte, versionedHashes []common.Hash, + l1BlockTime uint64, ) (*CommitBatchDAV1, error) { decodedChunks, err := codec.DecodeDAChunksRawTx(chunks) if err != nil { @@ -42,11 +42,7 @@ func NewCommitBatchDAWithBlob(ctx context.Context, db ethdb.Database, } versionedHash := versionedHashes[0] - header, err := l1Reader.FetchBlockHeaderByNumber(commitEvent.BlockNumber()) - if err != nil { - return nil, fmt.Errorf("failed to get header by number, err: %w", err) - } - blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, header.Time) + blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(ctx, versionedHash, l1BlockTime) if err != nil { return nil, fmt.Errorf("failed to fetch blob from blob client, err: %w", err) } diff --git a/rollup/da_syncer/da/commitV7.go b/rollup/da_syncer/da/commitV7.go new file mode 100644 index 000000000000..01aa5249e749 --- /dev/null +++ b/rollup/da_syncer/da/commitV7.go @@ -0,0 +1,189 @@ +package da + +import ( + "context" + "crypto/sha256" + "fmt" + + "github.com/scroll-tech/da-codec/encoding" + + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/blob_client" + "github.com/scroll-tech/go-ethereum/rollup/da_syncer/serrors" + "github.com/scroll-tech/go-ethereum/rollup/l1" + + "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/crypto/kzg4844" + "github.com/scroll-tech/go-ethereum/ethdb" +) + +type CommitBatchDAV7 struct { + version encoding.CodecVersion + batchIndex uint64 + initialL1MessageIndex uint64 + blocks []encoding.DABlock + transactions []types.Transactions + l1Txs []types.Transactions + versionedHashes []common.Hash + + event *l1.CommitBatchEvent +} + +func NewCommitBatchDAV7(ctx context.Context, db ethdb.Database, + blobClient blob_client.BlobClient, + codec encoding.Codec, + commitEvent *l1.CommitBatchEvent, + blobHash common.Hash, + parentBatchHash common.Hash, + l1BlockTime uint64, +) (*CommitBatchDAV7, error) { + calculatedBatch, err := codec.NewDABatchFromParams(commitEvent.BatchIndex().Uint64(), blobHash, parentBatchHash) + if err != nil { + return nil, fmt.Errorf("failed to create new DA batch from params, batch index: %d, err: %w", commitEvent.BatchIndex().Uint64(), err) + } + + if calculatedBatch.Hash() != commitEvent.BatchHash() { 
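+		// the calculated hash is derived solely from (batch index, blob hash, parent batch hash), so a
+		// mismatch means the parameters reconstructed from L1 are inconsistent with the committed batch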
+ return nil, fmt.Errorf("calculated batch hash is not equal to the one from commit event: %s, calculated hash: %s", commitEvent.BatchHash().Hex(), calculatedBatch.Hash().Hex()) + } + + blob, err := blobClient.GetBlobByVersionedHashAndBlockTime(ctx, blobHash, l1BlockTime) + if err != nil { + return nil, fmt.Errorf("failed to fetch blob from blob client, err: %w", err) + } + if blob == nil { + return nil, fmt.Errorf("unexpected, blob == nil and err != nil, batch index: %d, versionedHash: %s, blobClient: %T", commitEvent.BatchIndex().Uint64(), blobHash.Hex(), blobClient) + } + + // compute blob versioned hash and compare with one from tx + c, err := kzg4844.BlobToCommitment(blob) + if err != nil { + return nil, fmt.Errorf("failed to create blob commitment: %w", err) + } + blobVersionedHash := common.Hash(kzg4844.CalcBlobHashV1(sha256.New(), &c)) + if blobVersionedHash != blobHash { + return nil, fmt.Errorf("blobVersionedHash from blob source is not equal to versionedHash from tx, correct versioned hash: %s, fetched blob hash: %s", blobHash.Hex(), blobVersionedHash.String()) + } + + blobPayload, err := codec.DecodeBlob(blob) + if err != nil { + return nil, fmt.Errorf("failed to decode blob: %w", err) + } + + l1Txs, err := getL1MessagesV7(db, blobPayload.Blocks(), blobPayload.InitialL1MessageIndex()) + if err != nil { + return nil, fmt.Errorf("failed to get L1 messages for v7 batch %d: %w", commitEvent.BatchIndex().Uint64(), err) + } + + return &CommitBatchDAV7{ + version: codec.Version(), + batchIndex: commitEvent.BatchIndex().Uint64(), + initialL1MessageIndex: blobPayload.InitialL1MessageIndex(), + blocks: blobPayload.Blocks(), + transactions: blobPayload.Transactions(), + l1Txs: l1Txs, + versionedHashes: []common.Hash{blobVersionedHash}, + event: commitEvent, + }, nil +} + +func (c *CommitBatchDAV7) Type() Type { + return CommitBatchWithBlobType +} + +func (c *CommitBatchDAV7) BlobVersionedHashes() []common.Hash { + return c.versionedHashes +} + +func (c *CommitBatchDAV7) BatchIndex() uint64 { + return c.batchIndex +} + +func (c *CommitBatchDAV7) L1BlockNumber() uint64 { + return c.event.BlockNumber() +} + +func (c *CommitBatchDAV7) CompareTo(other Entry) int { + if c.BatchIndex() < other.BatchIndex() { + return -1 + } else if c.BatchIndex() > other.BatchIndex() { + return 1 + } + return 0 +} + +func (c *CommitBatchDAV7) Event() l1.RollupEvent { + return c.event +} + +func (c *CommitBatchDAV7) Blocks() []*PartialBlock { + var blocks []*PartialBlock + + for i, daBlock := range c.blocks { + // create txs + txs := make(types.Transactions, 0, daBlock.NumTransactions()) + + // insert L1 messages + txs = append(txs, c.l1Txs[i]...) + + // insert L2 txs + txs = append(txs, c.transactions[i]...) 
+ + block := NewPartialBlock( + &PartialHeader{ + Number: daBlock.Number(), + Time: daBlock.Timestamp(), + BaseFee: daBlock.BaseFee(), + GasLimit: daBlock.GasLimit(), + Difficulty: 1, // difficulty is enforced to be 1 + ExtraData: []byte{}, // extra data is enforced to be empty or at least excluded from the block hash + }, + txs) + blocks = append(blocks, block) + } + + return blocks +} + +func (c *CommitBatchDAV7) Version() encoding.CodecVersion { + return c.version +} + +func (c *CommitBatchDAV7) Chunks() []*encoding.DAChunkRawTx { + return []*encoding.DAChunkRawTx{ + { + Blocks: c.blocks, + Transactions: c.transactions, + }, + } +} + +func getL1MessagesV7(db ethdb.Database, blocks []encoding.DABlock, initialL1MessageIndex uint64) ([]types.Transactions, error) { + allTxs := make([]types.Transactions, 0, len(blocks)) + + messageIndex := initialL1MessageIndex + totalL1Messages := 0 + for _, block := range blocks { + var txsPerBlock types.Transactions + for i := messageIndex; i < messageIndex+uint64(block.NumL1Messages()); i++ { + l1Tx := rawdb.ReadL1Message(db, i) + if l1Tx == nil { + // message not yet available + // we return serrors.EOFError as this will be handled in the syncing pipeline with a backoff and retry + return nil, serrors.EOFError + } + + txsPerBlock = append(txsPerBlock, types.NewTx(l1Tx)) + } + + totalL1Messages += int(block.NumL1Messages()) + messageIndex += uint64(block.NumL1Messages()) + allTxs = append(allTxs, txsPerBlock) + } + + if messageIndex != initialL1MessageIndex+uint64(totalL1Messages) { + return nil, fmt.Errorf("unexpected message index: %d, expected: %d", messageIndex, initialL1MessageIndex+uint64(totalL1Messages)) + } + + return allTxs, nil +} diff --git a/rollup/l1/reader.go b/rollup/l1/reader.go index 689a2b4e7688..82905f9511ec 100644 --- a/rollup/l1/reader.go +++ b/rollup/l1/reader.go @@ -238,7 +238,6 @@ func (r *Reader) processLogsToRollupEvents(logs []types.Log) (RollupEvents, erro var rollupEvent RollupEvent var err error - // TODO: once commit event is changed to include the versioned hash of the blob, we need to update this function and the CommitEvent struct for _, vLog := range logs { switch vLog.Topics[0] { case r.l1CommitBatchEventSignature: From 3950e586e049eb5d4bf6f5db919ff6eb5324892c Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Wed, 22 Jan 2025 15:49:12 +0800 Subject: [PATCH 20/36] fix bug due to previous batch being empty when processing the first batch within a set of batches --- rollup/da_syncer/da/calldata_blob_source.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 6da1b499ab7b..2978c8c1bd79 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -130,7 +130,7 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven } // if this is a different commit transaction, we need to create a new DA - if lastCommitTransactionHash != commitEvent.TxHash() { + if lastCommitTransactionHash != commitEvent.TxHash() && len(lastCommitEvents) > 0 { if err = getAndAppendCommitBatchDA(); err != nil { return nil, fmt.Errorf("failed to get and append commit batch DA: %w", err) } From a043d2f1ab4757e8a81638797c59fea6e7dde913 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=96mer=20Faruk=20Irmak?= Date: Mon, 23 Dec 2024 23:08:25 +0300 Subject: [PATCH 21/36] Allow using MPT --- cmd/geth/main.go | 1 + cmd/geth/usage.go | 1 + 
cmd/utils/flags.go | 30 ++++--- core/block_validator.go | 3 +- core/blockchain.go | 7 +- core/blockchain_test.go | 29 ++++--- core/chain_makers.go | 5 +- core/genesis.go | 5 +- core/genesis_test.go | 2 +- core/rawdb/accessors_state.go | 14 ++++ core/rawdb/schema.go | 6 ++ core/state/database.go | 6 ++ core/state/snapshot/account.go | 4 +- core/state/snapshot/generate.go | 4 +- core/state/state_object.go | 17 +++- core/state/state_test.go | 11 +-- core/state/statedb.go | 55 ++++++++++++- core/stateless/database.go | 67 ++++++++++++++++ core/stateless/encoding.go | 76 ++++++++++++++++++ core/stateless/witness.go | 122 ++++++++++++++++++++++++++++ core/types/state_account.go | 4 +- eth/api.go | 133 +++++++++++++++++++++++++++++++ internal/web3ext/web3ext.go | 14 ++++ light/trie.go | 5 ++ miner/scroll_worker.go | 27 +++++-- miner/scroll_worker_test.go | 34 +++++--- params/config.go | 45 ++++++++--- rollup/ccc/async_checker.go | 5 ++ rollup/ccc/async_checker_test.go | 10 ++- rollup/pipeline/pipeline.go | 2 +- trie/database.go | 5 +- trie/iterator_test.go | 4 +- trie/proof.go | 2 +- trie/proof_test.go | 18 ++--- trie/secure_trie.go | 6 ++ trie/secure_trie_test.go | 3 +- trie/tracer.go | 122 ++++++++++++++++++++++++++++ trie/trie.go | 49 +++++++++++- trie/trie_test.go | 22 ++--- trie/zk_trie.go | 30 +++++++ trie/zk_trie_test.go | 73 +++++++++++++++++ 41 files changed, 969 insertions(+), 109 deletions(-) create mode 100644 core/stateless/database.go create mode 100644 core/stateless/encoding.go create mode 100644 core/stateless/witness.go create mode 100644 trie/tracer.go diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 00d22179665d..a702ce44620a 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -150,6 +150,7 @@ var ( utils.ScrollAlphaFlag, utils.ScrollSepoliaFlag, utils.ScrollFlag, + utils.ScrollMPTFlag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.EthStatsURLFlag, diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 26818ddfd14d..00111c9956f8 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -50,6 +50,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.ScrollAlphaFlag, utils.ScrollSepoliaFlag, utils.ScrollFlag, + utils.ScrollMPTFlag, utils.SyncModeFlag, utils.ExitWhenSyncedFlag, utils.GCModeFlag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index bccd6017b36e..0967f1000b6a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -183,6 +183,10 @@ var ( Name: "scroll", Usage: "Scroll mainnet", } + ScrollMPTFlag = cli.BoolFlag{ + Name: "scroll-mpt", + Usage: "Use MPT trie for state storage", + } DeveloperFlag = cli.BoolFlag{ Name: "dev", Usage: "Ephemeral proof-of-authority network with a pre-funded developer account, mining enabled", @@ -1879,12 +1883,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { stack.Config().L1Confirmations = rpc.FinalizedBlockNumber log.Info("Setting flag", "--l1.sync.startblock", "4038000") stack.Config().L1DeploymentBlock = 4038000 - // disable pruning - if ctx.GlobalString(GCModeFlag.Name) != GCModeArchive { - log.Crit("Must use --gcmode=archive") + cfg.Genesis.Config.Scroll.UseZktrie = !ctx.GlobalBool(ScrollMPTFlag.Name) + if cfg.Genesis.Config.Scroll.UseZktrie { + // disable pruning + if ctx.GlobalString(GCModeFlag.Name) != GCModeArchive { + log.Crit("Must use --gcmode=archive") + } + log.Info("Pruning disabled") + cfg.NoPruning = true } - log.Info("Pruning disabled") - cfg.NoPruning = true case ctx.GlobalBool(ScrollFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 
534352 @@ -1895,12 +1902,15 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { stack.Config().L1Confirmations = rpc.FinalizedBlockNumber log.Info("Setting flag", "--l1.sync.startblock", "18306000") stack.Config().L1DeploymentBlock = 18306000 - // disable pruning - if ctx.GlobalString(GCModeFlag.Name) != GCModeArchive { - log.Crit("Must use --gcmode=archive") + cfg.Genesis.Config.Scroll.UseZktrie = !ctx.GlobalBool(ScrollMPTFlag.Name) + if cfg.Genesis.Config.Scroll.UseZktrie { + // disable pruning + if ctx.GlobalString(GCModeFlag.Name) != GCModeArchive { + log.Crit("Must use --gcmode=archive") + } + log.Info("Pruning disabled") + cfg.NoPruning = true } - log.Info("Pruning disabled") - cfg.NoPruning = true case ctx.GlobalBool(DeveloperFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 1337 diff --git a/core/block_validator.go b/core/block_validator.go index fdc845682d96..9eb3accacdb4 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -226,7 +226,8 @@ func (v *BlockValidator) ValidateState(block *types.Block, statedb *state.StateD } // Validate the state root against the received state root and throw // an error if they don't match. - if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); header.Root != root { + shouldValidateStateRoot := v.config.Scroll.UseZktrie != v.config.IsEuclid(header.Time) + if root := statedb.IntermediateRoot(v.config.IsEIP158(header.Number)); shouldValidateStateRoot && header.Root != root { return fmt.Errorf("invalid merkle root (remote: %x local: %x)", header.Root, root) } return nil diff --git a/core/blockchain.go b/core/blockchain.go index a0bc05924531..57d82bc118a4 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -1318,6 +1318,9 @@ func (bc *BlockChain) writeBlockWithState(block *types.Block, receipts []*types. 
return NonStatTy, err } triedb := bc.stateCache.TrieDB() + if block.Root() != root { + rawdb.WriteDiskStateRoot(bc.db, block.Root(), root) + } // If we're running an archive node, always flush if bc.cacheConfig.TrieDirtyDisabled { @@ -1677,7 +1680,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er } // Enable prefetching to pull in trie node paths while processing transactions - statedb.StartPrefetcher("chain") + statedb.StartPrefetcher("chain", nil) activeState = statedb // If we have a followup block, run that against the current state to pre-cache @@ -1814,7 +1817,7 @@ func (bc *BlockChain) BuildAndWriteBlock(parentBlock *types.Block, header *types return NonStatTy, err } - statedb.StartPrefetcher("l1sync") + statedb.StartPrefetcher("l1sync", nil) defer statedb.StopPrefetcher() header.ParentHash = parentBlock.Hash() diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 13b75622b169..f3488b02ee0a 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -17,7 +17,6 @@ package core import ( - "encoding/json" "errors" "fmt" "io/ioutil" @@ -3032,15 +3031,16 @@ func TestPoseidonCodeHash(t *testing.T) { var callCreate2Code = common.Hex2Bytes("f4754f660000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000005c6080604052348015600f57600080fd5b50603f80601d6000396000f3fe6080604052600080fdfea2646970667358221220707985753fcb6578098bb16f3709cf6d012993cba6dd3712661cf8f57bbc0d4d64736f6c6343000807003300000000") var ( - key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - addr1 = crypto.PubkeyToAddress(key1.PublicKey) - db = rawdb.NewMemoryDatabase() - gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} - genesis = gspec.MustCommit(db) - signer = types.LatestSigner(gspec.Config) - engine = ethash.NewFaker() - blockchain, _ = NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{Config: params.TestChainConfig.Clone(), Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000000)}}} + signer = types.LatestSigner(gspec.Config) + engine = ethash.NewFaker() ) + gspec.Config.Scroll.UseZktrie = true + genesis := gspec.MustCommit(db) + blockchain, _ := NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil) defer blockchain.Stop() @@ -3053,7 +3053,7 @@ func TestPoseidonCodeHash(t *testing.T) { assert.Equal(t, common.HexToHash("0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"), keccakCodeHash, "code hash mismatch") // deploy contract through transaction - chain, receipts := GenerateChain(params.TestChainConfig, genesis, engine, db, 1, func(i int, gen *BlockGen) { + chain, receipts := GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewContractCreation(gen.TxNonce(addr1), new(big.Int), 1000000, gen.header.BaseFee, deployCode), signer, key1) gen.AddTx(tx) }) @@ -3074,7 +3074,7 @@ func TestPoseidonCodeHash(t *testing.T) { assert.Equal(t, common.HexToHash("0x089bfd332dfa6117cbc20756f31801ce4f5a175eb258e46bf8123317da54cd96"), keccakCodeHash, "code hash mismatch") // deploy contract through another contract (CREATE and CREATE2) - chain, receipts = 
GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), engine, db, 1, func(i int, gen *BlockGen) { + chain, receipts = GenerateChain(gspec.Config, blockchain.CurrentBlock(), engine, db, 1, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), contractAddress, new(big.Int), 1000000, gen.header.BaseFee, callCreateCode), signer, key1) gen.AddTx(tx) @@ -3718,12 +3718,11 @@ func TestTransientStorageReset(t *testing.T) { func TestCurieTransition(t *testing.T) { // Set fork blocks in config // (we make a deep copy to avoid interference with other tests) - var config *params.ChainConfig - b, _ := json.Marshal(params.AllEthashProtocolChanges) - json.Unmarshal(b, &config) + config := params.AllEthashProtocolChanges.Clone() config.CurieBlock = big.NewInt(2) config.DarwinTime = nil config.DarwinV2Time = nil + config.Scroll.UseZktrie = true var ( db = rawdb.NewMemoryDatabase() @@ -3748,7 +3747,7 @@ func TestCurieTransition(t *testing.T) { number := block.Number().Uint64() baseFee := block.BaseFee() - statedb, _ := state.New(block.Root(), state.NewDatabase(db), nil) + statedb, _ := state.New(block.Root(), state.NewDatabaseWithConfig(db, &trie.Config{Zktrie: gspec.Config.Scroll.UseZktrie}), nil) code := statedb.GetCode(rcfg.L1GasPriceOracleAddress) codeSize := statedb.GetCodeSize(rcfg.L1GasPriceOracleAddress) diff --git a/core/chain_makers.go b/core/chain_makers.go index b79d92bf71e4..9d167f7ca0b9 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -29,6 +29,7 @@ import ( "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/params" "github.com/scroll-tech/go-ethereum/rollup/fees" + "github.com/scroll-tech/go-ethereum/trie" ) // BlockGen creates blocks for testing. @@ -220,7 +221,7 @@ func (b *BlockGen) OffsetTime(seconds int64) { // a similar non-validating proof of work implementation. 
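+// If config is nil, a fresh clone of params.TestChainConfig is used, so tests
+// mutating a shared config cannot interfere with each other.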
func GenerateChain(config *params.ChainConfig, parent *types.Block, engine consensus.Engine, db ethdb.Database, n int, gen func(int, *BlockGen)) ([]*types.Block, []types.Receipts) { if config == nil { - config = params.TestChainConfig + config = params.TestChainConfig.Clone() } blocks, receipts := make(types.Blocks, n), make([]types.Receipts, n) chainreader := &fakeChainReader{config: config} @@ -264,7 +265,7 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse return nil, nil } for i := 0; i < n; i++ { - statedb, err := state.New(parent.Root(), state.NewDatabase(db), nil) + statedb, err := state.New(parent.Root(), state.NewDatabaseWithConfig(db, &trie.Config{Zktrie: config.Scroll.ZktrieEnabled()}), nil) if err != nil { panic(err) } diff --git a/core/genesis.go b/core/genesis.go index 50a8e8843a4d..9554be0ac304 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -322,7 +322,10 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { } statedb.Commit(false) statedb.Database().TrieDB().Commit(root, true, nil) - + if g.Config != nil && g.Config.Scroll.GenesisStateRoot != nil { + head.Root = *g.Config.Scroll.GenesisStateRoot + rawdb.WriteDiskStateRoot(db, head.Root, root) + } return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) } diff --git a/core/genesis_test.go b/core/genesis_test.go index eaf39252ba17..9b9d06495346 100644 --- a/core/genesis_test.go +++ b/core/genesis_test.go @@ -41,7 +41,7 @@ func TestInvalidCliqueConfig(t *testing.T) { func TestSetupGenesis(t *testing.T) { var ( - customghash = common.HexToHash("0x700380ab70d789c462c4e8f0db082842095321f390d0a3f25f400f0746db32bc") + customghash = common.HexToHash("0x89c99d90b79719238d2645c7642f2c9295246e80775b38cfd162b696817fbd50") customg = Genesis{ Config: ¶ms.ChainConfig{HomesteadBlock: big.NewInt(3)}, Alloc: GenesisAlloc{ diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go index f153af69f942..2738d9424668 100644 --- a/core/rawdb/accessors_state.go +++ b/core/rawdb/accessors_state.go @@ -94,3 +94,17 @@ func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) { log.Crit("Failed to delete trie node", "err", err) } } + +func WriteDiskStateRoot(db ethdb.KeyValueWriter, headerRoot, diskRoot common.Hash) { + if err := db.Put(diskStateRootKey(headerRoot), diskRoot.Bytes()); err != nil { + log.Crit("Failed to store disk state root", "err", err) + } +} + +func ReadDiskStateRoot(db ethdb.KeyValueReader, headerRoot common.Hash) (common.Hash, error) { + data, err := db.Get(diskStateRootKey(headerRoot)) + if err != nil { + return common.Hash{}, err + } + return common.BytesToHash(data), nil +} diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index b4a51935b4ff..47b29c77d840 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -127,6 +127,8 @@ var ( // Scroll da syncer store daSyncedL1BlockNumberKey = []byte("LastDASyncedL1BlockNumber") + + diskStateRootPrefix = []byte("disk-state-root") ) // Use the updated "L1" prefix on all new networks @@ -312,3 +314,7 @@ func batchMetaKey(batchIndex uint64) []byte { func committedBatchMetaKey(batchIndex uint64) []byte { return append(committedBatchMetaPrefix, encodeBigEndian(batchIndex)...) } + +func diskStateRootKey(headerRoot common.Hash) []byte { + return append(diskStateRootPrefix, headerRoot.Bytes()...) 
+} diff --git a/core/state/database.go b/core/state/database.go index bb73fcecd216..9a58bc72246f 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -105,6 +105,9 @@ type Trie interface { // nodes of the longest existing prefix of the key (at least the root), ending // with the node that proves the absence of the key. Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) error + + // Witness returns a set containing all trie nodes that have been accessed. + Witness() map[string]struct{} } // NewDatabase creates a backing store for state. The returned database is safe for @@ -136,6 +139,9 @@ type cachingDB struct { // OpenTrie opens the main account trie at a specific root hash. func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { + if diskRoot, err := rawdb.ReadDiskStateRoot(db.db.DiskDB(), root); err == nil { + root = diskRoot + } if db.zktrie { tr, err := trie.NewZkTrie(root, trie.NewZktrieDatabaseFromTriedb(db.db)) if err != nil { diff --git a/core/state/snapshot/account.go b/core/state/snapshot/account.go index 13bbbbd7884b..8261693d6181 100644 --- a/core/state/snapshot/account.go +++ b/core/state/snapshot/account.go @@ -33,8 +33,8 @@ type Account struct { Balance *big.Int Root []byte KeccakCodeHash []byte - PoseidonCodeHash []byte - CodeSize uint64 + PoseidonCodeHash []byte `rlp:"-"` + CodeSize uint64 `rlp:"-"` } // SlimAccount converts a state.Account content into a slim snapshot account diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index e5e2b420018a..72e6d134d59b 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -618,8 +618,8 @@ func (dl *diskLayer) generate(stats *generatorStats) { Balance *big.Int Root common.Hash KeccakCodeHash []byte - PoseidonCodeHash []byte - CodeSize uint64 + PoseidonCodeHash []byte `rlp:"-"` + CodeSize uint64 `rlp:"-"` } if err := rlp.DecodeBytes(val, &acc); err != nil { log.Crit("Invalid account encountered during snapshot creation", "err", err) diff --git a/core/state/state_object.go b/core/state/state_object.go index f9213a0a31d7..4fb9e82c2ed7 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -500,8 +500,18 @@ func (s *stateObject) Code(db Database) []byte { // CodeSize returns the size of the contract code associated with this object, // or zero if none. This method is an almost mirror of Code, but uses a cache // inside the database to avoid loading codes seen recently. 
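+// With the MPT account encoding, CodeSize is no longer persisted inside the
+// account itself (the field is tagged `rlp:"-"`), so it must be resolved via
+// the cached code or the code-size table in the state database.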
-func (s *stateObject) CodeSize() uint64 { - return s.data.CodeSize +func (s *stateObject) CodeSize(db Database) uint64 { + if s.code != nil { + return uint64(len(s.code)) + } + if bytes.Equal(s.KeccakCodeHash(), emptyKeccakCodeHash) { + return 0 + } + size, err := db.ContractCodeSize(s.addrHash, common.BytesToHash(s.KeccakCodeHash())) + if err != nil { + s.setError(fmt.Errorf("can't load code size %x: %v", s.KeccakCodeHash(), err)) + } + return uint64(size) } func (s *stateObject) SetCode(code []byte) { @@ -534,6 +544,9 @@ func (s *stateObject) setNonce(nonce uint64) { } func (s *stateObject) PoseidonCodeHash() []byte { + if !s.db.IsZktrie() { + panic("PoseidonCodeHash is only available in zktrie mode") + } return s.data.PoseidonCodeHash } diff --git a/core/state/state_test.go b/core/state/state_test.go index ea98b2dab833..8b53cb2eca2a 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -155,7 +155,8 @@ func TestSnapshotEmpty(t *testing.T) { } func TestSnapshot2(t *testing.T) { - state, _ := New(common.Hash{}, NewDatabase(rawdb.NewMemoryDatabase()), nil) + stateDb := NewDatabase(rawdb.NewMemoryDatabase()) + state, _ := New(common.Hash{}, stateDb, nil) stateobjaddr0 := common.BytesToAddress([]byte("so0")) stateobjaddr1 := common.BytesToAddress([]byte("so1")) @@ -201,7 +202,7 @@ func TestSnapshot2(t *testing.T) { so0Restored.GetState(state.db, storageaddr) so0Restored.Code(state.db) // non-deleted is equal (restored) - compareStateObjects(so0Restored, so0, t) + compareStateObjects(so0Restored, so0, stateDb, t) // deleted should be nil, both before and after restore of state copy so1Restored := state.getStateObject(stateobjaddr1) @@ -210,7 +211,7 @@ func TestSnapshot2(t *testing.T) { } } -func compareStateObjects(so0, so1 *stateObject, t *testing.T) { +func compareStateObjects(so0, so1 *stateObject, db Database, t *testing.T) { if so0.Address() != so1.Address() { t.Fatalf("Address mismatch: have %v, want %v", so0.address, so1.address) } @@ -229,8 +230,8 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) { if !bytes.Equal(so0.PoseidonCodeHash(), so1.PoseidonCodeHash()) { t.Fatalf("PoseidonCodeHash mismatch: have %v, want %v", so0.PoseidonCodeHash(), so1.PoseidonCodeHash()) } - if so0.CodeSize() != so1.CodeSize() { - t.Fatalf("CodeSize mismatch: have %v, want %v", so0.CodeSize(), so1.CodeSize()) + if so0.CodeSize(db) != so1.CodeSize(db) { + t.Fatalf("CodeSize mismatch: have %v, want %v", so0.CodeSize(db), so1.CodeSize(db)) } if !bytes.Equal(so0.code, so1.code) { t.Fatalf("Code mismatch: have %v, want %v", so0.code, so1.code) diff --git a/core/state/statedb.go b/core/state/statedb.go index 6629a50eae57..7affd81fc409 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -29,6 +29,7 @@ import ( "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/state/snapshot" + "github.com/scroll-tech/go-ethereum/core/stateless" "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/log" @@ -106,6 +107,9 @@ type StateDB struct { validRevisions []revision nextRevisionId int + // State witness if cross validation is needed + witness *stateless.Witness + // Measurements gathered during execution for debugging purposes AccountReads time.Duration AccountHashes time.Duration @@ -159,11 +163,15 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) // StartPrefetcher initializes a new trie 
prefetcher to pull in nodes from the // state trie concurrently while the state is mutated so that when we reach the // commit phase, most of the needed data is already hot. -func (s *StateDB) StartPrefetcher(namespace string) { +func (s *StateDB) StartPrefetcher(namespace string, witness *stateless.Witness) { if s.prefetcher != nil { s.prefetcher.close() s.prefetcher = nil } + + // Enable witness collection if requested + s.witness = witness + if s.snap != nil { s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) } @@ -289,6 +297,9 @@ func (s *StateDB) TxIndex() int { func (s *StateDB) GetCode(addr common.Address) []byte { stateObject := s.getStateObject(addr) if stateObject != nil { + if s.witness != nil { + s.witness.AddCode(stateObject.Code(s.db)) + } return stateObject.Code(s.db) } return nil @@ -297,7 +308,10 @@ func (s *StateDB) GetCode(addr common.Address) []byte { func (s *StateDB) GetCodeSize(addr common.Address) uint64 { stateObject := s.getStateObject(addr) if stateObject != nil { - return stateObject.CodeSize() + if s.witness != nil { + s.witness.AddCode(stateObject.Code(s.db)) + } + return stateObject.CodeSize(s.db) } return 0 } @@ -725,6 +739,9 @@ func (s *StateDB) Copy() *StateDB { journal: newJournal(), hasher: crypto.NewKeccakState(), } + if s.witness != nil { + state.witness = s.witness.Copy() + } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { // As documented [here](https://github.com/scroll-tech/go-ethereum/pull/16485#issuecomment-380438527), @@ -913,7 +930,33 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // to pull useful data from disk. for addr := range s.stateObjectsPending { if obj := s.stateObjects[addr]; !obj.deleted { + + // If witness building is enabled and the state object has a trie, + // gather the witnesses for its specific storage trie + if s.witness != nil && obj.trie != nil { + s.witness.AddState(obj.trie.Witness()) + } + obj.updateRoot(s.db) + + // If witness building is enabled and the state object has a trie, + // gather the witnesses for its specific storage trie + if s.witness != nil && obj.trie != nil { + s.witness.AddState(obj.trie.Witness()) + } + } + } + + if s.witness != nil { + // If witness building is enabled, gather the account trie witness for read-only operations + for _, obj := range s.stateObjects { + if len(obj.originStorage) == 0 { + continue + } + + if trie := obj.getTrie(s.db); trie != nil { + s.witness.AddState(trie.Witness()) + } } } // Now we're about to start to write changes to the trie. The trie is so far @@ -945,7 +988,13 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { if metrics.EnabledExpensive { defer func(start time.Time) { s.AccountHashes += time.Since(start) }(time.Now()) } - return s.trie.Hash() + + hash := s.trie.Hash() + // If witness building is enabled, gather the account trie witness + if s.witness != nil { + s.witness.AddState(s.trie.Witness()) + } + return hash } // SetTxContext sets the current transaction hash and index which are diff --git a/core/stateless/database.go b/core/stateless/database.go new file mode 100644 index 000000000000..e6278a98f872 --- /dev/null +++ b/core/stateless/database.go @@ -0,0 +1,67 @@ +// Copyright 2024 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package stateless
+
+import (
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/core/rawdb"
+	"github.com/scroll-tech/go-ethereum/crypto"
+	"github.com/scroll-tech/go-ethereum/ethdb"
+)
+
+// MakeHashDB imports tries, codes and block hashes from a witness into a new
+// hash-based memory db. We could eventually rewrite this into a pathdb, but
+// simple is better for now.
+//
+// Note, this hashdb approach is quite strictly self-validating:
+//   - Headers are persisted keyed by hash, so blockhash will error on junk
+//   - Codes are persisted keyed by hash, so bytecode lookup will error on junk
+//   - Trie nodes are persisted keyed by hash, so trie expansion will error on junk
+//
+// Acceleration structures built would need to explicitly validate the witness.
+func (w *Witness) MakeHashDB() ethdb.Database {
+	var (
+		memdb  = rawdb.NewMemoryDatabase()
+		hasher = crypto.NewKeccakState()
+		hash   = make([]byte, 32)
+	)
+	// Inject all the "block hashes" (i.e. headers) into the ephemeral database
+	for _, header := range w.Headers {
+		rawdb.WriteHeader(memdb, header)
+	}
+	// Inject all the bytecodes into the ephemeral database
+	for code := range w.Codes {
+		blob := []byte(code)
+
+		hasher.Reset()
+		hasher.Write(blob)
+		hasher.Read(hash)
+
+		rawdb.WriteCode(memdb, common.BytesToHash(hash), blob)
+	}
+	// Inject all the MPT trie nodes into the ephemeral database
+	for node := range w.State {
+		blob := []byte(node)
+
+		hasher.Reset()
+		hasher.Write(blob)
+		hasher.Read(hash)
+
+		rawdb.WriteTrieNode(memdb, common.BytesToHash(hash), blob)
+	}
+	return memdb
+}
diff --git a/core/stateless/encoding.go b/core/stateless/encoding.go
new file mode 100644
index 000000000000..b67b7460924a
--- /dev/null
+++ b/core/stateless/encoding.go
@@ -0,0 +1,76 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package stateless
+
+import (
+	"io"
+
+	"github.com/scroll-tech/go-ethereum/core/types"
+	"github.com/scroll-tech/go-ethereum/rlp"
+)
+
+// toExtWitness converts our internal witness representation to the consensus one.
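+// RLP has no map encoding, so the Codes and State sets are flattened into
+// plain byte-slice lists before serialization.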
+func (w *Witness) toExtWitness() *extWitness {
+	ext := &extWitness{
+		Headers: w.Headers,
+	}
+	ext.Codes = make([][]byte, 0, len(w.Codes))
+	for code := range w.Codes {
+		ext.Codes = append(ext.Codes, []byte(code))
+	}
+	ext.State = make([][]byte, 0, len(w.State))
+	for node := range w.State {
+		ext.State = append(ext.State, []byte(node))
+	}
+	return ext
+}
+
+// fromExtWitness converts the consensus witness format into our internal one.
+func (w *Witness) fromExtWitness(ext *extWitness) error {
+	w.Headers = ext.Headers
+
+	w.Codes = make(map[string]struct{}, len(ext.Codes))
+	for _, code := range ext.Codes {
+		w.Codes[string(code)] = struct{}{}
+	}
+	w.State = make(map[string]struct{}, len(ext.State))
+	for _, node := range ext.State {
+		w.State[string(node)] = struct{}{}
+	}
+	return nil
+}
+
+// EncodeRLP serializes a witness as RLP.
+func (w *Witness) EncodeRLP(wr io.Writer) error {
+	return rlp.Encode(wr, w.toExtWitness())
+}
+
+// DecodeRLP decodes a witness from RLP.
+func (w *Witness) DecodeRLP(s *rlp.Stream) error {
+	var ext extWitness
+	if err := s.Decode(&ext); err != nil {
+		return err
+	}
+	return w.fromExtWitness(&ext)
+}
+
+// extWitness is a witness RLP encoding for transferring across clients.
+type extWitness struct {
+	Headers []*types.Header
+	Codes   [][]byte
+	State   [][]byte
+}
diff --git a/core/stateless/witness.go b/core/stateless/witness.go
new file mode 100644
index 000000000000..10e4b08ca1cb
--- /dev/null
+++ b/core/stateless/witness.go
@@ -0,0 +1,122 @@
+// Copyright 2024 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package stateless
+
+import (
+	"errors"
+	"maps"
+	"slices"
+	"sync"
+
+	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/core/types"
+)
+
+// HeaderReader is an interface to pull in headers in place of block hashes for
+// the witness.
+type HeaderReader interface {
+	// GetHeader retrieves a block header from the database by hash and number.
+	GetHeader(hash common.Hash, number uint64) *types.Header
+}
+
+// Witness encompasses the state required to apply a set of transactions and
+// derive a post state/receipt root.
+type Witness struct {
+	context *types.Header // Header to which this witness belongs, with rootHash and receiptHash zeroed out
+
+	Headers []*types.Header     // Past headers in reverse order (0=parent, 1=parent's-parent, etc). First *must* be set.
+	Codes   map[string]struct{} // Set of bytecodes run or accessed
+	State   map[string]struct{} // Set of MPT state trie nodes (account and storage together)
+
+	chain HeaderReader // Chain reader to convert block hash ops to header proofs
+	lock  sync.Mutex   // Lock to allow concurrent state insertions
+}
+
+// NewWitness creates an empty witness ready for population.
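+//
+// A typical call site (cf. the debug_executionWitness handler later in this
+// patch) wires the witness into a prefetching StateDB:
+//
+//	witness, err := stateless.NewWitness(block.Header(), chain)
+//	if err != nil {
+//		return nil, err
+//	}
+//	statedb.StartPrefetcher("witness", witness)
+//	defer statedb.StopPrefetcher()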
+func NewWitness(context *types.Header, chain HeaderReader) (*Witness, error) {
+	// When building witnesses, retrieve the parent header, which will *always*
+	// be included to act as a trustless pre-root hash container
+	var headers []*types.Header
+	if chain != nil {
+		parent := chain.GetHeader(context.ParentHash, context.Number.Uint64()-1)
+		if parent == nil {
+			return nil, errors.New("failed to retrieve parent header")
+		}
+		headers = append(headers, parent)
+	}
+	// Create the witness with a reconstructed gutted-out block
+	return &Witness{
+		context: context,
+		Headers: headers,
+		Codes:   make(map[string]struct{}),
+		State:   make(map[string]struct{}),
+		chain:   chain,
+	}, nil
+}
+
+// AddBlockHash adds a "blockhash" to the witness with the designated offset from
+// chain head. Under the hood, this method actually pulls in enough headers from
+// the chain to cover the block being added.
+func (w *Witness) AddBlockHash(number uint64) {
+	// Keep pulling in headers until this hash is populated
+	for int(w.context.Number.Uint64()-number) > len(w.Headers) {
+		tail := w.Headers[len(w.Headers)-1]
+		w.Headers = append(w.Headers, w.chain.GetHeader(tail.ParentHash, tail.Number.Uint64()-1))
+	}
+}
+
+// AddCode adds a bytecode blob to the witness.
+func (w *Witness) AddCode(code []byte) {
+	if len(code) == 0 {
+		return
+	}
+	w.Codes[string(code)] = struct{}{}
+}
+
+// AddState inserts a batch of MPT trie nodes into the witness.
+func (w *Witness) AddState(nodes map[string]struct{}) {
+	if len(nodes) == 0 {
+		return
+	}
+	w.lock.Lock()
+	defer w.lock.Unlock()
+
+	maps.Copy(w.State, nodes)
+}
+
+// Copy deep-copies the witness object. The chain reader is shared rather than
+// deep-copied, as Witness never mutates it.
+func (w *Witness) Copy() *Witness {
+	cpy := &Witness{
+		Headers: slices.Clone(w.Headers),
+		Codes:   maps.Clone(w.Codes),
+		State:   maps.Clone(w.State),
+		chain:   w.chain,
+	}
+	if w.context != nil {
+		cpy.context = types.CopyHeader(w.context)
+	}
+	return cpy
+}
+
+// Root returns the pre-state root from the first header.
+//
+// Note, this method will panic in case of a bad witness (but RLP decoding will
+// sanitize it and fail before that).
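+//
+// On chains migrated from zktrie this is the root recorded in the header;
+// callers needing the underlying MPT root (e.g. testWitness in eth/api.go)
+// map it through rawdb.ReadDiskStateRoot first.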
+func (w *Witness) Root() common.Hash { + return w.Headers[0].Root +} diff --git a/core/types/state_account.go b/core/types/state_account.go index bb396d439d9e..bde0331d00b3 100644 --- a/core/types/state_account.go +++ b/core/types/state_account.go @@ -31,6 +31,6 @@ type StateAccount struct { KeccakCodeHash []byte // StateAccount Scroll extensions - PoseidonCodeHash []byte - CodeSize uint64 + PoseidonCodeHash []byte `rlp:"-"` + CodeSize uint64 `rlp:"-"` } diff --git a/eth/api.go b/eth/api.go index 74672347823c..07f5874037c8 100644 --- a/eth/api.go +++ b/eth/api.go @@ -34,11 +34,14 @@ import ( "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/core/state" + "github.com/scroll-tech/go-ethereum/core/stateless" "github.com/scroll-tech/go-ethereum/core/types" + "github.com/scroll-tech/go-ethereum/crypto" "github.com/scroll-tech/go-ethereum/internal/ethapi" "github.com/scroll-tech/go-ethereum/log" "github.com/scroll-tech/go-ethereum/rlp" "github.com/scroll-tech/go-ethereum/rollup/ccc" + "github.com/scroll-tech/go-ethereum/rollup/rcfg" "github.com/scroll-tech/go-ethereum/rpc" "github.com/scroll-tech/go-ethereum/trie" ) @@ -321,6 +324,109 @@ func (api *PublicDebugAPI) DumpBlock(blockNr rpc.BlockNumber) (state.Dump, error return stateDb.RawDump(opts), nil } +func (api *PublicDebugAPI) ExecutionWitness(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (*ExecutionWitness, error) { + block, err := api.eth.APIBackend.BlockByNumberOrHash(ctx, blockNrOrHash) + if err != nil { + return nil, fmt.Errorf("failed to retrieve block: %w", err) + } + if block == nil { + return nil, fmt.Errorf("block not found: %s", blockNrOrHash.String()) + } + + witness, err := generateWitness(api.eth.blockchain, block) + return ToExecutionWitness(witness), err +} + +func generateWitness(blockchain *core.BlockChain, block *types.Block) (*stateless.Witness, error) { + witness, err := stateless.NewWitness(block.Header(), blockchain) + if err != nil { + return nil, fmt.Errorf("failed to create witness: %w", err) + } + + parentHeader := witness.Headers[0] + statedb, err := blockchain.StateAt(parentHeader.Root) + if err != nil { + return nil, fmt.Errorf("failed to retrieve parent state: %w", err) + } + + // Collect storage locations that prover needs but sequencer might not touch necessarily + statedb.GetState(rcfg.L2MessageQueueAddress, rcfg.WithdrawTrieRootSlot) + + statedb.StartPrefetcher("debug_execution_witness", witness) + defer statedb.StopPrefetcher() + + receipts, _, usedGas, err := blockchain.Processor().Process(block, statedb, *blockchain.GetVMConfig()) + if err != nil { + return nil, fmt.Errorf("failed to process block %d: %w", block.Number(), err) + } + + if err := blockchain.Validator().ValidateState(block, statedb, receipts, usedGas); err != nil { + return nil, fmt.Errorf("failed to validate block %d: %w", block.Number(), err) + } + return witness, testWitness(blockchain, block, witness) +} + +func testWitness(blockchain *core.BlockChain, block *types.Block, witness *stateless.Witness) error { + stateRoot := witness.Root() + if diskRoot, _ := rawdb.ReadDiskStateRoot(blockchain.Database(), stateRoot); diskRoot != (common.Hash{}) { + stateRoot = diskRoot + } + + // Create and populate the state database to serve as the stateless backend + statedb, err := state.New(stateRoot, state.NewDatabase(witness.MakeHashDB()), nil) + if err != nil { + return fmt.Errorf("failed to create state database: %w", err) + } + + receipts, _, usedGas, err := 
blockchain.Processor().Process(block, statedb, *blockchain.GetVMConfig())
+	if err != nil {
+		return fmt.Errorf("failed to process block %d: %w", block.Number(), err)
+	}
+
+	if err := blockchain.Validator().ValidateState(block, statedb, receipts, usedGas); err != nil {
+		return fmt.Errorf("failed to validate block %d: %w", block.Number(), err)
+	}
+
+	postStateRoot := block.Root()
+	if diskRoot, _ := rawdb.ReadDiskStateRoot(blockchain.Database(), postStateRoot); diskRoot != (common.Hash{}) {
+		postStateRoot = diskRoot
+	}
+	if statedb.GetRootHash() != postStateRoot {
+		return fmt.Errorf("failed to commit statelessly %d: state root mismatch, expected: %x, got: %x", block.Number(), postStateRoot, statedb.GetRootHash())
+	}
+	return nil
+}
+
+// ExecutionWitness is a witness json encoding for transferring across the network.
+// In the future, we'll probably consider using the extWitness format instead for less overhead if performance becomes an issue.
+// Currently using this format for ease of reading, parsing and compatibility across clients.
+type ExecutionWitness struct {
+	Headers []*types.Header   `json:"headers"`
+	Codes   map[string]string `json:"codes"`
+	State   map[string]string `json:"state"`
+}
+
+func transformMap(in map[string]struct{}) map[string]string {
+	out := make(map[string]string, len(in))
+	for item := range in {
+		bytes := []byte(item)
+		key := crypto.Keccak256Hash(bytes).Hex()
+		out[key] = hexutil.Encode(bytes)
+	}
+	return out
+}
+
+// ToExecutionWitness converts a witness to an execution witness format that is compatible with reth.
+// keccak(node) => node
+// keccak(bytecodes) => bytecodes
+func ToExecutionWitness(w *stateless.Witness) *ExecutionWitness {
+	return &ExecutionWitness{
+		Headers: w.Headers,
+		Codes:   transformMap(w.Codes),
+		State:   transformMap(w.State),
+	}
+}
+
 // PrivateDebugAPI is the collection of Ethereum full node APIs exposed over
 // the private debugging endpoint.
 type PrivateDebugAPI struct {
@@ -859,3 +965,30 @@ func (api *ScrollAPI) CalculateRowConsumptionByBlockNumber(ctx context.Context,
 	asyncChecker.Wait()
 	return rawdb.ReadBlockRowConsumption(api.eth.ChainDb(), block.Hash()), checkErr
 }
+
+type DiskAndHeaderRoot struct {
+	DiskRoot   common.Hash `json:"diskRoot"`
+	HeaderRoot common.Hash `json:"headerRoot"`
+}
+
+// DiskRoot returns the MPT state root stored on disk for the given block,
+// together with the state root recorded in the block header.
+func (api *ScrollAPI) DiskRoot(ctx context.Context, blockNrOrHash *rpc.BlockNumberOrHash) (DiskAndHeaderRoot, error) {
+	block, err := api.eth.APIBackend.BlockByNumberOrHash(ctx, *blockNrOrHash)
+	if err != nil {
+		return DiskAndHeaderRoot{}, fmt.Errorf("failed to retrieve block: %w", err)
+	}
+	if block == nil {
+		return DiskAndHeaderRoot{}, fmt.Errorf("block not found: %s", blockNrOrHash.String())
+	}
+
+	if diskRoot, _ := rawdb.ReadDiskStateRoot(api.eth.ChainDb(), block.Root()); diskRoot != (common.Hash{}) {
+		return DiskAndHeaderRoot{
+			DiskRoot:   diskRoot,
+			HeaderRoot: block.Root(),
+		}, nil
+	}
+	return DiskAndHeaderRoot{
+		DiskRoot:   block.Root(),
+		HeaderRoot: block.Root(),
+	}, nil
+}
diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go
index d43e01467ee3..8de02ae6f57e 100644
--- a/internal/web3ext/web3ext.go
+++ b/internal/web3ext/web3ext.go
@@ -482,6 +482,13 @@ web3._extend({
 			params: 2,
 			inputFormatter:[web3._extend.formatters.inputBlockNumberFormatter, web3._extend.formatters.inputBlockNumberFormatter],
 		}),
+		new web3._extend.Method({
+			name: 'executionWitness',
+			call: 'debug_executionWitness',
+			params: 1,
+			inputFormatter: [null]
+		}),
+
 	],
 	properties: []
 });
@@ -942,6 +949,13 @@ web3._extend({
 			params: 1,
 			inputFormatter: [web3._extend.formatters.inputBlockNumberFormatter]
 		}),
+		new web3._extend.Method({
+			name: 'diskRoot',
+			call: 'scroll_diskRoot',
+			params: 1,
+			inputFormatter: [web3._extend.formatters.inputDefaultBlockNumberFormatter],
+		}),
+
 	],
 	properties: [
diff --git a/light/trie.go b/light/trie.go
index 1947c314090b..148814f3fbc5 100644
--- a/light/trie.go
+++ b/light/trie.go
@@ -181,6 +181,11 @@ func (t *odrTrie) do(key []byte, fn func() error) error {
 	}
 }
+
+// Witness returns a set containing all trie nodes that have been accessed.
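+//
+// ODR tries resolve nodes lazily over the light client protocol, so witness
+// collection is not supported here and the method simply panics.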
+func (t *odrTrie) Witness() map[string]struct{} { + panic("not implemented") +} + type nodeIterator struct { trie.NodeIterator t *odrTrie diff --git a/miner/scroll_worker.go b/miner/scroll_worker.go index e152878d40e6..a6aa7c21c207 100644 --- a/miner/scroll_worker.go +++ b/miner/scroll_worker.go @@ -372,7 +372,7 @@ func (w *worker) mainLoop() { select { case <-w.startCh: idleTimer.UpdateSince(idleStart) - if w.isRunning() { + if w.isRunning() && w.chainConfig.Scroll.UseZktrie { if err := w.checkHeadRowConsumption(); err != nil { log.Error("failed to start head checkers", "err", err) return @@ -490,9 +490,10 @@ func (w *worker) newWork(now time.Time, parentHash common.Hash, reorging bool, r vmConfig := *w.chain.GetVMConfig() cccLogger := ccc.NewLogger() - vmConfig.Debug = true - vmConfig.Tracer = cccLogger - + if w.chainConfig.Scroll.UseZktrie { + vmConfig.Debug = true + vmConfig.Tracer = cccLogger + } deadline := time.Unix(int64(header.Time), 0) if w.chainConfig.Clique != nil && w.chainConfig.Clique.RelaxedPeriod { // clique with relaxed period uses time.Now() as the header.Time, calculate the deadline @@ -566,6 +567,11 @@ func (w *worker) handleForks() (bool, error) { misc.ApplyCurieHardFork(w.current.state) return true, nil } + + if w.chainConfig.IsEuclid(w.current.header.Time) { + parent := w.chain.GetBlockByHash(w.current.header.ParentHash) + return parent != nil && !w.chainConfig.IsEuclid(parent.Time()), nil + } return false, nil } @@ -809,7 +815,10 @@ func (w *worker) commit() (common.Hash, error) { }(time.Now()) w.updateSnapshot() - if !w.isRunning() && !w.current.reorging { + // Since clocks of mpt-sequencer and zktrie-sequencer can be slightly out of sync, + // this might result in a reorg at the Euclid fork block. But it will be resolved shortly after. 
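+	// In effect, exactly one side commits at any time: zktrie sequencers only
+	// before Euclid, MPT sequencers only from Euclid onwards.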
+ canCommitState := w.chainConfig.Scroll.UseZktrie != w.chainConfig.IsEuclid(w.current.header.Time) + if !canCommitState || (!w.isRunning() && !w.current.reorging) { return common.Hash{}, nil } @@ -886,7 +895,7 @@ func (w *worker) commit() (common.Hash, error) { currentHeight := w.current.header.Number.Uint64() maxReorgDepth := uint64(w.config.CCCMaxWorkers + 1) - if !w.current.reorging && currentHeight > maxReorgDepth { + if w.chainConfig.Scroll.UseZktrie && !w.current.reorging && currentHeight > maxReorgDepth { ancestorHeight := currentHeight - maxReorgDepth ancestorHash := w.chain.GetHeaderByNumber(ancestorHeight).Hash() if rawdb.ReadBlockRowConsumption(w.chain.Database(), ancestorHash) == nil { @@ -914,8 +923,10 @@ func (w *worker) commit() (common.Hash, error) { w.mux.Post(core.NewMinedBlockEvent{Block: block}) checkStart := time.Now() - if err = w.asyncChecker.Check(block); err != nil { - log.Error("failed to launch CCC background task", "err", err) + if w.chainConfig.Scroll.UseZktrie { + if err = w.asyncChecker.Check(block); err != nil { + log.Error("failed to launch CCC background task", "err", err) + } } cccStallTimer.UpdateSince(checkStart) diff --git a/miner/scroll_worker_test.go b/miner/scroll_worker_test.go index 5f79902f0e15..e890d9d6def4 100644 --- a/miner/scroll_worker_test.go +++ b/miner/scroll_worker_test.go @@ -224,14 +224,15 @@ func testGenerateBlockAndImport(t *testing.T, isClique bool) { db = rawdb.NewMemoryDatabase() ) if isClique { - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} engine = clique.New(chainConfig.Clique, db) } else { - chainConfig = params.AllEthashProtocolChanges + chainConfig = params.AllEthashProtocolChanges.Clone() engine = ethash.NewFaker() } chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true chainConfig.LondonBlock = big.NewInt(0) w, b := newTestWorker(t, chainConfig, engine, db, 0) @@ -285,17 +286,18 @@ func testGenerateBlockWithL1Msg(t *testing.T, isClique bool) { rawdb.WriteL1Messages(db, msgs) if isClique { - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} engine = clique.New(chainConfig.Clique, db) } else { - chainConfig = params.AllEthashProtocolChanges + chainConfig = params.AllEthashProtocolChanges.Clone() engine = ethash.NewFaker() } chainConfig.Scroll.L1Config = ¶ms.L1Config{ NumL1MessagesPerBlock: 1, } chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true chainConfig.LondonBlock = big.NewInt(0) w, b := newTestWorker(t, chainConfig, engine, db, 0) @@ -341,9 +343,10 @@ func TestAcceptableTxlimit(t *testing.T) { chainConfig *params.ChainConfig db = rawdb.NewMemoryDatabase() ) - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true engine = clique.New(chainConfig.Clique, db) // Set maxTxPerBlock = 4, which >= non-l1msg + non-skipped l1msg txs @@ -401,9 +404,10 @@ func TestUnacceptableTxlimit(t *testing.T) { chainConfig *params.ChainConfig db = rawdb.NewMemoryDatabase() ) - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 
30000} chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true engine = clique.New(chainConfig.Clique, db) // Set maxTxPerBlock = 3, which < non-l1msg + l1msg txs @@ -460,9 +464,10 @@ func TestL1MsgCorrectOrder(t *testing.T) { chainConfig *params.ChainConfig db = rawdb.NewMemoryDatabase() ) - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true engine = clique.New(chainConfig.Clique, db) maxTxPerBlock := 4 @@ -523,8 +528,9 @@ func l1MessageTest(t *testing.T, msgs []types.L1MessageTx, withL2Tx bool, callba ) rawdb.WriteL1Messages(db, msgs) - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} + chainConfig.Scroll.UseZktrie = true engine = clique.New(chainConfig.Clique, db) maxTxPerBlock := 4 chainConfig.Scroll.MaxTxPerBlock = &maxTxPerBlock @@ -872,13 +878,14 @@ func TestPrioritizeOverflowTx(t *testing.T) { assert := assert.New(t) var ( - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() db = rawdb.NewMemoryDatabase() ) chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} chainConfig.LondonBlock = big.NewInt(0) chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true engine := clique.New(chainConfig.Clique, db) w, b := newTestWorker(t, chainConfig, engine, db, 0) @@ -1033,9 +1040,10 @@ func TestPending(t *testing.T) { chainConfig *params.ChainConfig db = rawdb.NewMemoryDatabase() ) - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000} chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true engine = clique.New(chainConfig.Clique, db) w, b := newTestWorker(t, chainConfig, engine, db, 0) defer w.close() @@ -1077,9 +1085,10 @@ func TestReorg(t *testing.T) { chainConfig *params.ChainConfig db = rawdb.NewMemoryDatabase() ) - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000, RelaxedPeriod: true} chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true engine = clique.New(chainConfig.Clique, db) maxTxPerBlock := 2 @@ -1191,9 +1200,10 @@ func TestRestartHeadCCC(t *testing.T) { chainConfig *params.ChainConfig db = rawdb.NewMemoryDatabase() ) - chainConfig = params.AllCliqueProtocolChanges + chainConfig = params.AllCliqueProtocolChanges.Clone() chainConfig.Clique = ¶ms.CliqueConfig{Period: 1, Epoch: 30000, RelaxedPeriod: true} chainConfig.Scroll.FeeVaultAddress = &common.Address{} + chainConfig.Scroll.UseZktrie = true engine = clique.New(chainConfig.Clique, db) maxTxPerBlock := 2 diff --git a/params/config.go b/params/config.go index eb2213fdd360..21fa4f05f067 100644 --- a/params/config.go +++ b/params/config.go @@ -18,6 +18,7 @@ package params import ( "encoding/binary" + "encoding/json" "fmt" "math/big" @@ -29,14 +30,16 @@ import ( // Genesis hashes to enforce below configs on. 
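+// ScrollSepoliaGenesisState and ScrollMainnetGenesisState are the zktrie
+// genesis state roots; MPT clients use them (via Scroll.GenesisStateRoot) to
+// map the genesis header root onto their own disk state root.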
var ( - MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") - RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") - SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") - RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") - GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") - ScrollAlphaGenesisHash = common.HexToHash("0xa4fc62b9b0643e345bdcebe457b3ae898bef59c7203c3db269200055e037afda") - ScrollSepoliaGenesisHash = common.HexToHash("0xaa62d1a8b2bffa9e5d2368b63aae0d98d54928bd713125e3fd9e5c896c68592c") - ScrollMainnetGenesisHash = common.HexToHash("0xbbc05efd412b7cd47a2ed0e5ddfcf87af251e414ea4c801d78b6784513180a80") + MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") + RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") + SepoliaGenesisHash = common.HexToHash("0x25a5cc106eea7138acab33231d7160d69cb777ee0c2c553fcddf5138993e6dd9") + RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") + GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") + ScrollAlphaGenesisHash = common.HexToHash("0xa4fc62b9b0643e345bdcebe457b3ae898bef59c7203c3db269200055e037afda") + ScrollSepoliaGenesisHash = common.HexToHash("0xaa62d1a8b2bffa9e5d2368b63aae0d98d54928bd713125e3fd9e5c896c68592c") + ScrollMainnetGenesisHash = common.HexToHash("0xbbc05efd412b7cd47a2ed0e5ddfcf87af251e414ea4c801d78b6784513180a80") + ScrollSepoliaGenesisState = common.HexToHash("0x20695989e9038823e35f0e88fbc44659ffdbfa1fe89fbeb2689b43f15fa64cb5") + ScrollMainnetGenesisState = common.HexToHash("0x08d535cc60f40af5dd3b31e0998d7567c2d568b224bed2ba26070aeb078d1339") ) func newUint64(val uint64) *uint64 { return &val } @@ -340,6 +343,7 @@ var ( NumL1MessagesPerBlock: 10, ScrollChainAddress: common.HexToAddress("0x2D567EcE699Eabe5afCd141eDB7A4f2D0D6ce8a0"), }, + GenesisStateRoot: &ScrollSepoliaGenesisState, }, } @@ -380,6 +384,7 @@ var ( NumL1MessagesPerBlock: 10, ScrollChainAddress: common.HexToAddress("0xa13BAF47339d63B743e7Da8741db5456DAc1E556"), }, + GenesisStateRoot: &ScrollMainnetGenesisState, }, } @@ -633,6 +638,7 @@ type ChainConfig struct { CurieBlock *big.Int `json:"curieBlock,omitempty"` // Curie switch block (nil = no fork, 0 = already on curie) DarwinTime *uint64 `json:"darwinTime,omitempty"` // Darwin switch time (nil = no fork, 0 = already on darwin) DarwinV2Time *uint64 `json:"darwinv2Time,omitempty"` // DarwinV2 switch time (nil = no fork, 0 = already on darwinv2) + EuclidTime *uint64 `json:"euclidTime,omitempty"` // Euclid switch time (nil = no fork, 0 = already on euclid) // TerminalTotalDifficulty is the amount of total difficulty reached by // the network that triggers the consensus upgrade. 
@@ -646,6 +652,18 @@ type ChainConfig struct {
 	Scroll ScrollConfig `json:"scroll,omitempty"`
 }
 
+func (c *ChainConfig) Clone() *ChainConfig {
+	var clone ChainConfig
+	j, err := json.Marshal(c)
+	if err != nil {
+		panic(err)
+	}
+	if err = json.Unmarshal(j, &clone); err != nil {
+		panic(err)
+	}
+	return &clone
+}
+
 type ScrollConfig struct {
 	// Use zktrie [optional]
 	UseZktrie bool `json:"useZktrie,omitempty"`
@@ -661,6 +679,9 @@ type ScrollConfig struct {
 	// L1 config
 	L1Config *L1Config `json:"l1Config,omitempty"`
+
+	// Genesis State Root for MPT clients
+	GenesisStateRoot *common.Hash `json:"genesisStateRoot,omitempty"`
 }
 
 // L1Config contains the l1 parameters needed to sync l1 contract events (e.g., l1 messages, commit/revert/finalize batches) in the sequencer
@@ -888,6 +909,11 @@ func (c *ChainConfig) IsDarwinV2(now uint64) bool {
 	return isForkedTime(now, c.DarwinV2Time)
 }
 
+// IsEuclid returns whether now is either equal to the Euclid fork time or greater.
+func (c *ChainConfig) IsEuclid(now uint64) bool {
+	return isForkedTime(now, c.EuclidTime)
+}
+
 // IsTerminalPoWBlock returns whether the given block is the last block of PoW stage.
 func (c *ChainConfig) IsTerminalPoWBlock(parentTotalDiff *big.Int, totalDiff *big.Int) bool {
 	if c.TerminalTotalDifficulty == nil {
@@ -1100,7 +1126,7 @@ type Rules struct {
 	IsHomestead, IsEIP150, IsEIP155, IsEIP158               bool
 	IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool
 	IsBerlin, IsLondon, IsArchimedes, IsShanghai            bool
-	IsBernoulli, IsCurie, IsDarwin                          bool
+	IsBernoulli, IsCurie, IsDarwin, IsEuclid                bool
 }
 
 // Rules ensures c's ChainID is not nil.
@@ -1126,5 +1152,6 @@ func (c *ChainConfig) Rules(num *big.Int, time uint64) Rules {
 		IsBernoulli: c.IsBernoulli(num),
 		IsCurie:     c.IsCurie(num),
 		IsDarwin:    c.IsDarwin(time),
+		IsEuclid:    c.IsEuclid(time),
 	}
 }
diff --git a/rollup/ccc/async_checker.go b/rollup/ccc/async_checker.go
index 1cb9b7d78768..b5815cb58403 100644
--- a/rollup/ccc/async_checker.go
+++ b/rollup/ccc/async_checker.go
@@ -98,6 +98,11 @@ func (c *AsyncChecker) Wait() {
 }
 
 // Check spawns an async CCC verification task.
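+// Post-Euclid (MPT) blocks are skipped outright, as CCC relies on zktrie
+// tracing that those blocks no longer produce.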
func (c *AsyncChecker) Check(block *types.Block) error { + if c.bc.Config().IsEuclid(block.Time()) { + // Euclid blocks use MPT and CCC doesn't support them + return nil + } + if block.NumberU64() > c.currentHead.Number.Uint64()+1 { log.Warn("non continuous chain observed in AsyncChecker", "prev", c.currentHead, "got", block.Header()) } diff --git a/rollup/ccc/async_checker_test.go b/rollup/ccc/async_checker_test.go index bbfcd9b99996..97fcda030107 100644 --- a/rollup/ccc/async_checker_test.go +++ b/rollup/ccc/async_checker_test.go @@ -25,18 +25,20 @@ func TestAsyncChecker(t *testing.T) { // Create a database pre-initialize with a genesis block db := rawdb.NewMemoryDatabase() + chainConfig := params.TestChainConfig.Clone() + chainConfig.Scroll.UseZktrie = true (&core.Genesis{ - Config: params.TestChainConfig, + Config: chainConfig, Alloc: core.GenesisAlloc{testAddr: {Balance: new(big.Int).Mul(big.NewInt(1000), big.NewInt(params.Ether))}}, }).MustCommit(db) - chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + chain, _ := core.NewBlockChain(db, nil, chainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) asyncChecker := NewAsyncChecker(chain, 1, false) chain.Validator().WithAsyncValidator(asyncChecker.Check) - bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, 100, func(i int, block *core.BlockGen) { + bs, _ := core.GenerateChain(chainConfig, chain.Genesis(), ethash.NewFaker(), db, 100, func(i int, block *core.BlockGen) { for i := 0; i < 10; i++ { - signer := types.MakeSigner(params.TestChainConfig, block.Number()) + signer := types.MakeSigner(chainConfig, block.Number()) tx, err := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), testAddr, big.NewInt(1000), params.TxGas, block.BaseFee(), nil), signer, testKey) if err != nil { panic(err) diff --git a/rollup/pipeline/pipeline.go b/rollup/pipeline/pipeline.go index 90c6149b3858..77ac3aee24a0 100644 --- a/rollup/pipeline/pipeline.go +++ b/rollup/pipeline/pipeline.go @@ -228,7 +228,7 @@ func sendCancellable[T any, C comparable](resCh chan T, msg T, cancelCh <-chan C } func (p *Pipeline) traceAndApplyStage(txsIn <-chan *types.Transaction) (<-chan error, <-chan *BlockCandidate, error) { - p.state.StartPrefetcher("miner") + p.state.StartPrefetcher("miner", nil) downstreamCh := make(chan *BlockCandidate, p.downstreamChCapacity()) resCh := make(chan error) p.wg.Add(1) diff --git a/trie/database.go b/trie/database.go index 1c5b7f805aea..1301a83a5e98 100644 --- a/trie/database.go +++ b/trie/database.go @@ -667,6 +667,9 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H } batch.Reset() + if diskRoot, err := rawdb.ReadDiskStateRoot(db.diskdb, node); err == nil { + node = diskRoot + } if (node == common.Hash{}) { return nil } @@ -782,7 +785,7 @@ func (c *cleaner) Put(key []byte, rlp []byte) error { delete(c.db.dirties, hash) c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) if node.children != nil { - c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) + c.db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) } // Move the flushed node into the clean cache to prevent insta-reloads if c.db.cleans != nil { diff --git a/trie/iterator_test.go b/trie/iterator_test.go index 761d812bdfcc..7516d2879010 100644 --- a/trie/iterator_test.go +++ b/trie/iterator_test.go @@ -296,7 +296,7 @@ func 
TestUnionIterator(t *testing.T) { } func TestIteratorNoDups(t *testing.T) { - var tr Trie + tr := newEmpty() for _, val := range testdata1 { tr.Update([]byte(val.k), []byte(val.v)) } @@ -530,7 +530,7 @@ func TestNodeIteratorLargeTrie(t *testing.T) { trie.NodeIterator(common.FromHex("0x77667766776677766778855885885885")) // master: 24 get operations // this pr: 5 get operations - if have, want := logDb.getCount, uint64(5); have != want { + if have, want := logDb.getCount, uint64(10); have != want { t.Fatalf("Too many lookups during seek, have %d want %d", have, want) } } diff --git a/trie/proof.go b/trie/proof.go index 58fb4c3cc78a..3362512b20c7 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -559,7 +559,7 @@ func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, key } // Rebuild the trie with the leaf stream, the shape of trie // should be same with the original one. - tr := &Trie{root: root, db: NewDatabase(memorydb.New())} + tr := &Trie{root: root, db: NewDatabase(memorydb.New()), tracer: newTracer()} if empty { tr.root = nil } diff --git a/trie/proof_test.go b/trie/proof_test.go index 2155ae0fbd6a..5c5304261d2d 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -79,7 +79,7 @@ func TestProof(t *testing.T) { } func TestOneElementProof(t *testing.T) { - trie := new(Trie) + trie := newEmpty() updateString(trie, "k", "v") for i, prover := range makeProvers(trie) { proof := prover([]byte("k")) @@ -130,7 +130,7 @@ func TestBadProof(t *testing.T) { // Tests that missing keys can also be proven. The test explicitly uses a single // entry trie and checks for missing keys both before and after the single entry. func TestMissingKeyProof(t *testing.T) { - trie := new(Trie) + trie := newEmpty() updateString(trie, "k", "v") for i, key := range []string{"a", "j", "l", "z"} { @@ -386,7 +386,7 @@ func TestOneElementRangeProof(t *testing.T) { } // Test the mini trie with only a single element. - tinyTrie := new(Trie) + tinyTrie := newEmpty() entry := &kv{randBytes(32), randBytes(20), false} tinyTrie.Update(entry.k, entry.v) @@ -458,7 +458,7 @@ func TestAllElementsProof(t *testing.T) { // TestSingleSideRangeProof tests the range starts from zero. func TestSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { - trie := new(Trie) + trie := newEmpty() var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} @@ -493,7 +493,7 @@ func TestSingleSideRangeProof(t *testing.T) { // TestReverseSingleSideRangeProof tests the range ends with 0xffff...fff. func TestReverseSingleSideRangeProof(t *testing.T) { for i := 0; i < 64; i++ { - trie := new(Trie) + trie := newEmpty() var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} @@ -600,7 +600,7 @@ func TestBadRangeProof(t *testing.T) { // TestGappedRangeProof focuses on the small trie with embedded nodes. // If the gapped node is embedded in the trie, it should be detected too. 
func TestGappedRangeProof(t *testing.T) { - trie := new(Trie) + trie := newEmpty() var entries []*kv // Sorted entries for i := byte(0); i < 10; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} @@ -674,7 +674,7 @@ func TestSameSideProofs(t *testing.T) { } func TestHasRightElement(t *testing.T) { - trie := new(Trie) + trie := newEmpty() var entries entrySlice for i := 0; i < 4096; i++ { value := &kv{randBytes(32), randBytes(20), false} @@ -1027,7 +1027,7 @@ func benchmarkVerifyRangeNoProof(b *testing.B, size int) { } func randomTrie(n int) (*Trie, map[string]*kv) { - trie := new(Trie) + trie := newEmpty() vals := make(map[string]*kv) for i := byte(0); i < 100; i++ { value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} @@ -1052,7 +1052,7 @@ func randBytes(n int) []byte { } func nonRandomTrie(n int) (*Trie, map[string]*kv) { - trie := new(Trie) + trie := newEmpty() vals := make(map[string]*kv) max := uint64(0xffffffffffffffff) for i := uint64(0); i < uint64(n); i++ { diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 253b8d780ad3..a3529c2fc68f 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -190,6 +190,7 @@ func (t *SecureTrie) Hash() common.Hash { // Copy returns a copy of SecureTrie. func (t *SecureTrie) Copy() *SecureTrie { cpy := *t + cpy.trie.tracer = t.trie.tracer.copy() return &cpy } @@ -221,3 +222,8 @@ func (t *SecureTrie) getSecKeyCache() map[string][]byte { } return t.secKeyCache } + +// Witness returns a set containing all trie nodes that have been accessed. +func (t *SecureTrie) Witness() map[string]struct{} { + return t.trie.Witness() +} diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go index b81b4e1ad5b8..9baaa2e266ed 100644 --- a/trie/secure_trie_test.go +++ b/trie/secure_trie_test.go @@ -112,8 +112,7 @@ func TestSecureTrieConcurrency(t *testing.T) { threads := runtime.NumCPU() tries := make([]*SecureTrie, threads) for i := 0; i < threads; i++ { - cpy := *trie - tries[i] = &cpy + tries[i] = trie.Copy() } // Start a batch of goroutines interactng with the trie pend := new(sync.WaitGroup) diff --git a/trie/tracer.go b/trie/tracer.go new file mode 100644 index 000000000000..99cda0706f7d --- /dev/null +++ b/trie/tracer.go @@ -0,0 +1,122 @@ +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package trie + +import ( + "maps" + + "github.com/scroll-tech/go-ethereum/common" +) + +// tracer tracks the changes of trie nodes. During the trie operations, +// some nodes can be deleted from the trie, while these deleted nodes +// won't be captured by trie.Hasher or trie.Committer. Thus, these deleted +// nodes won't be removed from the disk at all. Tracer is an auxiliary tool +// used to track all insert and delete operations of trie and capture all +// deleted nodes eventually.
+// +// The changed nodes can be mainly divided into two categories: the leaf +// node and intermediate node. The former is inserted/deleted by callers +// while the latter is inserted/deleted in order to follow the rule of trie. +// This tool can track all of them no matter whether the node is embedded in its +// parent or not, but valueNode is never tracked. +// +// Besides, it's also used for recording the original value of the nodes +// when they are resolved from the disk. The pre-value of the nodes will +// be used to construct trie history in the future. +// +// Note tracer is not thread-safe; callers should be responsible for handling +// the concurrency issues by themselves. +type tracer struct { + inserts map[string]struct{} + deletes map[string]struct{} + accessList map[string][]byte +} + +// newTracer initializes the tracer for capturing trie changes. +func newTracer() *tracer { + return &tracer{ + inserts: make(map[string]struct{}), + deletes: make(map[string]struct{}), + accessList: make(map[string][]byte), + } +} + +// onRead tracks the newly loaded trie node and caches the rlp-encoded +// blob internally. Don't change the value outside of this function since +// it's not deep-copied. +func (t *tracer) onRead(path []byte, val []byte) { + t.accessList[string(path)] = val +} + +// onInsert tracks the newly inserted trie node. If it's already +// in the deletion set (resurrected node), then just wipe it from +// the deletion set as it's "untouched". +func (t *tracer) onInsert(path []byte) { + if _, present := t.deletes[string(path)]; present { + delete(t.deletes, string(path)) + return + } + t.inserts[string(path)] = struct{}{} +} + +// onDelete tracks the newly deleted trie node. If it's already +// in the addition set, then just wipe it from the addition set +// as it's untouched. +func (t *tracer) onDelete(path []byte) { + if _, present := t.inserts[string(path)]; present { + delete(t.inserts, string(path)) + return + } + t.deletes[string(path)] = struct{}{} +} + +// reset clears the content tracked by tracer. +func (t *tracer) reset() { + t.inserts = make(map[string]struct{}) + t.deletes = make(map[string]struct{}) + t.accessList = make(map[string][]byte) +} + +// copy returns a deep copied tracer instance. +func (t *tracer) copy() *tracer { + accessList := make(map[string][]byte, len(t.accessList)) + for path, blob := range t.accessList { + accessList[path] = common.CopyBytes(blob) + } + return &tracer{ + inserts: maps.Clone(t.inserts), + deletes: maps.Clone(t.deletes), + accessList: accessList, + } +} + +// deletedNodes returns a list of node paths which are deleted from the trie. +func (t *tracer) deletedNodes() []string { + var paths []string + for path := range t.deletes { + // It's possible a few deleted nodes were embedded + // in their parent before; such deletions have no + // effect since they delete nothing, so filter them out. + _, ok := t.accessList[path] + if !ok { + continue + } + paths = append(paths, path) + } + return paths +} diff --git a/trie/trie.go b/trie/trie.go index 81cdd1627745..fe521d269074 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -62,6 +62,9 @@ type Trie struct { // hashing operation. This number will not directly map to the number of // actually unhashed nodes unhashed int + + // tracer is the tool to track the trie changes. + tracer *tracer } // newFlag returns the cache flag value for a newly created node.
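NOTE (editorial sketch, not part of the diff): a minimal usage sketch of the tracer above, assuming exactly the onRead/onInsert/onDelete/deletedNodes semantics shown in tracer.go; the helper name and blob value are made up, and the code must live in package trie since tracer is unexported. It shows that a delete followed by a re-insert of the same path cancels out, and that deletedNodes only reports paths that were previously resolved from disk:

    package trie

    // exampleTracerBookkeeping is a hypothetical helper illustrating the
    // tracer's insert/delete bookkeeping for a "resurrected" node.
    func exampleTracerBookkeeping() []string {
        tr := newTracer()
        tr.onRead([]byte{0x01}, []byte{0xc0}) // node at path 0x01 resolved from disk (blob value is made up)
        tr.onDelete([]byte{0x01})             // path enters the deletion set
        tr.onInsert([]byte{0x01})             // resurrected: removed from deletes, not re-added to inserts
        return tr.deletedNodes()              // empty: nothing is left in the deletion set
    }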
@@ -80,7 +83,8 @@ func New(root common.Hash, db *Database) (*Trie, error) { panic("trie.New called without a database") } trie := &Trie{ - db: db, + db: db, + tracer: newTracer(), } if root != (common.Hash{}) && root != emptyRoot { rootnode, err := trie.resolveHash(root[:], nil) @@ -313,6 +317,11 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error if matchlen == 0 { return true, branch, nil } + // New branch node is created as a child of the original short node. + // Track the newly inserted node in the tracer. The node identifier + // passed is the path from the root node. + t.tracer.onInsert(append(prefix, key[:matchlen]...)) + // Otherwise, replace it with a short node leading up to the branch. return true, &shortNode{key[:matchlen], branch, t.newFlag()}, nil @@ -327,6 +336,11 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error return true, n, nil case nil: + // A new short node is created; track it in the tracer. The node identifier + // passed is the path from the root node. Note the valueNode won't be tracked + // since it's always embedded in its parent. + t.tracer.onInsert(prefix) + return true, &shortNode{key, value, t.newFlag()}, nil case hashNode: @@ -379,6 +393,11 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { return false, n, nil // don't replace n on mismatch } if matchlen == len(key) { + // The matched short node is deleted entirely; track + // it in the deletion set. Similarly, the valueNode doesn't + // need to be tracked at all since it's always embedded. + t.tracer.onDelete(prefix) + return true, nil, nil // remove n entirely for whole matches } // The key is longer than n.Key. Remove the remaining suffix @@ -391,6 +410,10 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { } switch child := child.(type) { case *shortNode: + // The child shortNode is merged into its parent; track + // it as deleted as well. + t.tracer.onDelete(append(prefix, n.Key...)) + // Deleting from the subtrie reduced it to another // short node. Merge the nodes to avoid creating a // shortNode{..., shortNode{...}}. Use concat (which @@ -452,6 +475,11 @@ func (t *Trie) delete(n node, prefix, key []byte) (bool, node, error) { return false, nil, err } if cnode, ok := cnode.(*shortNode); ok { + // Replace the entire full node with the short node. + // Mark the original short node as deleted since the + // value is embedded into the parent now. + t.tracer.onDelete(append(prefix, byte(pos))) + k := append([]byte{byte(pos)}, cnode.Key...) return true, &shortNode{k, cnode.Val, t.newFlag()}, nil } @@ -505,6 +533,11 @@ func (t *Trie) resolve(n node, prefix []byte) (node, error) { func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) { hash := common.BytesToHash(n) if node := t.db.node(hash); node != nil { + rlp, err := t.db.Node(hash) + if err != nil { + return nil, err + } + t.tracer.onRead(prefix, rlp) return node, nil } return nil, &MissingNodeError{NodeHash: hash, Path: prefix} @@ -582,4 +615,18 @@ func (t *Trie) hashRoot() (node, node, error) { func (t *Trie) Reset() { t.root = nil t.unhashed = 0 + t.tracer.reset() +} + +// Witness returns a set containing all trie nodes that have been accessed.
+func (t *Trie) Witness() map[string]struct{} { + if len(t.tracer.accessList) == 0 { + return nil + } + + witness := make(map[string]struct{}, len(t.tracer.accessList)) + for _, node := range t.tracer.accessList { + witness[string(node)] = struct{}{} + } + return witness } diff --git a/trie/trie_test.go b/trie/trie_test.go index 23780a5ff807..e8f6293d0376 100644 --- a/trie/trie_test.go +++ b/trie/trie_test.go @@ -64,7 +64,7 @@ func TestEmptyTrie(t *testing.T) { } func TestNull(t *testing.T) { - var trie Trie + trie := newEmpty() key := make([]byte, 32) value := []byte("test") trie.Update(key, value) @@ -593,15 +593,15 @@ func TestTinyTrie(t *testing.T) { _, accounts := makeAccounts(5) trie := newEmpty() trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3]) - if exp, root := common.HexToHash("fc516c51c03bf9f1a0eec6ed6f6f5da743c2745dcd5670007519e6ec056f95a8"), trie.Hash(); exp != root { + if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root { t.Errorf("1: got %x, exp %x", root, exp) } trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4]) - if exp, root := common.HexToHash("5070d3f144546fd13589ad90cd153954643fa4ca6c1a5f08683cbfbbf76e960c"), trie.Hash(); exp != root { + if exp, root := common.HexToHash("ec63b967e98a5720e7f720482151963982890d82c9093c0d486b7eb8883a66b1"), trie.Hash(); exp != root { t.Errorf("2: got %x, exp %x", root, exp) } trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4]) - if exp, root := common.HexToHash("aa3fba77e50f6e931d8aacde70912be5bff04c7862f518ae06f3418dd4d37be3"), trie.Hash(); exp != root { + if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root { t.Errorf("3: got %x, exp %x", root, exp) } checktr, _ := New(common.Hash{}, trie.db) @@ -625,7 +625,7 @@ func TestCommitAfterHash(t *testing.T) { trie.Hash() trie.Commit(nil) root := trie.Hash() - exp := common.HexToHash("f0c0681648c93b347479cd58c61995557f01294425bd031ce1943c2799bbd4ec") + exp := common.HexToHash("72f9d3f3fe1e1dd7b8936442e7642aef76371472d94319900790053c493f3fe6") if exp != root { t.Errorf("got %x, exp %x", root, exp) } @@ -725,12 +725,12 @@ func TestCommitSequence(t *testing.T) { expWriteSeqHash []byte expCallbackSeqHash []byte }{ - {20, common.FromHex("7b908cce3bc16abb3eac5dff6c136856526f15225f74ce860a2bec47912a5492"), - common.FromHex("fac65cd2ad5e301083d0310dd701b5faaff1364cbe01cdbfaf4ec3609bb4149e")}, - {200, common.FromHex("55791f6ec2f83fee512a2d3d4b505784fdefaea89974e10440d01d62a18a298a"), - common.FromHex("5ab775b64d86a8058bb71c3c765d0f2158c14bbeb9cb32a65eda793a7e95e30f")}, - {2000, common.FromHex("ccb464abf67804538908c62431b3a6788e8dc6dee62aff9bfe6b10136acfceac"), - common.FromHex("b908adff17a5aa9d6787324c39014a74b04cef7fba6a92aeb730f48da1ca665d")}, + {20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066"), + common.FromHex("ff00f91ac05df53b82d7f178d77ada54fd0dca64526f537034a5dbe41b17df2a")}, + {200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e"), + common.FromHex("f3cd509064c8d319bbdd1c68f511850a902ad275e6ed5bea11547e23d492a926")}, + {2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7"), + common.FromHex("ff795ea898ba1e4cfed4a33b4cf5535a347a02cf931f88d88719faf810f9a1c9")}, } { 
addresses, accounts := makeAccounts(tc.count) // This spongeDb is used to check the sequence of disk-db-writes diff --git a/trie/zk_trie.go b/trie/zk_trie.go index 044e18ad66ba..a98ae474ddff 100644 --- a/trie/zk_trie.go +++ b/trie/zk_trie.go @@ -233,3 +233,33 @@ func VerifyProofSMT(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueRead return nil, fmt.Errorf("bad proof node %v", proof) } } + +func (t *ZkTrie) CountLeaves() uint64 { + root, err := t.ZkTrie.Tree().Root() + if err != nil { + panic("CountLeaves cannot get root") + } + return t.countLeaves(root) +} + +func (t *ZkTrie) countLeaves(root *zkt.Hash) uint64 { + if root == nil { + return 0 + } + + rootNode, err := t.ZkTrie.Tree().GetNode(root) + if err != nil { + panic("countLeaves cannot get rootNode") + } + + if rootNode.Type == zktrie.NodeTypeLeaf_New { + return 1 + } else { + return t.countLeaves(rootNode.ChildL) + t.countLeaves(rootNode.ChildR) + } +} + +// Witness returns a set containing all trie nodes that have been accessed. +func (t *ZkTrie) Witness() map[string]struct{} { + panic("not implemented") +} diff --git a/trie/zk_trie_test.go b/trie/zk_trie_test.go index 6c23abc2764b..d1700a75899d 100644 --- a/trie/zk_trie_test.go +++ b/trie/zk_trie_test.go @@ -26,12 +26,16 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" zkt "github.com/scroll-tech/zktrie/types" "github.com/scroll-tech/go-ethereum/common" + "github.com/scroll-tech/go-ethereum/core/rawdb" + "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb/leveldb" "github.com/scroll-tech/go-ethereum/ethdb/memorydb" + "github.com/scroll-tech/go-ethereum/rlp" ) func newEmptyZkTrie() *ZkTrie { @@ -264,3 +268,72 @@ func TestZkTrieDelete(t *testing.T) { assert.Equal(t, hashes[i].Hex(), hash.Hex()) } } + +func TestEquivalence(t *testing.T) { + t.Skip() + + zkDb, err := leveldb.New("/Users/omer/Documents/go-ethereum/l2geth-datadir/geth/chaindata", 0, 0, "", true) + require.NoError(t, err) + mptDb, err := leveldb.New("/Users/omer/Documents/go-ethereum/l2geth-datadir-mpt/geth/chaindata", 0, 0, "", true) + require.NoError(t, err) + + zkRoot := common.HexToHash("0x294b458b5b571bb634dbe9a81331dd2aabb5ef40cdc0328b075a9666d5df55d0") + mptRoot, err := rawdb.ReadDiskStateRoot(mptDb, zkRoot) + require.NoError(t, err) + + checkTrieEquality(t, &dbs{ + zkDb: zkDb, + mptDb: mptDb, + }, zkRoot, mptRoot, checkAccountEquality) +} + +type dbs struct { + zkDb *leveldb.Database + mptDb *leveldb.Database +} + +var accountsLeft = -1 + +func checkTrieEquality(t *testing.T, dbs *dbs, zkRoot, mptRoot common.Hash, leafChecker func(*testing.T, *dbs, []byte, []byte)) { + zkTrie, err := NewZkTrie(zkRoot, NewZktrieDatabase(dbs.zkDb)) + require.NoError(t, err) + + mptTrie, err := NewSecure(mptRoot, NewDatabaseWithConfig(dbs.mptDb, &Config{Preimages: true})) + require.NoError(t, err) + + expectedLeaves := zkTrie.CountLeaves() + trieIt := NewIterator(mptTrie.NodeIterator(nil)) + if accountsLeft == -1 { + accountsLeft = int(expectedLeaves) + } + + for trieIt.Next() { + expectedLeaves-- + preimageKey := mptTrie.GetKey(trieIt.Key) + require.NotEmpty(t, preimageKey) + leafChecker(t, dbs, zkTrie.Get(preimageKey), mptTrie.Get(preimageKey)) + } + require.Zero(t, expectedLeaves) +} + +func checkAccountEquality(t *testing.T, dbs *dbs, zkAccountBytes, mptAccountBytes []byte) { + mptAccount := &types.StateAccount{} + require.NoError(t, rlp.DecodeBytes(mptAccountBytes, mptAccount)) + zkAccount, err := 
types.UnmarshalStateAccount(zkAccountBytes) + require.NoError(t, err) + + require.Equal(t, mptAccount.Nonce, zkAccount.Nonce) + require.True(t, mptAccount.Balance.Cmp(zkAccount.Balance) == 0) + require.Equal(t, mptAccount.KeccakCodeHash, zkAccount.KeccakCodeHash) + checkTrieEquality(t, dbs, common.BytesToHash(zkAccount.Root[:]), common.BytesToHash(mptAccount.Root[:]), checkStorageEquality) + accountsLeft-- + t.Log("Accounts left:", accountsLeft) +} + +func checkStorageEquality(t *testing.T, _ *dbs, zkStorageBytes, mptStorageBytes []byte) { + zkValue := common.BytesToHash(zkStorageBytes) + _, content, _, err := rlp.Split(mptStorageBytes) + require.NoError(t, err) + mptValue := common.BytesToHash(content) + require.Equal(t, zkValue, mptValue) +} From 43d54cb077069520eb71078143b1efaf593a0a85 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 4 Feb 2025 08:39:15 +0800 Subject: [PATCH 22/36] update to latest da-codec --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 288892e7479a..74f2c55cfa1a 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634 + github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 diff --git a/go.sum b/go.sum index 0f05327e769b..6f71317cad4c 100644 --- a/go.sum +++ b/go.sum @@ -404,6 +404,8 @@ github.com/scroll-tech/da-codec v0.1.3-0.20250122003441-91171709155b h1:DWiVtzXK github.com/scroll-tech/da-codec v0.1.3-0.20250122003441-91171709155b/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634 h1:YtD7XjP1F7GzL9nxj1lq88m1/bwSroVSGVR050i05yY= github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= +github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f h1:Kh2Tdy3/+ooeeFeZZ2duKeUlSHgKy+sOQ2oxzuuSNZE= +github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f/go.mod h1:irqXJdRI5fsGkilJCpNTnJb8oV8KR51j68QXIWoth6U= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= From 4290d16a09a6c63eaf89291957a41abec8a70549 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 4 Feb 2025 11:47:18 +0800 Subject: [PATCH 23/36] add field to CommittedBatchMeta to store LastL1MessageQueueHash for CodecV7 batches --- core/rawdb/accessors_rollup_event.go | 64 ++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 9 deletions(-) diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go index e5daffcc5965..1a840da4cbc7 100644 --- a/core/rawdb/accessors_rollup_event.go +++ b/core/rawdb/accessors_rollup_event.go @@ -2,8 +2,11 @@ package rawdb import ( "bytes" + "fmt" "math/big" + "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/ethdb" "github.com/scroll-tech/go-ethereum/log" @@ -18,12 +21,26 @@ type ChunkBlockRange struct { // CommittedBatchMeta holds metadata 
for committed batches. type CommittedBatchMeta struct { + Version uint8 + ChunkBlockRanges []*ChunkBlockRange + + // introduced with CodecV7 + LastL1MessageQueueHash common.Hash +} + +type committedBatchMetaV0 struct { Version uint8 // BlobVersionedHashes are the versioned hashes of the blobs in the batch. Currently unused. Left for compatibility. BlobVersionedHashes []common.Hash ChunkBlockRanges []*ChunkBlockRange } +type committedBatchMetaV7 struct { + Version uint8 + ChunkBlockRanges []*ChunkBlockRange + LastL1MessageQueueHash common.Hash +} + // FinalizedBatchMeta holds metadata for finalized batches. type FinalizedBatchMeta struct { BatchHash common.Hash @@ -143,9 +160,23 @@ func ReadLastFinalizedBatchIndex(db ethdb.Reader) *uint64 { // WriteCommittedBatchMeta stores the CommittedBatchMeta for a specific batch in the database. func WriteCommittedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, committedBatchMeta *CommittedBatchMeta) { - value, err := rlp.EncodeToBytes(committedBatchMeta) + var committedBatchMetaToStore any + if encoding.CodecVersion(committedBatchMeta.Version) < encoding.CodecV7 { + committedBatchMetaToStore = &committedBatchMetaV0{ + Version: committedBatchMeta.Version, + ChunkBlockRanges: committedBatchMeta.ChunkBlockRanges, + } + } else { + committedBatchMetaToStore = &committedBatchMetaV7{ + Version: committedBatchMeta.Version, + ChunkBlockRanges: committedBatchMeta.ChunkBlockRanges, + LastL1MessageQueueHash: committedBatchMeta.LastL1MessageQueueHash, + } + } + + value, err := rlp.EncodeToBytes(committedBatchMetaToStore) if err != nil { - log.Crit("failed to RLP encode committed batch metadata", "batch index", batchIndex, "committed batch meta", committedBatchMeta, "err", err) + log.Crit("failed to RLP encode committed batch metadata", "batch index", batchIndex, "committed batch meta", committedBatchMetaToStore, "err", err) } if err := db.Put(committedBatchMetaKey(batchIndex), value); err != nil { log.Crit("failed to store committed batch metadata", "batch index", batchIndex, "value", value, "err", err) @@ -153,20 +184,35 @@ func WriteCommittedBatchMeta(db ethdb.KeyValueWriter, batchIndex uint64, committ } // ReadCommittedBatchMeta fetches the CommittedBatchMeta for a specific batch from the database. -func ReadCommittedBatchMeta(db ethdb.Reader, batchIndex uint64) *CommittedBatchMeta { +func ReadCommittedBatchMeta(db ethdb.Reader, batchIndex uint64) (*CommittedBatchMeta, error) { data, err := db.Get(committedBatchMetaKey(batchIndex)) if err != nil && isNotFoundErr(err) { - return nil + return nil, nil } if err != nil { - log.Crit("failed to read committed batch metadata from database", "batch index", batchIndex, "err", err) + return nil, fmt.Errorf("failed to read committed batch metadata from database: batch index %d, err: %w", batchIndex, err) } - cbm := new(CommittedBatchMeta) - if err := rlp.Decode(bytes.NewReader(data), cbm); err != nil { - log.Crit("Invalid CommittedBatchMeta RLP", "batch index", batchIndex, "data", data, "err", err) + // Try decoding from the newest format for future-proofness, then the older one for old data.
+ cbm7 := new(committedBatchMetaV7) + if err = rlp.Decode(bytes.NewReader(data), cbm7); err == nil { + return &CommittedBatchMeta{ + Version: cbm7.Version, + ChunkBlockRanges: cbm7.ChunkBlockRanges, + LastL1MessageQueueHash: cbm7.LastL1MessageQueueHash, + }, nil } - return cbm + + cbm0 := new(committedBatchMetaV0) + if err = rlp.Decode(bytes.NewReader(data), cbm0); err != nil { + return nil, fmt.Errorf("failed to decode committed batch metadata: batch index %d, err: %w", batchIndex, err) + } + + return &CommittedBatchMeta{ + Version: cbm0.Version, + ChunkBlockRanges: cbm0.ChunkBlockRanges, + LastL1MessageQueueHash: common.Hash{}, + }, nil } // DeleteCommittedBatchMeta removes the block ranges of all chunks associated with a specific batch from the database. From 574dd532e0065e499e2a5c8696a45132d9b54a5c Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Tue, 4 Feb 2025 11:50:54 +0800 Subject: [PATCH 24/36] adjust rollup verifier to support CodecV7 batches --- .../rollup_sync_service.go | 99 ++++++++++++++++--- 1 file changed, 84 insertions(+), 15 deletions(-) diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go index 406895be1120..c4db08639cdc 100644 --- a/rollup/rollup_sync_service/rollup_sync_service.go +++ b/rollup/rollup_sync_service/rollup_sync_service.go @@ -11,6 +11,7 @@ import ( "github.com/scroll-tech/da-codec/encoding" + "github.com/scroll-tech/go-ethereum/common" "github.com/scroll-tech/go-ethereum/core" "github.com/scroll-tech/go-ethereum/core/rawdb" "github.com/scroll-tech/go-ethereum/ethdb" @@ -265,14 +266,24 @@ func (s *RollupSyncService) updateRollupEvents(daEntries da.Entries) error { var highestFinalizedBlockNumber uint64 batchWriter := s.db.NewBatch() for index := startBatchIndex; index <= batchIndex; index++ { - committedBatchMeta := rawdb.ReadCommittedBatchMeta(s.db, index) + var parentCommittedBatchMeta *rawdb.CommittedBatchMeta + var err error + if index > 0 { + if parentCommittedBatchMeta, err = rawdb.ReadCommittedBatchMeta(s.db, index-1); err != nil { + return fmt.Errorf("failed to read parent committed batch meta, batch index: %v, err: %w", index-1, err) + } + } + committedBatchMeta, err := rawdb.ReadCommittedBatchMeta(s.db, index) + if err != nil { + return fmt.Errorf("failed to read committed batch meta, batch index: %v, err: %w", index, err) + } chunks, err := s.getLocalChunksForBatch(committedBatchMeta.ChunkBlockRanges) if err != nil { return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err) } - endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, committedBatchMeta, chunks, s.stack) + endBlock, finalizedBatchMeta, err := validateBatch(index, event, parentFinalizedBatchMeta, parentCommittedBatchMeta, committedBatchMeta, chunks, s.stack) if err != nil { return fmt.Errorf("fatal: validateBatch failed: finalize event: %v, err: %w", event, err) } @@ -357,9 +368,9 @@ func (s *RollupSyncService) getLocalChunksForBatch(chunkBlockRanges []*rawdb.Chu func (s *RollupSyncService) getCommittedBatchMeta(commitedBatch da.EntryWithBlocks) (*rawdb.CommittedBatchMeta, error) { if commitedBatch.BatchIndex() == 0 { return &rawdb.CommittedBatchMeta{ - Version: 0, - BlobVersionedHashes: nil, - ChunkBlockRanges: []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, + Version: 0, + ChunkBlockRanges: []*rawdb.ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 0}}, + LastL1MessageQueueHash: 
common.Hash{}, }, nil } @@ -368,10 +379,49 @@ func (s *RollupSyncService) getCommittedBatchMeta(commitedBatch da.EntryWithBloc return nil, fmt.Errorf("failed to decode block ranges from chunks, batch index: %v, err: %w", commitedBatch.BatchIndex(), err) } + // With CodecV7 the batch creation changed. We need to compute and store LastL1MessageQueueHash. + // InitialL1MessageQueueHash of a batch == LastL1MessageQueueHash of the previous batch. + // We need to do this for every committed batch (instead of finalized batch) because the L1MessageQueueHash + // is a continuous hash of all L1 messages over all batches. With bundles we only receive the finalize event + // for the last batch of the bundle. + var lastL1MessageQueueHash common.Hash + if commitedBatch.Version() == encoding.CodecV7 { + parentCommittedBatchMeta, err := rawdb.ReadCommittedBatchMeta(s.db, commitedBatch.BatchIndex()-1) + if err != nil { + return nil, fmt.Errorf("failed to read parent committed batch meta, batch index: %v, err: %w", commitedBatch.BatchIndex()-1, err) + } + + // If parent batch has a lower version this means this is the first batch of CodecV7. + // In this case we need to compute the InitialL1MessageQueueHash from the empty hash. + var initialL1MessageQueueHash common.Hash + if encoding.CodecVersion(parentCommittedBatchMeta.Version) < commitedBatch.Version() { + initialL1MessageQueueHash = common.Hash{} + } else { + initialL1MessageQueueHash = parentCommittedBatchMeta.LastL1MessageQueueHash + } + + chunks, err := s.getLocalChunksForBatch(chunkRanges) + if err != nil { + return nil, fmt.Errorf("failed to get local node info, batch index: %v, err: %w", commitedBatch.BatchIndex(), err) + } + + // There are no chunks encoded in a batch anymore with CodecV7. + // For compatibility reasons we still use a single chunk here to store the block ranges of the batch. + // We make sure that there is really only one chunk which contains all blocks of the batch. + if len(chunks) != 1 { + return nil, fmt.Errorf("invalid argument: chunk count is not 1 for CodecV7, batch index: %v", commitedBatch.BatchIndex()) + } + + lastL1MessageQueueHash, err = encoding.MessageQueueV2ApplyL1MessagesFromBlocks(initialL1MessageQueueHash, chunks[0].Blocks) + if err != nil { + return nil, fmt.Errorf("failed to apply L1 messages from blocks, batch index: %v, err: %w", commitedBatch.BatchIndex(), err) + } + } + return &rawdb.CommittedBatchMeta{ - Version: uint8(commitedBatch.Version()), - ChunkBlockRanges: chunkRanges, - BlobVersionedHashes: commitedBatch.BlobVersionedHashes(), + Version: uint8(commitedBatch.Version()), + ChunkBlockRanges: chunkRanges, + LastL1MessageQueueHash: lastL1MessageQueueHash, }, nil } @@ -398,7 +448,7 @@ func (s *RollupSyncService) getCommittedBatchMeta(commitedBatch da.EntryWithBloc // Note: This function is compatible with both "finalize by batch" and "finalize by bundle" methods. // In "finalize by bundle", only the last batch of each bundle is fully verified. // This check still ensures the correctness of all batch hashes in the bundle due to the parent-child relationship between batch hashes.
-func validateBatch(batchIndex uint64, event *l1.FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { +func validateBatch(batchIndex uint64, event *l1.FinalizeBatchEvent, parentFinalizedBatchMeta *rawdb.FinalizedBatchMeta, parentCommittedBatchMeta *rawdb.CommittedBatchMeta, committedBatchMeta *rawdb.CommittedBatchMeta, chunks []*encoding.Chunk, stack *node.Node) (uint64, *rawdb.FinalizedBatchMeta, error) { if len(chunks) == 0 { return 0, nil, fmt.Errorf("invalid argument: length of chunks is 0, batch index: %v", batchIndex) } @@ -416,11 +466,30 @@ func validateBatch(batchIndex uint64, event *l1.FinalizeBatchEvent, parentFinali endBlock := endChunk.Blocks[len(endChunk.Blocks)-1] // Note: All params of batch are calculated locally based on the block data. - batch := &encoding.Batch{ - Index: batchIndex, - TotalL1MessagePoppedBefore: parentFinalizedBatchMeta.TotalL1MessagePopped, - ParentBatchHash: parentFinalizedBatchMeta.BatchHash, - Chunks: chunks, + var batch *encoding.Batch + if encoding.CodecVersion(committedBatchMeta.Version) < encoding.CodecV7 { + batch = &encoding.Batch{ + Index: batchIndex, + TotalL1MessagePoppedBefore: parentFinalizedBatchMeta.TotalL1MessagePopped, + ParentBatchHash: parentFinalizedBatchMeta.BatchHash, + Chunks: chunks, + } + } else { + // With CodecV7 the batch creation changed. There are no chunks encoded in a batch anymore. + // For compatibility reasons we still use a single chunk here to store the block ranges of the batch. + // We make sure that there is really only one chunk which contains all blocks of the batch. + if len(chunks) != 1 { + return 0, nil, fmt.Errorf("invalid argument: chunk count is not 1 for CodecV7, batch index: %v", batchIndex) + } + + batch = &encoding.Batch{ + Index: batchIndex, + ParentBatchHash: parentFinalizedBatchMeta.BatchHash, + InitialL1MessageIndex: parentFinalizedBatchMeta.TotalL1MessagePopped, + Blocks: startChunk.Blocks, + InitialL1MessageQueueHash: parentCommittedBatchMeta.LastL1MessageQueueHash, + LastL1MessageQueueHash: committedBatchMeta.LastL1MessageQueueHash, + } } codecVersion := encoding.CodecVersion(committedBatchMeta.Version) @@ -431,7 +500,7 @@ daBatch, err := codec.NewDABatch(batch) if err != nil { - return 0, nil, fmt.Errorf("failed to create DA batch, batch index: %v, codec version: %v, expected blob hashes: %v, err: %w", batchIndex, codecVersion, committedBatchMeta.BlobVersionedHashes, err) + return 0, nil, fmt.Errorf("failed to create DA batch, batch index: %v, codec version: %v, err: %w", batchIndex, codecVersion, err) } localBatchHash := daBatch.Hash() From 53b6ebf8b0f8d0ba0ccc100a1c9f063becb5e0c2 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Wed, 5 Feb 2025 08:54:15 +0800 Subject: [PATCH 25/36] address review comments --- rollup/da_syncer/da/calldata_blob_source.go | 6 ++++-- rollup/da_syncer/da/commitV7.go | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go index 2978c8c1bd79..77ab34aaaada 100644 --- a/rollup/da_syncer/da/calldata_blob_source.go +++ b/rollup/da_syncer/da/calldata_blob_source.go @@ -96,7 +96,6 @@ func (ds *CalldataBlobSource) L1Finalized() uint64 { func (ds *CalldataBlobSource) 
processRollupEventsToDA(rollupEvents l1.RollupEvents) (Entries, error) { var entries Entries - var emptyHash common.Hash // we keep track of the last commit transaction hash, so we can process all events created in the same tx together. // if we have a different commit transaction, we need to create a new commit batch DA. var lastCommitTransactionHash common.Hash @@ -105,6 +104,9 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven // getAndAppendCommitBatchDA is a helper function that gets the commit batch DA for the last commit events and appends it to the entries list. // It also resets the last commit events and last commit transaction hash. + // This is necessary because we need to process all events created in the same tx together. + // However, we only know all events created in the same tx when we see a different commit transaction (next iteration of the loop). + // Therefore, we need to process the last commit events when we see a different event (finalize, revert) or commit transaction (or when we reach the end of the rollup events). getAndAppendCommitBatchDA := func() error { commitBatchDAEntries, err := ds.getCommitBatchDA(lastCommitEvents) if err != nil { @@ -113,7 +115,7 @@ func (ds *CalldataBlobSource) processRollupEventsToDA(rollupEvents l1.RollupEven entries = append(entries, commitBatchDAEntries...) lastCommitEvents = nil - lastCommitTransactionHash = emptyHash + lastCommitTransactionHash = common.Hash{} return nil } diff --git a/rollup/da_syncer/da/commitV7.go b/rollup/da_syncer/da/commitV7.go index 01aa5249e749..dc97f53d132b 100644 --- a/rollup/da_syncer/da/commitV7.go +++ b/rollup/da_syncer/da/commitV7.go @@ -62,7 +62,7 @@ func NewCommitBatchDAV7(ctx context.Context, db ethdb.Database, } blobVersionedHash := common.Hash(kzg4844.CalcBlobHashV1(sha256.New(), &c)) if blobVersionedHash != blobHash { - return nil, fmt.Errorf("blobVersionedHash from blob source is not equal to versionedHash from tx, correct versioned hash: %s, fetched blob hash: %s", blobHash.Hex(), blobVersionedHash.String()) + return nil, fmt.Errorf("blobVersionedHash from blob source is not equal to versionedHash from tx, correct versioned hash: %s, fetched blob hash: %s", blobHash.Hex(), blobVersionedHash.Hex()) } blobPayload, err := codec.DecodeBlob(blob) From 3335654ddd2d0286cb9585c1d7778ead0ff507c0 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 6 Feb 2025 10:03:27 +0800 Subject: [PATCH 26/36] fix issues after merge --- trie/zk_trie_test.go | 76 +------------------------------------------- 1 file changed, 1 insertion(+), 75 deletions(-) diff --git a/trie/zk_trie_test.go b/trie/zk_trie_test.go index d1700a75899d..1aae33264db3 100644 --- a/trie/zk_trie_test.go +++ b/trie/zk_trie_test.go @@ -25,17 +25,12 @@ import ( "sync" "testing" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - zkt "github.com/scroll-tech/zktrie/types" + "github.com/stretchr/testify/assert" "github.com/scroll-tech/go-ethereum/common" - "github.com/scroll-tech/go-ethereum/core/rawdb" - "github.com/scroll-tech/go-ethereum/core/types" "github.com/scroll-tech/go-ethereum/ethdb/leveldb" "github.com/scroll-tech/go-ethereum/ethdb/memorydb" - "github.com/scroll-tech/go-ethereum/rlp" ) func newEmptyZkTrie() *ZkTrie { @@ -268,72 +263,3 @@ func TestZkTrieDelete(t *testing.T) { assert.Equal(t, hashes[i].Hex(), hash.Hex()) } } - -func TestEquivalence(t *testing.T) { - t.Skip() - - zkDb, err := 
leveldb.New("/Users/omer/Documents/go-ethereum/l2geth-datadir/geth/chaindata", 0, 0, "", true) - require.NoError(t, err) - mptDb, err := leveldb.New("/Users/omer/Documents/go-ethereum/l2geth-datadir-mpt/geth/chaindata", 0, 0, "", true) - require.NoError(t, err) - - zkRoot := common.HexToHash("0x294b458b5b571bb634dbe9a81331dd2aabb5ef40cdc0328b075a9666d5df55d0") - mptRoot, err := rawdb.ReadDiskStateRoot(mptDb, zkRoot) - require.NoError(t, err) - - checkTrieEquality(t, &dbs{ - zkDb: zkDb, - mptDb: mptDb, - }, zkRoot, mptRoot, checkAccountEquality) -} - -type dbs struct { - zkDb *leveldb.Database - mptDb *leveldb.Database -} - -var accountsLeft = -1 - -func checkTrieEquality(t *testing.T, dbs *dbs, zkRoot, mptRoot common.Hash, leafChecker func(*testing.T, *dbs, []byte, []byte)) { - zkTrie, err := NewZkTrie(zkRoot, NewZktrieDatabase(dbs.zkDb)) - require.NoError(t, err) - - mptTrie, err := NewSecure(mptRoot, NewDatabaseWithConfig(dbs.mptDb, &Config{Preimages: true})) - require.NoError(t, err) - - expectedLeaves := zkTrie.CountLeaves() - trieIt := NewIterator(mptTrie.NodeIterator(nil)) - if accountsLeft == -1 { - accountsLeft = int(expectedLeaves) - } - - for trieIt.Next() { - expectedLeaves-- - preimageKey := mptTrie.GetKey(trieIt.Key) - require.NotEmpty(t, preimageKey) - leafChecker(t, dbs, zkTrie.Get(preimageKey), mptTrie.Get(preimageKey)) - } - require.Zero(t, expectedLeaves) -} - -func checkAccountEquality(t *testing.T, dbs *dbs, zkAccountBytes, mptAccountBytes []byte) { - mptAccount := &types.StateAccount{} - require.NoError(t, rlp.DecodeBytes(mptAccountBytes, mptAccount)) - zkAccount, err := types.UnmarshalStateAccount(zkAccountBytes) - require.NoError(t, err) - - require.Equal(t, mptAccount.Nonce, zkAccount.Nonce) - require.True(t, mptAccount.Balance.Cmp(zkAccount.Balance) == 0) - require.Equal(t, mptAccount.KeccakCodeHash, zkAccount.KeccakCodeHash) - checkTrieEquality(t, dbs, common.BytesToHash(zkAccount.Root[:]), common.BytesToHash(mptAccount.Root[:]), checkStorageEquality) - accountsLeft-- - t.Log("Accounts left:", accountsLeft) -} - -func checkStorageEquality(t *testing.T, _ *dbs, zkStorageBytes, mptStorageBytes []byte) { - zkValue := common.BytesToHash(zkStorageBytes) - _, content, _, err := rlp.Split(mptStorageBytes) - require.NoError(t, err) - mptValue := common.BytesToHash(content) - require.Equal(t, zkValue, mptValue) -} From 634d1f191d23458c64d0d1e4a48c50543f0b7028 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Thu, 6 Feb 2025 16:43:47 +0800 Subject: [PATCH 27/36] go mod tidy --- go.sum | 8 -------- 1 file changed, 8 deletions(-) diff --git a/go.sum b/go.sum index 6f71317cad4c..adf90aec4789 100644 --- a/go.sum +++ b/go.sum @@ -396,14 +396,6 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5 h1:vZ75srkZCStjDWq/kqZGLoucf7Y7qXC13nKjQVZ0zp8= -github.com/scroll-tech/da-codec v0.1.3-0.20241218102542-9852fa4e1be5/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= -github.com/scroll-tech/da-codec v0.1.3-0.20250121050419-8c2a5ccc1b2e h1:Sp1RjVsK9PLVW5zMlwMUNegsDpYmVN8noT/C4Bjro0U= -github.com/scroll-tech/da-codec v0.1.3-0.20250121050419-8c2a5ccc1b2e/go.mod 
h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= -github.com/scroll-tech/da-codec v0.1.3-0.20250122003441-91171709155b h1:DWiVtzXK/3lXK3+/aaAeorurjj88ITO17hhLECIS/0g= -github.com/scroll-tech/da-codec v0.1.3-0.20250122003441-91171709155b/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= -github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634 h1:YtD7XjP1F7GzL9nxj1lq88m1/bwSroVSGVR050i05yY= -github.com/scroll-tech/da-codec v0.1.3-0.20250122041800-4ef7bfc6b634/go.mod h1:XfQhUl3msmE6dpZEbR/LIwiMxywPQcUQsch9URgXDzs= github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f h1:Kh2Tdy3/+ooeeFeZZ2duKeUlSHgKy+sOQ2oxzuuSNZE= github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f/go.mod h1:irqXJdRI5fsGkilJCpNTnJb8oV8KR51j68QXIWoth6U= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= From 0fa374304837dc14a33ddc5cdbaa6e4e6e87c648 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Fri, 7 Feb 2025 17:34:05 +0800 Subject: [PATCH 28/36] fix unit tests --- core/rawdb/accessors_rollup_event_test.go | 84 +++++++++++-------- .../rollup_sync_service_test.go | 81 +++++++----------- 2 files changed, 79 insertions(+), 86 deletions(-) diff --git a/core/rawdb/accessors_rollup_event_test.go b/core/rawdb/accessors_rollup_event_test.go index a22880ee05a4..554b07d0b894 100644 --- a/core/rawdb/accessors_rollup_event_test.go +++ b/core/rawdb/accessors_rollup_event_test.go @@ -3,6 +3,8 @@ package rawdb import ( "testing" + "github.com/stretchr/testify/require" + "github.com/scroll-tech/go-ethereum/common" ) @@ -157,36 +159,47 @@ func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) { { batchIndex: 0, meta: &CommittedBatchMeta{ - Version: 0, - BlobVersionedHashes: []common.Hash{}, - ChunkBlockRanges: []*ChunkBlockRange{}, + Version: 0, + ChunkBlockRanges: []*ChunkBlockRange{}, + }, + }, + { + batchIndex: 1, + meta: &CommittedBatchMeta{ + Version: 1, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, + }, + }, + { + batchIndex: 1, + meta: &CommittedBatchMeta{ + Version: 2, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, }, }, { batchIndex: 1, meta: &CommittedBatchMeta{ - Version: 1, - BlobVersionedHashes: []common.Hash{common.HexToHash("0x1234")}, - ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, + Version: 7, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, + LastL1MessageQueueHash: common.Hash{1, 2, 3, 4, 5, 6, 7}, }, }, { batchIndex: 255, meta: &CommittedBatchMeta{ - Version: 255, - BlobVersionedHashes: []common.Hash{common.HexToHash("0xabcd"), common.HexToHash("0xef01")}, - ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}, {StartBlockNumber: 11, EndBlockNumber: 20}}, + Version: 255, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}, {StartBlockNumber: 11, EndBlockNumber: 20}}, + LastL1MessageQueueHash: common.Hash{255}, }, }, } for _, tc := range testCases { WriteCommittedBatchMeta(db, tc.batchIndex, tc.meta) - got := ReadCommittedBatchMeta(db, tc.batchIndex) - - if got == nil { - t.Fatalf("Expected non-nil value for batch index %d", tc.batchIndex) - } + got, err := ReadCommittedBatchMeta(db, tc.batchIndex) + require.NoError(t, err) + require.NotNil(t, got) if !compareCommittedBatchMeta(tc.meta, got) { t.Fatalf("CommittedBatchMeta mismatch for batch index %d, expected %+v, got %+v", tc.batchIndex, tc.meta, 
got) @@ -194,7 +207,9 @@ func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) { } // reading a non-existing value - if got := ReadCommittedBatchMeta(db, 256); got != nil { + got, err := ReadCommittedBatchMeta(db, 256) + require.NoError(t, err) + if got != nil { t.Fatalf("Expected nil for non-existing value, got %+v", got) } @@ -202,10 +217,9 @@ func TestWriteReadDeleteCommittedBatchMeta(t *testing.T) { for _, tc := range testCases { DeleteCommittedBatchMeta(db, tc.batchIndex) - readChunkRange := ReadCommittedBatchMeta(db, tc.batchIndex) - if readChunkRange != nil { - t.Fatal("Committed batch metadata was not deleted", "batch index", tc.batchIndex) - } + readChunkRange, err := ReadCommittedBatchMeta(db, tc.batchIndex) + require.NoError(t, err) + require.Nil(t, readChunkRange, "Committed batch metadata was not deleted", "batch index", tc.batchIndex) } // delete non-existing value: ensure the delete operation handles non-existing values without errors. @@ -217,19 +231,19 @@ func TestOverwriteCommittedBatchMeta(t *testing.T) { batchIndex := uint64(42) initialMeta := &CommittedBatchMeta{ - Version: 1, - BlobVersionedHashes: []common.Hash{common.HexToHash("0x1234")}, - ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, + Version: 1, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 10}}, } newMeta := &CommittedBatchMeta{ - Version: 2, - BlobVersionedHashes: []common.Hash{common.HexToHash("0x5678"), common.HexToHash("0x9abc")}, - ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 20}, {StartBlockNumber: 21, EndBlockNumber: 30}}, + Version: 255, + ChunkBlockRanges: []*ChunkBlockRange{{StartBlockNumber: 0, EndBlockNumber: 20}, {StartBlockNumber: 21, EndBlockNumber: 30}}, + LastL1MessageQueueHash: common.Hash{255}, } // write initial meta WriteCommittedBatchMeta(db, batchIndex, initialMeta) - got := ReadCommittedBatchMeta(db, batchIndex) + got, err := ReadCommittedBatchMeta(db, batchIndex) + require.NoError(t, err) if !compareCommittedBatchMeta(initialMeta, got) { t.Fatalf("Initial write failed, expected %+v, got %+v", initialMeta, got) @@ -237,7 +251,8 @@ func TestOverwriteCommittedBatchMeta(t *testing.T) { // overwrite with new meta WriteCommittedBatchMeta(db, batchIndex, newMeta) - got = ReadCommittedBatchMeta(db, batchIndex) + got, err = ReadCommittedBatchMeta(db, batchIndex) + require.NoError(t, err) if !compareCommittedBatchMeta(newMeta, got) { t.Fatalf("Overwrite failed, expected %+v, got %+v", newMeta, got) @@ -245,7 +260,8 @@ func TestOverwriteCommittedBatchMeta(t *testing.T) { // read non-existing batch index nonExistingIndex := uint64(999) - got = ReadCommittedBatchMeta(db, nonExistingIndex) + got, err = ReadCommittedBatchMeta(db, nonExistingIndex) + require.NoError(t, err) if got != nil { t.Fatalf("Expected nil for non-existing batch index, got %+v", got) @@ -256,14 +272,7 @@ func compareCommittedBatchMeta(a, b *CommittedBatchMeta) bool { if a.Version != b.Version { return false } - if len(a.BlobVersionedHashes) != len(b.BlobVersionedHashes) { - return false - } - for i := range a.BlobVersionedHashes { - if a.BlobVersionedHashes[i] != b.BlobVersionedHashes[i] { - return false - } - } + if len(a.ChunkBlockRanges) != len(b.ChunkBlockRanges) { return false } @@ -272,5 +281,6 @@ func compareCommittedBatchMeta(a, b *CommittedBatchMeta) bool { return false } } - return true + + return a.LastL1MessageQueueHash == b.LastL1MessageQueueHash } diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go 
b/rollup/rollup_sync_service/rollup_sync_service_test.go index dca18285d2c0..245051f02e9f 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -151,7 +151,6 @@ func TestGetCommittedBatchMetaCodecV1(t *testing.T) { require.Equal(t, encoding.CodecV1, encoding.CodecVersion(metadata.Version)) require.EqualValues(t, expectedRanges, metadata.ChunkBlockRanges) - require.EqualValues(t, expectedVersionedHashes, metadata.BlobVersionedHashes) } type mockEntryWithBlocks struct { @@ -254,11 +253,10 @@ func TestValidateBatchCodecV0(t *testing.T) { 1, ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV0), - BlobVersionedHashes: nil, + Version: uint8(encoding.CodecV0), } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, &rawdb.CommittedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -283,11 +281,10 @@ func TestValidateBatchCodecV0(t *testing.T) { 2, ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV0), - BlobVersionedHashes: nil, + Version: uint8(encoding.CodecV0), } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -321,11 +318,10 @@ func TestValidateBatchCodecV1(t *testing.T) { 1, ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV1), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x0129554070e4323800ca0e5ddd17bc447854601b306a70870002a058741214b3")}, + Version: uint8(encoding.CodecV1), } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, &rawdb.CommittedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -349,10 +345,9 @@ func TestValidateBatchCodecV1(t *testing.T) { 1, ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV1), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a327088bb2b13151449d8313c281d0006d12e8453e863637b746898b6ad5a6")}, + Version: uint8(encoding.CodecV1), } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -386,11 +381,10 @@ func TestValidateBatchCodecV2(t *testing.T) { 1, ) committedBatchMeta1 := 
&rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV2), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + Version: uint8(encoding.CodecV2), } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, &rawdb.CommittedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -414,10 +408,9 @@ func TestValidateBatchCodecV2(t *testing.T) { 1, ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV2), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + Version: uint8(encoding.CodecV2), } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -452,11 +445,10 @@ func TestValidateBatchCodecV3(t *testing.T) { ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV3), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x018d99636f4b20ccdc1dd11c289eb2a470e2c4dd631b1a7b48a6978805f49d18")}, + Version: uint8(encoding.CodecV3), } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, &rawdb.CommittedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1, chunk2, chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock1) @@ -480,10 +472,9 @@ func TestValidateBatchCodecV3(t *testing.T) { 1, ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV3), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + Version: uint8(encoding.CodecV3), } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock2) @@ -512,11 +503,10 @@ func TestValidateBatchUpgrades(t *testing.T) { ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV0), - BlobVersionedHashes: nil, + Version: uint8(encoding.CodecV0), } - endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(event1.BatchIndex().Uint64(), event1, parentFinalizedBatchMeta1, &rawdb.CommittedBatchMeta{}, committedBatchMeta1, 
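+	// Note: validateBatch now receives the parent batch's CommittedBatchMeta (zero
+	// value for a first batch, as above) immediately before the current batch's
+	// CommittedBatchMeta; for codec v7 the parent's LastL1MessageQueueHash seeds
+	// the message-queue hash chain of the next batch.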
[]*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) @@ -540,10 +530,9 @@ func TestValidateBatchUpgrades(t *testing.T) { 1, ) committedBatchMeta2 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV1), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x01a688c6e137310df38a62f5ad1e5119b8cb0455c386a9a4079b14fe92a239aa")}, + Version: uint8(encoding.CodecV1), } - endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(event2.BatchIndex().Uint64(), event2, parentFinalizedBatchMeta2, committedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) @@ -567,10 +556,9 @@ func TestValidateBatchUpgrades(t *testing.T) { 1, ) committedBatchMeta3 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV1), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x01ea66c4de196d36e2c3a5d7c0045100b9e46ef65be8f7a921ef20e6f2e99ebd")}, + Version: uint8(encoding.CodecV1), } - endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex().Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(event3.BatchIndex().Uint64(), event3, parentFinalizedBatchMeta3, committedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) @@ -594,10 +582,9 @@ func TestValidateBatchUpgrades(t *testing.T) { 1, ) committedBatchMeta4 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV3), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + Version: uint8(encoding.CodecV3), } - endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex().Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(event4.BatchIndex().Uint64(), event4, parentFinalizedBatchMeta4, committedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) @@ -631,38 +618,34 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { ) committedBatchMeta1 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV3), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x01bbc6b98d7d3783730b6208afac839ad37dcf211b9d9e7c83a5f9d02125ddd7")}, + Version: uint8(encoding.CodecV3), } committedBatchMeta2 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV3), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x01c81e5696e00f1e6e7d76c197f74ed51650147c49c4e6e5b0b702cdcc54352a")}, + Version: uint8(encoding.CodecV3), } committedBatchMeta3 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV3), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x012e15203534ae3f4cbe1b0f58fe6db6e5c29432115a8ece6ef5550bf2ffce4c")}, + Version: uint8(encoding.CodecV3), } committedBatchMeta4 := &rawdb.CommittedBatchMeta{ - Version: uint8(encoding.CodecV3), - BlobVersionedHashes: []common.Hash{common.HexToHash("0x015b4e3d3dcd64cc0eb6a5ad535d7a1844a8c4cdad366ec73557bcc533941370")}, + Version: uint8(encoding.CodecV3), } - endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, committedBatchMeta1, 
[]*encoding.Chunk{chunk1}, nil) + endBlock1, finalizedBatchMeta1, err := validateBatch(0, event, &rawdb.FinalizedBatchMeta{}, &rawdb.CommittedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{chunk1}, nil) assert.NoError(t, err) assert.Equal(t, uint64(2), endBlock1) - endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) + endBlock2, finalizedBatchMeta2, err := validateBatch(1, event, finalizedBatchMeta1, committedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{chunk2}, nil) assert.NoError(t, err) assert.Equal(t, uint64(3), endBlock2) - endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) + endBlock3, finalizedBatchMeta3, err := validateBatch(2, event, finalizedBatchMeta2, committedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{chunk3}, nil) assert.NoError(t, err) assert.Equal(t, uint64(13), endBlock3) - endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) + endBlock4, finalizedBatchMeta4, err := validateBatch(3, event, finalizedBatchMeta3, committedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{chunk4}, nil) assert.NoError(t, err) assert.Equal(t, uint64(17), endBlock4) From a2a68e8250476d6a25f7aac0c419bed0a55ff1d8 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Mon, 10 Feb 2025 15:38:03 +0800 Subject: [PATCH 29/36] update da-codec --- go.mod | 2 +- go.sum | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 74f2c55cfa1a..65740d79eedd 100644 --- a/go.mod +++ b/go.mod @@ -50,7 +50,7 @@ require ( github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 github.com/rs/cors v1.7.0 - github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f + github.com/scroll-tech/da-codec v0.1.3-0.20250210041951-d028c537b995 github.com/scroll-tech/zktrie v0.8.4 github.com/shirou/gopsutil v3.21.11+incompatible github.com/sourcegraph/conc v0.3.0 diff --git a/go.sum b/go.sum index adf90aec4789..40aef325e255 100644 --- a/go.sum +++ b/go.sum @@ -398,6 +398,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f h1:Kh2Tdy3/+ooeeFeZZ2duKeUlSHgKy+sOQ2oxzuuSNZE= github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f/go.mod h1:irqXJdRI5fsGkilJCpNTnJb8oV8KR51j68QXIWoth6U= +github.com/scroll-tech/da-codec v0.1.3-0.20250210041951-d028c537b995 h1:Zo1p42CUS9pADSKoDD0ZoDxf4dQ3gttqWZlV+RSeImk= +github.com/scroll-tech/da-codec v0.1.3-0.20250210041951-d028c537b995/go.mod h1:UZhhjzqYsyEhcvY0Y+SP+oMdeOUqFn/UXpbAYuPGzg0= github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE= github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk= github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= From ca6649ee48a84f2f35d2c8d4440176c540261e21 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Mon, 10 Feb 2025 15:38:33 +0800 Subject: [PATCH 30/36] add test TestValidateBatchCodecV7 --- .../rollup_sync_service_test.go | 154 + .../testdata/blockTrace_06.json | 870 ++++ .../testdata/blockTrace_07.json | 3713 +++++++++++++++++ 3 files 
changed, 4737 insertions(+) create mode 100644 rollup/rollup_sync_service/testdata/blockTrace_06.json create mode 100644 rollup/rollup_sync_service/testdata/blockTrace_07.json diff --git a/rollup/rollup_sync_service/rollup_sync_service_test.go b/rollup/rollup_sync_service/rollup_sync_service_test.go index 245051f02e9f..36a3d8d22ecc 100644 --- a/rollup/rollup_sync_service/rollup_sync_service_test.go +++ b/rollup/rollup_sync_service/rollup_sync_service_test.go @@ -658,6 +658,155 @@ func TestValidateBatchInFinalizeByBundle(t *testing.T) { assert.Equal(t, parentFinalizedBatchMeta5, finalizedBatchMeta4) } +func TestValidateBatchCodecV7(t *testing.T) { + codecV7 := encoding.DACodecV7{} + + var finalizedBatchMeta1 *rawdb.FinalizedBatchMeta + var committedBatchMeta1 *rawdb.CommittedBatchMeta + { + block1 := replaceBlockNumber(readBlockFromJSON(t, "./testdata/blockTrace_02.json"), 1) + batch1 := &encoding.Batch{ + Index: 1, + InitialL1MessageIndex: 0, + InitialL1MessageQueueHash: common.Hash{}, + LastL1MessageQueueHash: common.Hash{}, + Blocks: []*encoding.Block{block1}, + } + batch1LastBlock := batch1.Blocks[len(batch1.Blocks)-1] + + daBatch1, err := codecV7.NewDABatch(batch1) + require.NoError(t, err) + + event1 := l1.NewFinalizeBatchEvent( + new(big.Int).SetUint64(batch1.Index), + daBatch1.Hash(), + batch1LastBlock.Header.Root, + batch1LastBlock.WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) + + committedBatchMeta1 = &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV7), + LastL1MessageQueueHash: common.Hash{}, + } + + var endBlock1 uint64 + endBlock1, finalizedBatchMeta1, err = validateBatch(event1.BatchIndex().Uint64(), event1, &rawdb.FinalizedBatchMeta{}, &rawdb.CommittedBatchMeta{}, committedBatchMeta1, []*encoding.Chunk{{Blocks: batch1.Blocks}}, nil) + require.NoError(t, err) + require.EqualValues(t, 1, endBlock1) + require.Equal(t, &rawdb.FinalizedBatchMeta{ + BatchHash: daBatch1.Hash(), + TotalL1MessagePopped: 0, + StateRoot: batch1LastBlock.Header.Root, + WithdrawRoot: batch1LastBlock.WithdrawRoot, + }, finalizedBatchMeta1) + } + + // finalize 3 batches with CodecV7 at once + block2 := replaceBlockNumber(readBlockFromJSON(t, "./testdata/blockTrace_03.json"), 2) + batch2 := &encoding.Batch{ + Index: 2, + ParentBatchHash: finalizedBatchMeta1.BatchHash, + InitialL1MessageIndex: 0, + InitialL1MessageQueueHash: common.Hash{}, + LastL1MessageQueueHash: common.Hash{}, + Blocks: []*encoding.Block{block2}, + } + batch2LastBlock := batch2.Blocks[len(batch2.Blocks)-1] + + daBatch2, err := codecV7.NewDABatch(batch2) + require.NoError(t, err) + + block3 := replaceBlockNumber(readBlockFromJSON(t, "./testdata/blockTrace_06.json"), 3) + LastL1MessageQueueHashBatch3, err := encoding.MessageQueueV2ApplyL1MessagesFromBlocks(common.Hash{}, []*encoding.Block{block3}) + require.NoError(t, err) + batch3 := &encoding.Batch{ + Index: 3, + ParentBatchHash: daBatch2.Hash(), + InitialL1MessageIndex: 0, + InitialL1MessageQueueHash: common.Hash{}, + LastL1MessageQueueHash: LastL1MessageQueueHashBatch3, + Blocks: []*encoding.Block{block3}, + } + batch3LastBlock := batch3.Blocks[len(batch3.Blocks)-1] + + daBatch3, err := codecV7.NewDABatch(batch3) + require.NoError(t, err) + + block4 := replaceBlockNumber(readBlockFromJSON(t, "./testdata/blockTrace_07.json"), 4) + LastL1MessageQueueHashBatch4, err := encoding.MessageQueueV2ApplyL1MessagesFromBlocks(LastL1MessageQueueHashBatch3, []*encoding.Block{block4}) + require.NoError(t, err) + batch4 := &encoding.Batch{ + Index: 4, + 
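+		// batch4 chains onto batch3: it starts at L1 message index 1 and applies
+		// block4's messages on top of LastL1MessageQueueHashBatch3 computed above.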
ParentBatchHash: daBatch3.Hash(), + InitialL1MessageIndex: 1, + InitialL1MessageQueueHash: LastL1MessageQueueHashBatch3, + LastL1MessageQueueHash: LastL1MessageQueueHashBatch4, + Blocks: []*encoding.Block{block4}, + } + batch4LastBlock := batch4.Blocks[len(batch4.Blocks)-1] + + daBatch4, err := codecV7.NewDABatch(batch4) + require.NoError(t, err) + + event2 := l1.NewFinalizeBatchEvent( + new(big.Int).SetUint64(batch4.Index), + daBatch4.Hash(), + batch4LastBlock.Header.Root, + batch4LastBlock.WithdrawRoot, + common.HexToHash("0x1"), + common.HexToHash("0x1"), + 1, + ) + + committedBatchMeta2 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV7), + LastL1MessageQueueHash: common.Hash{}, + } + + committedBatchMeta3 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV7), + LastL1MessageQueueHash: LastL1MessageQueueHashBatch3, + } + + committedBatchMeta4 := &rawdb.CommittedBatchMeta{ + Version: uint8(encoding.CodecV7), + LastL1MessageQueueHash: LastL1MessageQueueHashBatch4, + } + + endBlock2, finalizedBatchMeta2, err := validateBatch(2, event2, finalizedBatchMeta1, committedBatchMeta1, committedBatchMeta2, []*encoding.Chunk{{Blocks: batch2.Blocks}}, nil) + require.NoError(t, err) + require.EqualValues(t, 2, endBlock2) + require.Equal(t, &rawdb.FinalizedBatchMeta{ + BatchHash: daBatch2.Hash(), + TotalL1MessagePopped: 0, + StateRoot: batch2LastBlock.Header.Root, + WithdrawRoot: batch2LastBlock.WithdrawRoot, + }, finalizedBatchMeta2) + + endBlock3, finalizedBatchMeta3, err := validateBatch(3, event2, finalizedBatchMeta2, committedBatchMeta2, committedBatchMeta3, []*encoding.Chunk{{Blocks: batch3.Blocks}}, nil) + require.NoError(t, err) + require.EqualValues(t, 3, endBlock3) + require.Equal(t, &rawdb.FinalizedBatchMeta{ + BatchHash: daBatch3.Hash(), + TotalL1MessagePopped: 1, + StateRoot: batch3LastBlock.Header.Root, + WithdrawRoot: batch3LastBlock.WithdrawRoot, + }, finalizedBatchMeta3) + + endBlock4, finalizedBatchMeta4, err := validateBatch(4, event2, finalizedBatchMeta3, committedBatchMeta3, committedBatchMeta4, []*encoding.Chunk{{Blocks: batch4.Blocks}}, nil) + require.NoError(t, err) + require.EqualValues(t, 4, endBlock4) + require.Equal(t, &rawdb.FinalizedBatchMeta{ + BatchHash: daBatch4.Hash(), + TotalL1MessagePopped: 6, + StateRoot: batch4LastBlock.Header.Root, + WithdrawRoot: batch4LastBlock.WithdrawRoot, + }, finalizedBatchMeta4) +} + func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { data, err := os.ReadFile(filename) assert.NoError(t, err) @@ -666,3 +815,8 @@ func readBlockFromJSON(t *testing.T, filename string) *encoding.Block { assert.NoError(t, json.Unmarshal(data, block)) return block } + +func replaceBlockNumber(block *encoding.Block, newNumber uint64) *encoding.Block { + block.Header.Number = new(big.Int).SetUint64(newNumber) + return block +} diff --git a/rollup/rollup_sync_service/testdata/blockTrace_06.json b/rollup/rollup_sync_service/testdata/blockTrace_06.json new file mode 100644 index 000000000000..ca03c58b29be --- /dev/null +++ b/rollup/rollup_sync_service/testdata/blockTrace_06.json @@ -0,0 +1,870 @@ +{ + "coinbase": { + "address": "0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": "0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": "0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + }, + "header": { + "parentHash": "0xe761181afb179bc4e6848ecc4e32af82c0eeff4aca77024f985d1dffb0ba0013", + 
"sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "miner": "0x0000000000000000000000000000000000000000", + "stateRoot": "0x155c42b3ffa9b88987b02bc8f89fb31f2b555bb8bff971d6fcd92e04a144c248", + "transactionsRoot": "0x891f5907147c83867e1e7b200b9d26fb43c3c08f81202d04235c84a2aa79f72f", + "receiptsRoot": "0x7ad169feb178baf74f7c0a12a28570bd69bd10e616acad2caea09a55fd1fb541", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x2", + "number": "0xd", + "gasLimit": "0x7a1200", + "gasUsed": "0x5dc0", + "timestamp": "0x646b6e13", + "extraData": "0xd983030201846765746889676f312e31382e3130856c696e7578000000000000f942387d5a3dba7786280b806f022e2afaec53939149ac7b132b4ef1cf5cdf393d688543d984ae15b1896185ea13f9e7ae18b22b65e5ffec9128195d7cde6fa700", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "baseFeePerGas": null, + "hash": "0x09f75bc27efe18cd77a82491370442ea5a6066e910b73dc99fe1caff950c357b" + }, + "row_consumption": [ + ], + "transactions": [ + { + "type": 126, + "nonce": 0, + "txHash": "0xed6dff31c5516b3b9d169781865276cf27501aadd45c131bf8c841c5e619e56a", + "gas": 24000, + "gasPrice": "0x0", + "from": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "to": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "chainId": "0x0", + "value": "0x0", + "data": "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e59279b510f2000000000000000000000000f2ec6b6206f6208e8f9b394efc1a01c1cbde77750000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e87480000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf0000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "isCreate": false, + "v": "0x0", + "r": "0x0", + "s": "0x0" + }, + { + "type": 0, + "nonce": 11, + "txHash": "0xed6dff31c5516b3b9d169781865276cf27501aadd45c131bf8c841c5e619e56a", + "gas": 24000, + "gasPrice": "0x0", + "from": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "to": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "chainId": "0x0", + "value": "0x0", + "data": "0x", + "isCreate": false, + "v": "0x0", + "r": "0x0", + "s": "0x0" + } + ], + "storageTrace": { + "rootBefore": "0x16d403e1c55dee3e020457262414ee7a20596922c08cac631385d8ea6d6c2c2b", + "rootAfter": "0x155c42b3ffa9b88987b02bc8f89fb31f2b555bb8bff971d6fcd92e04a144c248", + "proofs": { + "0x1a258d17bF244C4dF02d40343a7626A9D321e105": [ + "0x000f2d6436a450dc3daf4f111527f3e187a9641e7c5cbc4f53a386e6e4114bb8202cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + 
"0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e52c0d8469864d5ee8e0d62944e8dc1de68f78b094d3ef7cf72a21b372866bab0a", + "0x001dcee8089ea21f679f1af199cc93ccb35fdea1257b9ffeac0ae5c89654a0dbce20790d9030fd3f822620f7395f1af3ca53789e7451f811c2364f2b4fa19be9fd", + "0x000d62fbf3a623b87d67d8f97132a8f1759360c03c1b78ea3654238eb6c72fd5dd0742c02437cc0294c49133a28968ba1f913963d9c2892254da675958cd4a4b2e", + "0x0026875849a967c3af8bbd7ac6efb4ef8250efaee44c8bd85ac026d541c7f509ac18ae138a98367696a39f7abe0a53fd3b32283fa843bdc4a2485d65b3b9651670", + "0x0125375fd5ae821cd3e835e2fba4ae79971635b7288d549ba8ba66bea36603686c05080000000000000000000000000000000000000000000000000867000000000000000130644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce1160000030221b0e9cf191ce544dcc5c8927fd08af82cb88be110d9533468ffd2d575aed31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb8351cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2201a258d17bf244c4df02d40343a7626a9d321e105000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x478CDd110520a8e733e2ACF9e543d2c687EA5239": [ + "0x000f2d6436a450dc3daf4f111527f3e187a9641e7c5cbc4f53a386e6e4114bb8202cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e52c0d8469864d5ee8e0d62944e8dc1de68f78b094d3ef7cf72a21b372866bab0a", + "0x000bf7d923da6cc335d4074262981bf4615b43a8eb2a4dd6f2eda4fd8e1503d9311c4e63762bb10044749243a2b52db21797da53a89ba6b8ceb5cee1596150ac45", + "0x002b29daef215b12b331bf75a98e595b8a10a91928f479cca3562db3859315055a1cb697055013d78d58072071584b3e40e8d846948c8e829cbbe9915e4bcf08f0", + "0x00000000000000000000000000000000000000000000000000000000000000000007b1a84d4b19493ba2ca6a59dbc42d0e8559a7f8fb0c066bb8b1d90ceee9ce5c", + "0x0000000000000000000000000000000000000000000000000000000000000000000e9e173703b7c89f67443e861d959df35575c16617ea238fd235d8612f9020ba", + "0x0000000000000000000000000000000000000000000000000000000000000000000ea71dd32b28e075772420197e740ad0ed7990e3f6e5be7f5051f0c0709defce", + "0x000000000000000000000000000000000000000000000000000000000000000000186f00dca57567f28233cef5140efd49b1624b0ec3aef5b7f7ee42f03c3b6231", + "0x0006aac99418e9b09baea374df117e64523910d04427251eec6a9b482b6433bc54186c0fb6b2462a9c851df47ab11054dac43ed5b3f9d8d8a5fcf2fd0f9eb3e147", + "0x0109c2edb6138e8d6dc8f0b8b5ae98dd721c7053061887757f6749c484bddf92fa05080000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4702098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b6486420478cdd110520a8e733e2acf9e543d2c687ea5239000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x5300000000000000000000000000000000000000": [ + "0x000f2d6436a450dc3daf4f111527f3e187a9641e7c5cbc4f53a386e6e4114bb8202cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e52c0d8469864d5ee8e0d62944e8dc1de68f78b094d3ef7cf72a21b372866bab0a", + "0x001dcee8089ea21f679f1af199cc93ccb35fdea1257b9ffeac0ae5c89654a0dbce20790d9030fd3f822620f7395f1af3ca53789e7451f811c2364f2b4fa19be9fd", + 
"0x000d62fbf3a623b87d67d8f97132a8f1759360c03c1b78ea3654238eb6c72fd5dd0742c02437cc0294c49133a28968ba1f913963d9c2892254da675958cd4a4b2e", + "0x0026875849a967c3af8bbd7ac6efb4ef8250efaee44c8bd85ac026d541c7f509ac18ae138a98367696a39f7abe0a53fd3b32283fa843bdc4a2485d65b3b9651670", + "0x000a3197466e4643551413444b60bbf8ab0ced04566326492fdf1993586eec3fe10000000000000000000000000000000000000000000000000000000000000000", + "0x002143f0cbad38f9696bb9c0be84281e5b517a06983edef7c75485b7a06473c97921dd9af8de7aade9fba53909b1a98ae938236ceec8ba6346ba3ba75c039194d7", + "0x0115d04fcf1fe3d9a4cc7a76b70fafcd7b9304b42108af39d9e500be391563775c0508000000000000000000000000000000000000000000000000064d000000000000000000000000000000000000000000000000000000000000000000000000000000002908ab50d1edc9dac80a344f44731acf807809c545e3388816b97a9882b5d4f974ae902ff6a84825a9cde7cc5f26e8c414e88139716c3423ed908f0a60c996011c70d94e9dc7c85d39f6877b01e59a87c057882957d9fd16c55025dfdcaa4d93205300000000000000000000000000000000000000000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x5300000000000000000000000000000000000002": [ + "0x000f2d6436a450dc3daf4f111527f3e187a9641e7c5cbc4f53a386e6e4114bb8202cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x000150eaa497ee8904a3d2dc8350c03963fb1786ea5253d5cc16f321afcd862cee107e99fa497bffacfb8ab50a44b93c9a74bc7c669323c7fbd0560a657342c55a", + "0x000877a6983a09f78254ca94a086eb673296f5583aa33855bfbdbe6d2fadf0ff0107b2e01ad456a3ec4c88478c604ad6a15c6fb572259e49ef4cc781940fe1375e", + "0x0013b6a97296cf294d19f634904a7fa973d9714b90cc42e0456ad428b7278f338e0accad868d7f4aaa755b29eae6ad523415a9df210ffced28d7d33fa6d5a319b3", + "0x0011de0e672d258d43c785592fc939bc105441bafc9c1455901723358b0a73d5cc29562af63a2293f036058180ce56f5269c6a3d4d18d8e1dc75ef03cb8f51f8b9", + "0x01236b0ff4611519fb52869dd99bedcb730ebe17544687c5064da49f42f741831d05080000000000000000000000000000000000000000000000000873000000000000000000000000000000000000000000000000000000000000000000000000000000001bd955d4ef171429eb11fade67006376e84bf94630ddb9b9948c3f385ce0f05aa48c68219d344cebd30fca18d0777f587e55052ae6161c88fa4c16407211ddaa0d39d683afa3720f93c44224e2b95a5871a5a2207b5323f7fbf8f1862120ba90205300000000000000000000000000000000000002000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x5300000000000000000000000000000000000005": [ + "0x000f2d6436a450dc3daf4f111527f3e187a9641e7c5cbc4f53a386e6e4114bb8202cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e52c0d8469864d5ee8e0d62944e8dc1de68f78b094d3ef7cf72a21b372866bab0a", + "0x000bf7d923da6cc335d4074262981bf4615b43a8eb2a4dd6f2eda4fd8e1503d9311c4e63762bb10044749243a2b52db21797da53a89ba6b8ceb5cee1596150ac45", + "0x002b29daef215b12b331bf75a98e595b8a10a91928f479cca3562db3859315055a1cb697055013d78d58072071584b3e40e8d846948c8e829cbbe9915e4bcf08f0", + "0x011facf302b106912bccc8194dff4cb12139e7f04288d3f5eefb57ccf4d842ba22050800000000000000000000000000000000000000000000000006740000000000000000000000000000000000000000000000000000000000000000002aa86921dcd2c018f4988204e816e17e42d9f9a2a468d8ca70ad453a88d3e371a0d9f743b799a6256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d602c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5205300000000000000000000000000000000000005000000000000000000000000", + 
"0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + }, + "storageProofs": { + "0x1a258d17bF244C4dF02d40343a7626A9D321e105": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": [ + "0x001914b8a8cb4d4339d89ed1d5e6cd54ec609082fdf42fadb2d4101f3214f2a2290a1746dfbdf492c00e2854b46eda6adad88ad1b0583997db4121cb7d8e6de5ca", + "0x00084878451370def5a5648862c037adb6ae24f29b9237a1823638ca29d573bdd42446af3926a42a7e8b65f9a5fdd5a00e82e4f2b9684816fdc5d52c238bef604a", + "0x00027f6e365685a83e63cde58e13d22b99c130a578178f8198d755171a2ff97bf303e187b8ea9652424a9d9dac9bc16796838b196f141c6db57136643f22b48468", + "0x00149dad479c283104bb461dcce598d82aacff80a5844d863d8f64e0d3f3e83b1a0000000000000000000000000000000000000000000000000000000000000000", + "0x001f232429e01853a7456bc8bb4cbc3a35c132f7783e2b300306bceb64a44ce81e0000000000000000000000000000000000000000000000000000000000000000", + "0x0027e1c425d61d4468534c93b8aa80c34bbdea9ec2d69df7a730ecacf0089b22640000000000000000000000000000000000000000000000000000000000000000", + "0x001f4bdfdda0df475064a0ea35302dddc6401b8c93afad9a7569afb9f2534750560000000000000000000000000000000000000000000000000000000000000000", + "0x0001fc65caf9a60abae81bcb17c4854fa114100528e73ab1e649fac03ed9fa764e304459eb829e92aa3009534c4eba916b2900783c694385d2e7f87004e7649215", + "0x01249c7b39f739f430be8e1e2cae0f1db06dfe2f8d4cc631d312d5b98efb3e7402010100000000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce84820b53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + }, + "0x5300000000000000000000000000000000000000": { + "0x0000000000000000000000000000000000000000000000000000000000000000": [ + "0x0004f706d28ba7344cc73128f383e7f4df4c79f296a56e1bbc24cdfab5bc4cba5c2a970eaf68f6e47243e30bea39087adc0082afa5fd55fc5537baccd03f786953", + "0x00296af6438bc81ff661ef6d1bb16d33d6784e88ae39ff28258e56e4e72d5607052bb61b23d947a704c29df01936e7c557bf9ec541243566a336b43f8aeca37eed", + "0x001750ff1780c9b253cfcbd6274a4f79f3a95819e0856c31f0a6025e30ac3a5b261b73cc5623d88d2687f0fa6006bc823149c779b9e751477a6f2b83773062ddbe", + "0x0004c8c2bf27ee6712f4175555679ff662b9423a1d7205fe31e77999106cfb5a2f0efef64a4ef3d151d1364174e0e72745aeee51bf93fb17f8071e6daf4571a736", + "0x001de6dfed408db1b0cf580652da17c9277834302d9ee2c39ab074675ca61fd9e02ea58d0958b74734329987e16d8afa4d83a7acc46417a7f7dbc1fd42e305b394", + "0x001dd3e7dce636d92fdb4dd8b65cb4e5b8ffd3d64e54a51d93a527826bb1ec3a480000000000000000000000000000000000000000000000000000000000000000", + "0x02", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + }, + "0x5300000000000000000000000000000000000002": { + "0x0000000000000000000000000000000000000000000000000000000000000001": [ + "0x00024a2d3ee220db30dece4b39c0cffc2ba97ddded52a3f2da3aeed1f485d0a7220000000000000000000000000000000000000000000000000000000000000000", + "0x001da3cd3096ffd62c95bad392eedc1c578e7ccf248898c49c5ed82abb49a4b31a2b63c0d58a64939cf9026618503b904e267eeb0e465e15812b85485e81fb856c", + "0x01232927899d46fea05cc897a4f4671f808aa83c4eaf89396dfab15480fee91e8e010100000000000000000000000000005300000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000004", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x0000000000000000000000000000000000000000000000000000000000000002": [ + 
"0x00024a2d3ee220db30dece4b39c0cffc2ba97ddded52a3f2da3aeed1f485d0a7220000000000000000000000000000000000000000000000000000000000000000", + "0x001da3cd3096ffd62c95bad392eedc1c578e7ccf248898c49c5ed82abb49a4b31a2b63c0d58a64939cf9026618503b904e267eeb0e465e15812b85485e81fb856c", + "0x012098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864010100000000000000000000000000006f4c950442e1af093bcff730381e63ae9171b87a200000000000000000000000000000000000000000000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x0000000000000000000000000000000000000000000000000000000000000003": [ + "0x00024a2d3ee220db30dece4b39c0cffc2ba97ddded52a3f2da3aeed1f485d0a7220000000000000000000000000000000000000000000000000000000000000000", + "0x001da3cd3096ffd62c95bad392eedc1c578e7ccf248898c49c5ed82abb49a4b31a2b63c0d58a64939cf9026618503b904e267eeb0e465e15812b85485e81fb856c", + "0x012098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864010100000000000000000000000000006f4c950442e1af093bcff730381e63ae9171b87a200000000000000000000000000000000000000000000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + } + } + }, + "executionResults": [ + { + "gas": 24000, + "failed": true, + "returnValue": "", + "from": { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 10, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + "to": { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + "accountAfter": [ + { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 11, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + { + "address": "0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": "0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": "0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + } + ], + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "byteCode": 
"0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100985780638f283970146100c9578063f851a440146100e95761005d565b3661005d5761005b6100fe565b005b61005b6100fe565b34801561007157600080fd5b5061005b6100803660046106f1565b610118565b61005b61009336600461070c565b61015f565b3480156100a457600080fd5b506100ad6101d0565b6040516001600160a01b03909116815260200160405180910390f35b3480156100d557600080fd5b5061005b6100e43660046106f1565b61020b565b3480156100f557600080fd5b506100ad610235565b61010661029b565b61011661011161033a565b610344565b565b610120610368565b6001600160a01b0316336001600160a01b03161415610157576101548160405180602001604052806000815250600061039b565b50565b6101546100fe565b610167610368565b6001600160a01b0316336001600160a01b031614156101c8576101c38383838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061039b915050565b505050565b6101c36100fe565b60006101da610368565b6001600160a01b0316336001600160a01b03161415610200576101fb61033a565b905090565b6102086100fe565b90565b610213610368565b6001600160a01b0316336001600160a01b0316141561015757610154816103c6565b600061023f610368565b6001600160a01b0316336001600160a01b03161415610200576101fb610368565b6060610285838360405180606001604052806027815260200161080b6027913961041a565b9392505050565b6001600160a01b03163b151590565b6102a3610368565b6001600160a01b0316336001600160a01b031614156101165760405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b60006101fb6104f7565b3660008037600080366000845af43d6000803e808015610363573d6000f35b3d6000fd5b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b6103a48361051f565b6000825111806103b15750805b156101c3576103c08383610260565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f6103ef610368565b604080516001600160a01b03928316815291841660208301520160405180910390a16101548161055f565b60606001600160a01b0384163b6104825760405162461bcd60e51b815260206004820152602660248201527f416464726573733a2064656c65676174652063616c6c20746f206e6f6e2d636f6044820152651b9d1c9858dd60d21b6064820152608401610331565b600080856001600160a01b03168560405161049d91906107bb565b600060405180830381855af49150503d80600081146104d8576040519150601f19603f3d011682016040523d82523d6000602084013e6104dd565b606091505b50915091506104ed828286610608565b9695505050505050565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61038c565b61052881610641565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105c45760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b6064820152608401610331565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b60608315610617575081610285565b8251156106275782518084602001fd5b8160405162461bcd60e51b815260040161033191906107d7565b6001600160a01b0381163b6106ae5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401610331565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105e7565b80356001600160a01b03811681146106ec57600080fd5b919050
565b60006020828403121561070357600080fd5b610285826106d5565b60008060006040848603121561072157600080fd5b61072a846106d5565b9250602084013567ffffffffffffffff8082111561074757600080fd5b818601915086601f83011261075b57600080fd5b81358181111561076a57600080fd5b87602082850101111561077c57600080fd5b6020830194508093505050509250925092565b60005b838110156107aa578181015183820152602001610792565b838111156103c05750506000910152565b600082516107cd81846020870161078f565b9190910192915050565b60208152600082518060208401526107f681604085016020870161078f565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a2646970667358221220366737524a7ac8fa76e3b2cd04bb1e0b8aa75e165c32f59b0076ead59d529de564736f6c634300080a0033", + "structLogs": [ + { + "pc": 0, + "op": "PUSH1", + "gas": 320, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 2, + "op": "PUSH1", + "gas": 317, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x80" + ] + }, + { + "pc": 4, + "op": "MSTORE", + "gas": 314, + "gasCost": 12, + "depth": 1, + "stack": [ + "0x80", + "0x40" + ] + }, + { + "pc": 5, + "op": "PUSH1", + "gas": 302, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 7, + "op": "CALLDATASIZE", + "gas": 299, + "gasCost": 2, + "depth": 1, + "stack": [ + "0x4" + ] + }, + { + "pc": 8, + "op": "LT", + "gas": 297, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x4", + "0x184" + ] + }, + { + "pc": 9, + "op": "PUSH2", + "gas": 294, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 12, + "op": "JUMPI", + "gas": 291, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x0", + "0x4e" + ] + }, + { + "pc": 13, + "op": "PUSH1", + "gas": 281, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 15, + "op": "CALLDATALOAD", + "gas": 278, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 16, + "op": "PUSH1", + "gas": 275, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592" + ] + }, + { + "pc": 18, + "op": "SHR", + "gas": 272, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592", + "0xe0" + ] + }, + { + "pc": 19, + "op": "DUP1", + "gas": 269, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 20, + "op": "PUSH4", + "gas": 266, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 25, + "op": "EQ", + "gas": 263, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x3659cfe6" + ] + }, + { + "pc": 26, + "op": "PUSH2", + "gas": 260, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 29, + "op": "JUMPI", + "gas": 257, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x65" + ] + }, + { + "pc": 30, + "op": "DUP1", + "gas": 247, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 31, + "op": "PUSH4", + "gas": 244, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 36, + "op": "EQ", + "gas": 241, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x4f1ef286" + ] + }, + { + "pc": 37, + "op": "PUSH2", + "gas": 238, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 40, + "op": "JUMPI", + "gas": 235, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x85" + ] + }, + { + "pc": 41, + "op": "DUP1", + "gas": 225, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { 
+ "pc": 42, + "op": "PUSH4", + "gas": 222, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 47, + "op": "EQ", + "gas": 219, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x5c60da1b" + ] + }, + { + "pc": 48, + "op": "PUSH2", + "gas": 216, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 51, + "op": "JUMPI", + "gas": 213, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x98" + ] + }, + { + "pc": 52, + "op": "DUP1", + "gas": 203, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 53, + "op": "PUSH4", + "gas": 200, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 58, + "op": "EQ", + "gas": 197, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x8f283970" + ] + }, + { + "pc": 59, + "op": "PUSH2", + "gas": 194, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 62, + "op": "JUMPI", + "gas": 191, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xc9" + ] + }, + { + "pc": 63, + "op": "DUP1", + "gas": 181, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 64, + "op": "PUSH4", + "gas": 178, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 69, + "op": "EQ", + "gas": 175, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0xf851a440" + ] + }, + { + "pc": 70, + "op": "PUSH2", + "gas": 172, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 73, + "op": "JUMPI", + "gas": 169, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xe9" + ] + }, + { + "pc": 74, + "op": "PUSH2", + "gas": 159, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 77, + "op": "JUMP", + "gas": 156, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5d" + ] + }, + { + "pc": 93, + "op": "JUMPDEST", + "gas": 148, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 94, + "op": "PUSH2", + "gas": 147, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 97, + "op": "PUSH2", + "gas": 144, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 100, + "op": "JUMP", + "gas": 141, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0xfe" + ] + }, + { + "pc": 254, + "op": "JUMPDEST", + "gas": 133, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 255, + "op": "PUSH2", + "gas": 132, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 258, + "op": "PUSH2", + "gas": 129, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 261, + "op": "JUMP", + "gas": 126, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x29b" + ] + }, + { + "pc": 667, + "op": "JUMPDEST", + "gas": 118, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 668, + "op": "PUSH2", + "gas": 117, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 671, + "op": "PUSH2", + "gas": 114, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 674, + "op": "JUMP", + "gas": 111, + "gasCost": 
8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x368" + ] + }, + { + "pc": 872, + "op": "JUMPDEST", + "gas": 103, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 873, + "op": "PUSH1", + "gas": 102, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 875, + "op": "PUSH32", + "gas": 99, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0" + ] + }, + { + "pc": 908, + "op": "JUMPDEST", + "gas": 96, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ] + }, + { + "pc": 909, + "op": "SLOAD", + "gas": 95, + "gasCost": 2100, + "depth": 1, + "error": "out of gas", + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ], + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + }, + "extraData": { + "proofList": [ + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151, + "storage": { + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "value": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + } + } + ] + } + } + ] + } + ], + "withdraw_trie_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + } \ No newline at end of file diff --git a/rollup/rollup_sync_service/testdata/blockTrace_07.json b/rollup/rollup_sync_service/testdata/blockTrace_07.json new file mode 100644 index 000000000000..836f475e8dab --- /dev/null +++ b/rollup/rollup_sync_service/testdata/blockTrace_07.json @@ -0,0 +1,3713 @@ +{ + "chainID": 222222, + "version": "3.2.1-alpha-3926c3be", + "coinbase": { + "address": "0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": "0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": "0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + }, + "header": { + "parentHash": "0x13c3a48df629a7e59b48bb1c86acf39f54a3675f53eab1cd55ffab8c79316509", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "miner": "0x0000000000000000000000000000000000000000", + "stateRoot": "0x03600db82d06d6f0615c9a37c3fb3a2aab90214859e273f866ed75cf6cddac6b", + "transactionsRoot": "0xad02a6564c31d174daaf8d1ff9989e389551dc28c9fab56a6c9538a11b51e72b", + "receiptsRoot": "0xb34de12e8c48e5f31b5b9360fa683d4a604035919de337c4ca04ab2cffa63625", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "difficulty": "0x2", + "number": "0x11", + "gasLimit": "0x7a1200", + "gasUsed": "0x1d4c0", + "timestamp": "0x646b6ed0", + "extraData": "0xd983030201846765746889676f312e31382e3130856c696e7578000000000000de7d5264f79a11dee9fd011f46b9353dee5ca5fa2c90b1151d276b0de78dcf8e442fb021951f6891532a93a34e4d34e35ac417681caa40c3f209a88e46b33f7401", + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000000", + "baseFeePerGas": null, + "hash": "0x003fee335455c0c293dda17ea9365fe0caa94071ed7216baf61f7aeb808e8a28" + }, + "row_consumption": [ + ], + "transactions": [ + { + "type": 126, + "nonce": 1, + "txHash": "0x9f546503cfc8a8ab4ad1bed5404fd9749f607702842a2a991f7378041854183f", + "gas": 24000, + "gasPrice": "0x0", + "from": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "to": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "chainId": "0x0", + "value": "0x0", + "data": "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e59279b510f2000000000000000000000000f2ec6b6206f6208e8f9b394efc1a01c1cbde77750000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002500000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e87480000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf0000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "isCreate": false, + "v": "0x0", + "r": "0x0", + "s": "0x0" + }, + { + "type": 126, + "nonce": 2, + "txHash": "0x932534fde469ab4f5e177b6cf83315e9ad4ff58163c61d4c998213904ed23c7f", + "gas": 24000, + "gasPrice": "0x0", + "from": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "to": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "chainId": "0x0", + "value": "0x0", + "data": "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e59279b510f2000000000000000000000000f2ec6b6206f6208e8f9b394efc1a01c1cbde77750000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e87480000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf0000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "isCreate": false, + "v": "0x0", + "r": "0x0", + "s": "0x0" + }, + { + "type": 126, + "nonce": 3, + "txHash": 
"0x385fe2f4ee6060d891bc4b9ee0ac8a631097fe1c3d4b9a5327c7b870682e7552", + "gas": 24000, + "gasPrice": "0x0", + "from": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "to": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "chainId": "0x0", + "value": "0x0", + "data": "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e59279b510f2000000000000000000000000f2ec6b6206f6208e8f9b394efc1a01c1cbde77750000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002700000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e87480000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf0000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "isCreate": false, + "v": "0x0", + "r": "0x0", + "s": "0x0" + }, + { + "type": 126, + "nonce": 4, + "txHash": "0x3893c2d9caad874ccd3da9d89b61aa7e77a4b94f89855cb1596eee1147cd6f73", + "gas": 24000, + "gasPrice": "0x0", + "from": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "to": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "chainId": "0x0", + "value": "0x0", + "data": "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e59279b510f2000000000000000000000000f2ec6b6206f6208e8f9b394efc1a01c1cbde77750000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002800000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e87480000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf0000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "isCreate": false, + "v": "0x0", + "r": "0x0", + "s": "0x0" + }, + { + "type": 126, + "nonce": 5, + "txHash": "0x7eaa620a880d881142e6716a8a526d4c16af21cc26c9058ff5bb589aa5d87523", + "gas": 24000, + "gasPrice": "0x0", + "from": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "to": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "chainId": "0x0", + "value": "0x0", + "data": "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e59279b510f2000000000000000000000000f2ec6b6206f6208e8f9b394efc1a01c1cbde77750000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002900000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a4232e87480000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf0000000000000000000000002b5ad5c4795c026514f8317c7a215e218dccd6cf00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "isCreate": false, + "v": "0x0", + "r": "0x0", + "s": "0x0" + } + ], + "storageTrace": { + "rootBefore": "0x175df7d5c86d17089e01563e4b2cb9aece5b5ab51931a0196ca52f4c05c0baa0", + 
"rootAfter": "0x03600db82d06d6f0615c9a37c3fb3a2aab90214859e273f866ed75cf6cddac6b", + "proofs": { + "0x1a258d17bF244C4dF02d40343a7626A9D321e105": [ + "0x00219880aab690429224e09d6931814a6919738db08a339ff0b54478161730bdb42cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e505bdf4a0e62b30bba833f966fd24562ac54f109509962ecf29aed0b67d08f4f3", + "0x001dcee8089ea21f679f1af199cc93ccb35fdea1257b9ffeac0ae5c89654a0dbce20790d9030fd3f822620f7395f1af3ca53789e7451f811c2364f2b4fa19be9fd", + "0x000d62fbf3a623b87d67d8f97132a8f1759360c03c1b78ea3654238eb6c72fd5dd0742c02437cc0294c49133a28968ba1f913963d9c2892254da675958cd4a4b2e", + "0x0026875849a967c3af8bbd7ac6efb4ef8250efaee44c8bd85ac026d541c7f509ac18ae138a98367696a39f7abe0a53fd3b32283fa843bdc4a2485d65b3b9651670", + "0x0125375fd5ae821cd3e835e2fba4ae79971635b7288d549ba8ba66bea36603686c05080000000000000000000000000000000000000000000000000867000000000000000130644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce1160000030221b0e9cf191ce544dcc5c8927fd08af82cb88be110d9533468ffd2d575aed31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb8351cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2201a258d17bf244c4df02d40343a7626a9d321e105000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x478CDd110520a8e733e2ACF9e543d2c687EA5239": [ + "0x00219880aab690429224e09d6931814a6919738db08a339ff0b54478161730bdb42cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e505bdf4a0e62b30bba833f966fd24562ac54f109509962ecf29aed0b67d08f4f3", + "0x00150fe477116ceddc889b77cdf43c38b46e00f309f80cbed73d478cf012b23f1b1c4e63762bb10044749243a2b52db21797da53a89ba6b8ceb5cee1596150ac45", + "0x002b29daef215b12b331bf75a98e595b8a10a91928f479cca3562db3859315055a153c7f3aa910c0b391c34f4d84236061fb73c23109153ca6b5d590382bb339ed", + "0x00000000000000000000000000000000000000000000000000000000000000000025aca70325f2e717478dafb557c316b85719987d3585dc74479f00c8a780d154", + "0x0000000000000000000000000000000000000000000000000000000000000000001207de03b3476b45074ef61e712923ae8686ba20d935e187ce14ee8705c12997", + "0x00000000000000000000000000000000000000000000000000000000000000000012f68b93918e4f78f57292abd9ee9dd50700e086e1235b8d00bc20b504d9d636", + "0x00000000000000000000000000000000000000000000000000000000000000000002c2892a683e4c55452bac236671a3d3d950993a16f3bfe1fe0dc0e7eb905882", + "0x000bdc67dc26f0c31dff4f27c2189c4fa2f805debefed3becadcd996900f74353b186c0fb6b2462a9c851df47ab11054dac43ed5b3f9d8d8a5fcf2fd0f9eb3e147", + "0x0109c2edb6138e8d6dc8f0b8b5ae98dd721c7053061887757f6749c484bddf92fa05080000000000000000000000000000000000000000000000000000000000000000002500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a4702098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b6486420478cdd110520a8e733e2acf9e543d2c687ea5239000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x5300000000000000000000000000000000000000": [ + "0x00219880aab690429224e09d6931814a6919738db08a339ff0b54478161730bdb42cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + 
"0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e505bdf4a0e62b30bba833f966fd24562ac54f109509962ecf29aed0b67d08f4f3", + "0x001dcee8089ea21f679f1af199cc93ccb35fdea1257b9ffeac0ae5c89654a0dbce20790d9030fd3f822620f7395f1af3ca53789e7451f811c2364f2b4fa19be9fd", + "0x000d62fbf3a623b87d67d8f97132a8f1759360c03c1b78ea3654238eb6c72fd5dd0742c02437cc0294c49133a28968ba1f913963d9c2892254da675958cd4a4b2e", + "0x0026875849a967c3af8bbd7ac6efb4ef8250efaee44c8bd85ac026d541c7f509ac18ae138a98367696a39f7abe0a53fd3b32283fa843bdc4a2485d65b3b9651670", + "0x000a3197466e4643551413444b60bbf8ab0ced04566326492fdf1993586eec3fe10000000000000000000000000000000000000000000000000000000000000000", + "0x002143f0cbad38f9696bb9c0be84281e5b517a06983edef7c75485b7a06473c97921dd9af8de7aade9fba53909b1a98ae938236ceec8ba6346ba3ba75c039194d7", + "0x0115d04fcf1fe3d9a4cc7a76b70fafcd7b9304b42108af39d9e500be391563775c0508000000000000000000000000000000000000000000000000064d000000000000000000000000000000000000000000000000000000000000000000000000000000002908ab50d1edc9dac80a344f44731acf807809c545e3388816b97a9882b5d4f974ae902ff6a84825a9cde7cc5f26e8c414e88139716c3423ed908f0a60c996011c70d94e9dc7c85d39f6877b01e59a87c057882957d9fd16c55025dfdcaa4d93205300000000000000000000000000000000000000000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x5300000000000000000000000000000000000002": [ + "0x00219880aab690429224e09d6931814a6919738db08a339ff0b54478161730bdb42cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x000150eaa497ee8904a3d2dc8350c03963fb1786ea5253d5cc16f321afcd862cee107e99fa497bffacfb8ab50a44b93c9a74bc7c669323c7fbd0560a657342c55a", + "0x000877a6983a09f78254ca94a086eb673296f5583aa33855bfbdbe6d2fadf0ff0107b2e01ad456a3ec4c88478c604ad6a15c6fb572259e49ef4cc781940fe1375e", + "0x0013b6a97296cf294d19f634904a7fa973d9714b90cc42e0456ad428b7278f338e0accad868d7f4aaa755b29eae6ad523415a9df210ffced28d7d33fa6d5a319b3", + "0x0011de0e672d258d43c785592fc939bc105441bafc9c1455901723358b0a73d5cc29562af63a2293f036058180ce56f5269c6a3d4d18d8e1dc75ef03cb8f51f8b9", + "0x01236b0ff4611519fb52869dd99bedcb730ebe17544687c5064da49f42f741831d05080000000000000000000000000000000000000000000000000873000000000000000000000000000000000000000000000000000000000000000000000000000000001bd955d4ef171429eb11fade67006376e84bf94630ddb9b9948c3f385ce0f05aa48c68219d344cebd30fca18d0777f587e55052ae6161c88fa4c16407211ddaa0d39d683afa3720f93c44224e2b95a5871a5a2207b5323f7fbf8f1862120ba90205300000000000000000000000000000000000002000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x5300000000000000000000000000000000000005": [ + "0x00219880aab690429224e09d6931814a6919738db08a339ff0b54478161730bdb42cc33de5af63f5deca2409302103a4523463a3a16529835d526795e8966079db", + "0x0029ce00b3e5ddca3bd22d3a923b95239ed11243363803b8e1f5a89fb37ee3c6e505bdf4a0e62b30bba833f966fd24562ac54f109509962ecf29aed0b67d08f4f3", + "0x00150fe477116ceddc889b77cdf43c38b46e00f309f80cbed73d478cf012b23f1b1c4e63762bb10044749243a2b52db21797da53a89ba6b8ceb5cee1596150ac45", + "0x002b29daef215b12b331bf75a98e595b8a10a91928f479cca3562db3859315055a153c7f3aa910c0b391c34f4d84236061fb73c23109153ca6b5d590382bb339ed", + 
"0x011facf302b106912bccc8194dff4cb12139e7f04288d3f5eefb57ccf4d842ba22050800000000000000000000000000000000000000000000000006740000000000000000000000000000000000000000000000000000000000000000002aa86921dcd2c018f4988204e816e17e42d9f9a2a468d8ca70ad453a88d3e371a0d9f743b799a6256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d602c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5205300000000000000000000000000000000000005000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + }, + "storageProofs": { + "0x1a258d17bF244C4dF02d40343a7626A9D321e105": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": [ + "0x001914b8a8cb4d4339d89ed1d5e6cd54ec609082fdf42fadb2d4101f3214f2a2290a1746dfbdf492c00e2854b46eda6adad88ad1b0583997db4121cb7d8e6de5ca", + "0x00084878451370def5a5648862c037adb6ae24f29b9237a1823638ca29d573bdd42446af3926a42a7e8b65f9a5fdd5a00e82e4f2b9684816fdc5d52c238bef604a", + "0x00027f6e365685a83e63cde58e13d22b99c130a578178f8198d755171a2ff97bf303e187b8ea9652424a9d9dac9bc16796838b196f141c6db57136643f22b48468", + "0x00149dad479c283104bb461dcce598d82aacff80a5844d863d8f64e0d3f3e83b1a0000000000000000000000000000000000000000000000000000000000000000", + "0x001f232429e01853a7456bc8bb4cbc3a35c132f7783e2b300306bceb64a44ce81e0000000000000000000000000000000000000000000000000000000000000000", + "0x0027e1c425d61d4468534c93b8aa80c34bbdea9ec2d69df7a730ecacf0089b22640000000000000000000000000000000000000000000000000000000000000000", + "0x001f4bdfdda0df475064a0ea35302dddc6401b8c93afad9a7569afb9f2534750560000000000000000000000000000000000000000000000000000000000000000", + "0x0001fc65caf9a60abae81bcb17c4854fa114100528e73ab1e649fac03ed9fa764e304459eb829e92aa3009534c4eba916b2900783c694385d2e7f87004e7649215", + "0x01249c7b39f739f430be8e1e2cae0f1db06dfe2f8d4cc631d312d5b98efb3e7402010100000000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce84820b53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + }, + "0x5300000000000000000000000000000000000000": { + "0x0000000000000000000000000000000000000000000000000000000000000000": [ + "0x0004f706d28ba7344cc73128f383e7f4df4c79f296a56e1bbc24cdfab5bc4cba5c2a970eaf68f6e47243e30bea39087adc0082afa5fd55fc5537baccd03f786953", + "0x00296af6438bc81ff661ef6d1bb16d33d6784e88ae39ff28258e56e4e72d5607052bb61b23d947a704c29df01936e7c557bf9ec541243566a336b43f8aeca37eed", + "0x001750ff1780c9b253cfcbd6274a4f79f3a95819e0856c31f0a6025e30ac3a5b261b73cc5623d88d2687f0fa6006bc823149c779b9e751477a6f2b83773062ddbe", + "0x0004c8c2bf27ee6712f4175555679ff662b9423a1d7205fe31e77999106cfb5a2f0efef64a4ef3d151d1364174e0e72745aeee51bf93fb17f8071e6daf4571a736", + "0x001de6dfed408db1b0cf580652da17c9277834302d9ee2c39ab074675ca61fd9e02ea58d0958b74734329987e16d8afa4d83a7acc46417a7f7dbc1fd42e305b394", + "0x001dd3e7dce636d92fdb4dd8b65cb4e5b8ffd3d64e54a51d93a527826bb1ec3a480000000000000000000000000000000000000000000000000000000000000000", + "0x02", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + }, + "0x5300000000000000000000000000000000000002": { + "0x0000000000000000000000000000000000000000000000000000000000000001": [ + "0x00024a2d3ee220db30dece4b39c0cffc2ba97ddded52a3f2da3aeed1f485d0a7220000000000000000000000000000000000000000000000000000000000000000", + 
"0x001da3cd3096ffd62c95bad392eedc1c578e7ccf248898c49c5ed82abb49a4b31a2b63c0d58a64939cf9026618503b904e267eeb0e465e15812b85485e81fb856c", + "0x01232927899d46fea05cc897a4f4671f808aa83c4eaf89396dfab15480fee91e8e010100000000000000000000000000005300000000000000000000000000000000000003200000000000000000000000000000000000000000000000000000000000000004", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x0000000000000000000000000000000000000000000000000000000000000002": [ + "0x00024a2d3ee220db30dece4b39c0cffc2ba97ddded52a3f2da3aeed1f485d0a7220000000000000000000000000000000000000000000000000000000000000000", + "0x001da3cd3096ffd62c95bad392eedc1c578e7ccf248898c49c5ed82abb49a4b31a2b63c0d58a64939cf9026618503b904e267eeb0e465e15812b85485e81fb856c", + "0x012098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864010100000000000000000000000000006f4c950442e1af093bcff730381e63ae9171b87a200000000000000000000000000000000000000000000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ], + "0x0000000000000000000000000000000000000000000000000000000000000003": [ + "0x00024a2d3ee220db30dece4b39c0cffc2ba97ddded52a3f2da3aeed1f485d0a7220000000000000000000000000000000000000000000000000000000000000000", + "0x001da3cd3096ffd62c95bad392eedc1c578e7ccf248898c49c5ed82abb49a4b31a2b63c0d58a64939cf9026618503b904e267eeb0e465e15812b85485e81fb856c", + "0x012098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864010100000000000000000000000000006f4c950442e1af093bcff730381e63ae9171b87a200000000000000000000000000000000000000000000000000000000000000000", + "0x5448495320495320534f4d45204d4147494320425954455320464f5220534d54206d3172525867503278704449" + ] + } + } + }, + "executionResults": [ + { + "gas": 24000, + "failed": true, + "returnValue": "", + "from": { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 37, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + "to": { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + "accountAfter": [ + { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 38, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + { + "address": "0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": "0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": "0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + 
} + ], + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "byteCode": "0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100985780638f283970146100c9578063f851a440146100e95761005d565b3661005d5761005b6100fe565b005b61005b6100fe565b34801561007157600080fd5b5061005b6100803660046106f1565b610118565b61005b61009336600461070c565b61015f565b3480156100a457600080fd5b506100ad6101d0565b6040516001600160a01b03909116815260200160405180910390f35b3480156100d557600080fd5b5061005b6100e43660046106f1565b61020b565b3480156100f557600080fd5b506100ad610235565b61010661029b565b61011661011161033a565b610344565b565b610120610368565b6001600160a01b0316336001600160a01b03161415610157576101548160405180602001604052806000815250600061039b565b50565b6101546100fe565b610167610368565b6001600160a01b0316336001600160a01b031614156101c8576101c38383838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061039b915050565b505050565b6101c36100fe565b60006101da610368565b6001600160a01b0316336001600160a01b03161415610200576101fb61033a565b905090565b6102086100fe565b90565b610213610368565b6001600160a01b0316336001600160a01b0316141561015757610154816103c6565b600061023f610368565b6001600160a01b0316336001600160a01b03161415610200576101fb610368565b6060610285838360405180606001604052806027815260200161080b6027913961041a565b9392505050565b6001600160a01b03163b151590565b6102a3610368565b6001600160a01b0316336001600160a01b031614156101165760405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b60006101fb6104f7565b3660008037600080366000845af43d6000803e808015610363573d6000f35b3d6000fd5b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b6103a48361051f565b6000825111806103b15750805b156101c3576103c08383610260565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f6103ef610368565b604080516001600160a01b03928316815291841660208301520160405180910390a16101548161055f565b60606001600160a01b0384163b6104825760405162461bcd60e51b815260206004820152602660248201527f416464726573733a2064656c65676174652063616c6c20746f206e6f6e2d636f6044820152651b9d1c9858dd60d21b6064820152608401610331565b600080856001600160a01b03168560405161049d91906107bb565b600060405180830381855af49150503d80600081146104d8576040519150601f19603f3d011682016040523d82523d6000602084013e6104dd565b606091505b50915091506104ed828286610608565b9695505050505050565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61038c565b61052881610641565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105c45760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b6064820152608401610331565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b60608315610617575081610285565b8251156106275782518084602001fd5b8160405162461bcd60e51b815260040161033191906107d7565b6001600160a01b0381163b6106ae5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401610331565b807f360894a13ba1a
3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105e7565b80356001600160a01b03811681146106ec57600080fd5b919050565b60006020828403121561070357600080fd5b610285826106d5565b60008060006040848603121561072157600080fd5b61072a846106d5565b9250602084013567ffffffffffffffff8082111561074757600080fd5b818601915086601f83011261075b57600080fd5b81358181111561076a57600080fd5b87602082850101111561077c57600080fd5b6020830194508093505050509250925092565b60005b838110156107aa578181015183820152602001610792565b838111156103c05750506000910152565b600082516107cd81846020870161078f565b9190910192915050565b60208152600082518060208401526107f681604085016020870161078f565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a2646970667358221220366737524a7ac8fa76e3b2cd04bb1e0b8aa75e165c32f59b0076ead59d529de564736f6c634300080a0033", + "structLogs": [ + { + "pc": 0, + "op": "PUSH1", + "gas": 320, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 2, + "op": "PUSH1", + "gas": 317, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x80" + ] + }, + { + "pc": 4, + "op": "MSTORE", + "gas": 314, + "gasCost": 12, + "depth": 1, + "stack": [ + "0x80", + "0x40" + ] + }, + { + "pc": 5, + "op": "PUSH1", + "gas": 302, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 7, + "op": "CALLDATASIZE", + "gas": 299, + "gasCost": 2, + "depth": 1, + "stack": [ + "0x4" + ] + }, + { + "pc": 8, + "op": "LT", + "gas": 297, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x4", + "0x184" + ] + }, + { + "pc": 9, + "op": "PUSH2", + "gas": 294, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 12, + "op": "JUMPI", + "gas": 291, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x0", + "0x4e" + ] + }, + { + "pc": 13, + "op": "PUSH1", + "gas": 281, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 15, + "op": "CALLDATALOAD", + "gas": 278, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 16, + "op": "PUSH1", + "gas": 275, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592" + ] + }, + { + "pc": 18, + "op": "SHR", + "gas": 272, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592", + "0xe0" + ] + }, + { + "pc": 19, + "op": "DUP1", + "gas": 269, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 20, + "op": "PUSH4", + "gas": 266, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 25, + "op": "EQ", + "gas": 263, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x3659cfe6" + ] + }, + { + "pc": 26, + "op": "PUSH2", + "gas": 260, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 29, + "op": "JUMPI", + "gas": 257, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x65" + ] + }, + { + "pc": 30, + "op": "DUP1", + "gas": 247, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 31, + "op": "PUSH4", + "gas": 244, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 36, + "op": "EQ", + "gas": 241, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x4f1ef286" + ] + }, + { + "pc": 37, + "op": "PUSH2", + "gas": 238, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 40, + "op": "JUMPI", + "gas": 235, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x85" + ] + }, + { 
+ "pc": 41, + "op": "DUP1", + "gas": 225, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 42, + "op": "PUSH4", + "gas": 222, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 47, + "op": "EQ", + "gas": 219, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x5c60da1b" + ] + }, + { + "pc": 48, + "op": "PUSH2", + "gas": 216, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 51, + "op": "JUMPI", + "gas": 213, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x98" + ] + }, + { + "pc": 52, + "op": "DUP1", + "gas": 203, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 53, + "op": "PUSH4", + "gas": 200, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 58, + "op": "EQ", + "gas": 197, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x8f283970" + ] + }, + { + "pc": 59, + "op": "PUSH2", + "gas": 194, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 62, + "op": "JUMPI", + "gas": 191, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xc9" + ] + }, + { + "pc": 63, + "op": "DUP1", + "gas": 181, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 64, + "op": "PUSH4", + "gas": 178, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 69, + "op": "EQ", + "gas": 175, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0xf851a440" + ] + }, + { + "pc": 70, + "op": "PUSH2", + "gas": 172, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 73, + "op": "JUMPI", + "gas": 169, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xe9" + ] + }, + { + "pc": 74, + "op": "PUSH2", + "gas": 159, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 77, + "op": "JUMP", + "gas": 156, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5d" + ] + }, + { + "pc": 93, + "op": "JUMPDEST", + "gas": 148, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 94, + "op": "PUSH2", + "gas": 147, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 97, + "op": "PUSH2", + "gas": 144, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 100, + "op": "JUMP", + "gas": 141, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0xfe" + ] + }, + { + "pc": 254, + "op": "JUMPDEST", + "gas": 133, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 255, + "op": "PUSH2", + "gas": 132, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 258, + "op": "PUSH2", + "gas": 129, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 261, + "op": "JUMP", + "gas": 126, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x29b" + ] + }, + { + "pc": 667, + "op": "JUMPDEST", + "gas": 118, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 668, + "op": "PUSH2", + "gas": 117, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 671, + "op": "PUSH2", + "gas": 114, + "gasCost": 3, + "depth": 1, + "stack": [ + 
"0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 674, + "op": "JUMP", + "gas": 111, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x368" + ] + }, + { + "pc": 872, + "op": "JUMPDEST", + "gas": 103, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 873, + "op": "PUSH1", + "gas": 102, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 875, + "op": "PUSH32", + "gas": 99, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0" + ] + }, + { + "pc": 908, + "op": "JUMPDEST", + "gas": 96, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ] + }, + { + "pc": 909, + "op": "SLOAD", + "gas": 95, + "gasCost": 2100, + "depth": 1, + "error": "out of gas", + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ], + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + }, + "extraData": { + "proofList": [ + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151, + "storage": { + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "value": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + } + } + ] + } + } + ] + }, + { + "gas": 24000, + "failed": true, + "returnValue": "", + "from": { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 38, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + "to": { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + "accountAfter": [ + { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 39, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + { + "address": "0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": "0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": 
"0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + } + ], + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "byteCode": "0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100985780638f283970146100c9578063f851a440146100e95761005d565b3661005d5761005b6100fe565b005b61005b6100fe565b34801561007157600080fd5b5061005b6100803660046106f1565b610118565b61005b61009336600461070c565b61015f565b3480156100a457600080fd5b506100ad6101d0565b6040516001600160a01b03909116815260200160405180910390f35b3480156100d557600080fd5b5061005b6100e43660046106f1565b61020b565b3480156100f557600080fd5b506100ad610235565b61010661029b565b61011661011161033a565b610344565b565b610120610368565b6001600160a01b0316336001600160a01b03161415610157576101548160405180602001604052806000815250600061039b565b50565b6101546100fe565b610167610368565b6001600160a01b0316336001600160a01b031614156101c8576101c38383838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061039b915050565b505050565b6101c36100fe565b60006101da610368565b6001600160a01b0316336001600160a01b03161415610200576101fb61033a565b905090565b6102086100fe565b90565b610213610368565b6001600160a01b0316336001600160a01b0316141561015757610154816103c6565b600061023f610368565b6001600160a01b0316336001600160a01b03161415610200576101fb610368565b6060610285838360405180606001604052806027815260200161080b6027913961041a565b9392505050565b6001600160a01b03163b151590565b6102a3610368565b6001600160a01b0316336001600160a01b031614156101165760405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b60006101fb6104f7565b3660008037600080366000845af43d6000803e808015610363573d6000f35b3d6000fd5b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b6103a48361051f565b6000825111806103b15750805b156101c3576103c08383610260565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f6103ef610368565b604080516001600160a01b03928316815291841660208301520160405180910390a16101548161055f565b60606001600160a01b0384163b6104825760405162461bcd60e51b815260206004820152602660248201527f416464726573733a2064656c65676174652063616c6c20746f206e6f6e2d636f6044820152651b9d1c9858dd60d21b6064820152608401610331565b600080856001600160a01b03168560405161049d91906107bb565b600060405180830381855af49150503d80600081146104d8576040519150601f19603f3d011682016040523d82523d6000602084013e6104dd565b606091505b50915091506104ed828286610608565b9695505050505050565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61038c565b61052881610641565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105c45760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b6064820152608401610331565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b60608315610617575081610285565b8251156106275782518084602001fd5b8160405162461bcd60e51b815260040161033191906107d7565b6001600160a01b0381163b6106ae5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973
206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401610331565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105e7565b80356001600160a01b03811681146106ec57600080fd5b919050565b60006020828403121561070357600080fd5b610285826106d5565b60008060006040848603121561072157600080fd5b61072a846106d5565b9250602084013567ffffffffffffffff8082111561074757600080fd5b818601915086601f83011261075b57600080fd5b81358181111561076a57600080fd5b87602082850101111561077c57600080fd5b6020830194508093505050509250925092565b60005b838110156107aa578181015183820152602001610792565b838111156103c05750506000910152565b600082516107cd81846020870161078f565b9190910192915050565b60208152600082518060208401526107f681604085016020870161078f565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a2646970667358221220366737524a7ac8fa76e3b2cd04bb1e0b8aa75e165c32f59b0076ead59d529de564736f6c634300080a0033", + "structLogs": [ + { + "pc": 0, + "op": "PUSH1", + "gas": 320, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 2, + "op": "PUSH1", + "gas": 317, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x80" + ] + }, + { + "pc": 4, + "op": "MSTORE", + "gas": 314, + "gasCost": 12, + "depth": 1, + "stack": [ + "0x80", + "0x40" + ] + }, + { + "pc": 5, + "op": "PUSH1", + "gas": 302, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 7, + "op": "CALLDATASIZE", + "gas": 299, + "gasCost": 2, + "depth": 1, + "stack": [ + "0x4" + ] + }, + { + "pc": 8, + "op": "LT", + "gas": 297, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x4", + "0x184" + ] + }, + { + "pc": 9, + "op": "PUSH2", + "gas": 294, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 12, + "op": "JUMPI", + "gas": 291, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x0", + "0x4e" + ] + }, + { + "pc": 13, + "op": "PUSH1", + "gas": 281, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 15, + "op": "CALLDATALOAD", + "gas": 278, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 16, + "op": "PUSH1", + "gas": 275, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592" + ] + }, + { + "pc": 18, + "op": "SHR", + "gas": 272, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592", + "0xe0" + ] + }, + { + "pc": 19, + "op": "DUP1", + "gas": 269, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 20, + "op": "PUSH4", + "gas": 266, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 25, + "op": "EQ", + "gas": 263, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x3659cfe6" + ] + }, + { + "pc": 26, + "op": "PUSH2", + "gas": 260, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 29, + "op": "JUMPI", + "gas": 257, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x65" + ] + }, + { + "pc": 30, + "op": "DUP1", + "gas": 247, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 31, + "op": "PUSH4", + "gas": 244, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 36, + "op": "EQ", + "gas": 241, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x4f1ef286" + ] + }, + { + "pc": 37, + "op": "PUSH2", + "gas": 238, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 40, + "op": "JUMPI", + "gas": 235, 
+ "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x85" + ] + }, + { + "pc": 41, + "op": "DUP1", + "gas": 225, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 42, + "op": "PUSH4", + "gas": 222, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 47, + "op": "EQ", + "gas": 219, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x5c60da1b" + ] + }, + { + "pc": 48, + "op": "PUSH2", + "gas": 216, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 51, + "op": "JUMPI", + "gas": 213, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x98" + ] + }, + { + "pc": 52, + "op": "DUP1", + "gas": 203, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 53, + "op": "PUSH4", + "gas": 200, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 58, + "op": "EQ", + "gas": 197, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x8f283970" + ] + }, + { + "pc": 59, + "op": "PUSH2", + "gas": 194, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 62, + "op": "JUMPI", + "gas": 191, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xc9" + ] + }, + { + "pc": 63, + "op": "DUP1", + "gas": 181, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 64, + "op": "PUSH4", + "gas": 178, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 69, + "op": "EQ", + "gas": 175, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0xf851a440" + ] + }, + { + "pc": 70, + "op": "PUSH2", + "gas": 172, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 73, + "op": "JUMPI", + "gas": 169, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xe9" + ] + }, + { + "pc": 74, + "op": "PUSH2", + "gas": 159, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 77, + "op": "JUMP", + "gas": 156, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5d" + ] + }, + { + "pc": 93, + "op": "JUMPDEST", + "gas": 148, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 94, + "op": "PUSH2", + "gas": 147, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 97, + "op": "PUSH2", + "gas": 144, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 100, + "op": "JUMP", + "gas": 141, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0xfe" + ] + }, + { + "pc": 254, + "op": "JUMPDEST", + "gas": 133, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 255, + "op": "PUSH2", + "gas": 132, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 258, + "op": "PUSH2", + "gas": 129, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 261, + "op": "JUMP", + "gas": 126, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x29b" + ] + }, + { + "pc": 667, + "op": "JUMPDEST", + "gas": 118, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 668, + "op": "PUSH2", + "gas": 117, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + 
{ + "pc": 671, + "op": "PUSH2", + "gas": 114, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 674, + "op": "JUMP", + "gas": 111, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x368" + ] + }, + { + "pc": 872, + "op": "JUMPDEST", + "gas": 103, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 873, + "op": "PUSH1", + "gas": 102, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 875, + "op": "PUSH32", + "gas": 99, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0" + ] + }, + { + "pc": 908, + "op": "JUMPDEST", + "gas": 96, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ] + }, + { + "pc": 909, + "op": "SLOAD", + "gas": 95, + "gasCost": 2100, + "depth": 1, + "error": "out of gas", + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ], + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + }, + "extraData": { + "proofList": [ + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151, + "storage": { + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "value": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + } + } + ] + } + } + ] + }, + { + "gas": 24000, + "failed": true, + "returnValue": "", + "from": { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 39, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + "to": { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + "accountAfter": [ + { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 40, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + { + "address": "0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": 
"0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": "0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + } + ], + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "byteCode": "0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100985780638f283970146100c9578063f851a440146100e95761005d565b3661005d5761005b6100fe565b005b61005b6100fe565b34801561007157600080fd5b5061005b6100803660046106f1565b610118565b61005b61009336600461070c565b61015f565b3480156100a457600080fd5b506100ad6101d0565b6040516001600160a01b03909116815260200160405180910390f35b3480156100d557600080fd5b5061005b6100e43660046106f1565b61020b565b3480156100f557600080fd5b506100ad610235565b61010661029b565b61011661011161033a565b610344565b565b610120610368565b6001600160a01b0316336001600160a01b03161415610157576101548160405180602001604052806000815250600061039b565b50565b6101546100fe565b610167610368565b6001600160a01b0316336001600160a01b031614156101c8576101c38383838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061039b915050565b505050565b6101c36100fe565b60006101da610368565b6001600160a01b0316336001600160a01b03161415610200576101fb61033a565b905090565b6102086100fe565b90565b610213610368565b6001600160a01b0316336001600160a01b0316141561015757610154816103c6565b600061023f610368565b6001600160a01b0316336001600160a01b03161415610200576101fb610368565b6060610285838360405180606001604052806027815260200161080b6027913961041a565b9392505050565b6001600160a01b03163b151590565b6102a3610368565b6001600160a01b0316336001600160a01b031614156101165760405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b60006101fb6104f7565b3660008037600080366000845af43d6000803e808015610363573d6000f35b3d6000fd5b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b6103a48361051f565b6000825111806103b15750805b156101c3576103c08383610260565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f6103ef610368565b604080516001600160a01b03928316815291841660208301520160405180910390a16101548161055f565b60606001600160a01b0384163b6104825760405162461bcd60e51b815260206004820152602660248201527f416464726573733a2064656c65676174652063616c6c20746f206e6f6e2d636f6044820152651b9d1c9858dd60d21b6064820152608401610331565b600080856001600160a01b03168560405161049d91906107bb565b600060405180830381855af49150503d80600081146104d8576040519150601f19603f3d011682016040523d82523d6000602084013e6104dd565b606091505b50915091506104ed828286610608565b9695505050505050565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61038c565b61052881610641565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105c45760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b6064820152608401610331565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b60608315610617575081610285565b8251156106275782518084602001fd5b8160405162461bcd60e51b815260040161033191906107d7565b6001600160a01b0381163b6106ae5760405162461bcd60e51b81
5260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401610331565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105e7565b80356001600160a01b03811681146106ec57600080fd5b919050565b60006020828403121561070357600080fd5b610285826106d5565b60008060006040848603121561072157600080fd5b61072a846106d5565b9250602084013567ffffffffffffffff8082111561074757600080fd5b818601915086601f83011261075b57600080fd5b81358181111561076a57600080fd5b87602082850101111561077c57600080fd5b6020830194508093505050509250925092565b60005b838110156107aa578181015183820152602001610792565b838111156103c05750506000910152565b600082516107cd81846020870161078f565b9190910192915050565b60208152600082518060208401526107f681604085016020870161078f565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a2646970667358221220366737524a7ac8fa76e3b2cd04bb1e0b8aa75e165c32f59b0076ead59d529de564736f6c634300080a0033", + "structLogs": [ + { + "pc": 0, + "op": "PUSH1", + "gas": 320, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 2, + "op": "PUSH1", + "gas": 317, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x80" + ] + }, + { + "pc": 4, + "op": "MSTORE", + "gas": 314, + "gasCost": 12, + "depth": 1, + "stack": [ + "0x80", + "0x40" + ] + }, + { + "pc": 5, + "op": "PUSH1", + "gas": 302, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 7, + "op": "CALLDATASIZE", + "gas": 299, + "gasCost": 2, + "depth": 1, + "stack": [ + "0x4" + ] + }, + { + "pc": 8, + "op": "LT", + "gas": 297, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x4", + "0x184" + ] + }, + { + "pc": 9, + "op": "PUSH2", + "gas": 294, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 12, + "op": "JUMPI", + "gas": 291, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x0", + "0x4e" + ] + }, + { + "pc": 13, + "op": "PUSH1", + "gas": 281, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 15, + "op": "CALLDATALOAD", + "gas": 278, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 16, + "op": "PUSH1", + "gas": 275, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592" + ] + }, + { + "pc": 18, + "op": "SHR", + "gas": 272, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592", + "0xe0" + ] + }, + { + "pc": 19, + "op": "DUP1", + "gas": 269, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 20, + "op": "PUSH4", + "gas": 266, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 25, + "op": "EQ", + "gas": 263, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x3659cfe6" + ] + }, + { + "pc": 26, + "op": "PUSH2", + "gas": 260, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 29, + "op": "JUMPI", + "gas": 257, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x65" + ] + }, + { + "pc": 30, + "op": "DUP1", + "gas": 247, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 31, + "op": "PUSH4", + "gas": 244, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 36, + "op": "EQ", + "gas": 241, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x4f1ef286" + ] + }, + { + "pc": 37, + "op": "PUSH2", + "gas": 238, + "gasCost": 3, + "depth": 1, + 
"stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 40, + "op": "JUMPI", + "gas": 235, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x85" + ] + }, + { + "pc": 41, + "op": "DUP1", + "gas": 225, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 42, + "op": "PUSH4", + "gas": 222, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 47, + "op": "EQ", + "gas": 219, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x5c60da1b" + ] + }, + { + "pc": 48, + "op": "PUSH2", + "gas": 216, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 51, + "op": "JUMPI", + "gas": 213, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x98" + ] + }, + { + "pc": 52, + "op": "DUP1", + "gas": 203, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 53, + "op": "PUSH4", + "gas": 200, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 58, + "op": "EQ", + "gas": 197, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x8f283970" + ] + }, + { + "pc": 59, + "op": "PUSH2", + "gas": 194, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 62, + "op": "JUMPI", + "gas": 191, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xc9" + ] + }, + { + "pc": 63, + "op": "DUP1", + "gas": 181, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 64, + "op": "PUSH4", + "gas": 178, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 69, + "op": "EQ", + "gas": 175, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0xf851a440" + ] + }, + { + "pc": 70, + "op": "PUSH2", + "gas": 172, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 73, + "op": "JUMPI", + "gas": 169, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xe9" + ] + }, + { + "pc": 74, + "op": "PUSH2", + "gas": 159, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 77, + "op": "JUMP", + "gas": 156, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5d" + ] + }, + { + "pc": 93, + "op": "JUMPDEST", + "gas": 148, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 94, + "op": "PUSH2", + "gas": 147, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 97, + "op": "PUSH2", + "gas": 144, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 100, + "op": "JUMP", + "gas": 141, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0xfe" + ] + }, + { + "pc": 254, + "op": "JUMPDEST", + "gas": 133, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 255, + "op": "PUSH2", + "gas": 132, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 258, + "op": "PUSH2", + "gas": 129, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 261, + "op": "JUMP", + "gas": 126, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x29b" + ] + }, + { + "pc": 667, + "op": "JUMPDEST", + "gas": 118, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 668, + "op": "PUSH2", + "gas": 117, 
+ "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 671, + "op": "PUSH2", + "gas": 114, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 674, + "op": "JUMP", + "gas": 111, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x368" + ] + }, + { + "pc": 872, + "op": "JUMPDEST", + "gas": 103, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 873, + "op": "PUSH1", + "gas": 102, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 875, + "op": "PUSH32", + "gas": 99, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0" + ] + }, + { + "pc": 908, + "op": "JUMPDEST", + "gas": 96, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ] + }, + { + "pc": 909, + "op": "SLOAD", + "gas": 95, + "gasCost": 2100, + "depth": 1, + "error": "out of gas", + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ], + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + }, + "extraData": { + "proofList": [ + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151, + "storage": { + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "value": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + } + } + ] + } + } + ] + }, + { + "gas": 24000, + "failed": true, + "returnValue": "", + "from": { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 40, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + "to": { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + "accountAfter": [ + { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 41, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + { + "address": 
"0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": "0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": "0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + } + ], + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "byteCode": "0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100985780638f283970146100c9578063f851a440146100e95761005d565b3661005d5761005b6100fe565b005b61005b6100fe565b34801561007157600080fd5b5061005b6100803660046106f1565b610118565b61005b61009336600461070c565b61015f565b3480156100a457600080fd5b506100ad6101d0565b6040516001600160a01b03909116815260200160405180910390f35b3480156100d557600080fd5b5061005b6100e43660046106f1565b61020b565b3480156100f557600080fd5b506100ad610235565b61010661029b565b61011661011161033a565b610344565b565b610120610368565b6001600160a01b0316336001600160a01b03161415610157576101548160405180602001604052806000815250600061039b565b50565b6101546100fe565b610167610368565b6001600160a01b0316336001600160a01b031614156101c8576101c38383838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061039b915050565b505050565b6101c36100fe565b60006101da610368565b6001600160a01b0316336001600160a01b03161415610200576101fb61033a565b905090565b6102086100fe565b90565b610213610368565b6001600160a01b0316336001600160a01b0316141561015757610154816103c6565b600061023f610368565b6001600160a01b0316336001600160a01b03161415610200576101fb610368565b6060610285838360405180606001604052806027815260200161080b6027913961041a565b9392505050565b6001600160a01b03163b151590565b6102a3610368565b6001600160a01b0316336001600160a01b031614156101165760405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b60006101fb6104f7565b3660008037600080366000845af43d6000803e808015610363573d6000f35b3d6000fd5b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b6103a48361051f565b6000825111806103b15750805b156101c3576103c08383610260565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f6103ef610368565b604080516001600160a01b03928316815291841660208301520160405180910390a16101548161055f565b60606001600160a01b0384163b6104825760405162461bcd60e51b815260206004820152602660248201527f416464726573733a2064656c65676174652063616c6c20746f206e6f6e2d636f6044820152651b9d1c9858dd60d21b6064820152608401610331565b600080856001600160a01b03168560405161049d91906107bb565b600060405180830381855af49150503d80600081146104d8576040519150601f19603f3d011682016040523d82523d6000602084013e6104dd565b606091505b50915091506104ed828286610608565b9695505050505050565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61038c565b61052881610641565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105c45760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b6064820152608401610331565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001600160a01b0319166001600160a01b039290921691909117905550565b60608315610617575081610285565b82511561062757825180846
02001fd5b8160405162461bcd60e51b815260040161033191906107d7565b6001600160a01b0381163b6106ae5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401610331565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105e7565b80356001600160a01b03811681146106ec57600080fd5b919050565b60006020828403121561070357600080fd5b610285826106d5565b60008060006040848603121561072157600080fd5b61072a846106d5565b9250602084013567ffffffffffffffff8082111561074757600080fd5b818601915086601f83011261075b57600080fd5b81358181111561076a57600080fd5b87602082850101111561077c57600080fd5b6020830194508093505050509250925092565b60005b838110156107aa578181015183820152602001610792565b838111156103c05750506000910152565b600082516107cd81846020870161078f565b9190910192915050565b60208152600082518060208401526107f681604085016020870161078f565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a2646970667358221220366737524a7ac8fa76e3b2cd04bb1e0b8aa75e165c32f59b0076ead59d529de564736f6c634300080a0033", + "structLogs": [ + { + "pc": 0, + "op": "PUSH1", + "gas": 320, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 2, + "op": "PUSH1", + "gas": 317, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x80" + ] + }, + { + "pc": 4, + "op": "MSTORE", + "gas": 314, + "gasCost": 12, + "depth": 1, + "stack": [ + "0x80", + "0x40" + ] + }, + { + "pc": 5, + "op": "PUSH1", + "gas": 302, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 7, + "op": "CALLDATASIZE", + "gas": 299, + "gasCost": 2, + "depth": 1, + "stack": [ + "0x4" + ] + }, + { + "pc": 8, + "op": "LT", + "gas": 297, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x4", + "0x184" + ] + }, + { + "pc": 9, + "op": "PUSH2", + "gas": 294, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 12, + "op": "JUMPI", + "gas": 291, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x0", + "0x4e" + ] + }, + { + "pc": 13, + "op": "PUSH1", + "gas": 281, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 15, + "op": "CALLDATALOAD", + "gas": 278, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 16, + "op": "PUSH1", + "gas": 275, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592" + ] + }, + { + "pc": 18, + "op": "SHR", + "gas": 272, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592", + "0xe0" + ] + }, + { + "pc": 19, + "op": "DUP1", + "gas": 269, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 20, + "op": "PUSH4", + "gas": 266, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 25, + "op": "EQ", + "gas": 263, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x3659cfe6" + ] + }, + { + "pc": 26, + "op": "PUSH2", + "gas": 260, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 29, + "op": "JUMPI", + "gas": 257, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x65" + ] + }, + { + "pc": 30, + "op": "DUP1", + "gas": 247, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 31, + "op": "PUSH4", + "gas": 244, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 36, + "op": "EQ", + "gas": 241, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + 
"0x8ef1332e", + "0x4f1ef286" + ] + }, + { + "pc": 37, + "op": "PUSH2", + "gas": 238, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 40, + "op": "JUMPI", + "gas": 235, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x85" + ] + }, + { + "pc": 41, + "op": "DUP1", + "gas": 225, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 42, + "op": "PUSH4", + "gas": 222, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 47, + "op": "EQ", + "gas": 219, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x5c60da1b" + ] + }, + { + "pc": 48, + "op": "PUSH2", + "gas": 216, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 51, + "op": "JUMPI", + "gas": 213, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x98" + ] + }, + { + "pc": 52, + "op": "DUP1", + "gas": 203, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 53, + "op": "PUSH4", + "gas": 200, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 58, + "op": "EQ", + "gas": 197, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x8f283970" + ] + }, + { + "pc": 59, + "op": "PUSH2", + "gas": 194, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 62, + "op": "JUMPI", + "gas": 191, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xc9" + ] + }, + { + "pc": 63, + "op": "DUP1", + "gas": 181, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 64, + "op": "PUSH4", + "gas": 178, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 69, + "op": "EQ", + "gas": 175, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0xf851a440" + ] + }, + { + "pc": 70, + "op": "PUSH2", + "gas": 172, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 73, + "op": "JUMPI", + "gas": 169, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xe9" + ] + }, + { + "pc": 74, + "op": "PUSH2", + "gas": 159, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 77, + "op": "JUMP", + "gas": 156, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5d" + ] + }, + { + "pc": 93, + "op": "JUMPDEST", + "gas": 148, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 94, + "op": "PUSH2", + "gas": 147, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 97, + "op": "PUSH2", + "gas": 144, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 100, + "op": "JUMP", + "gas": 141, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0xfe" + ] + }, + { + "pc": 254, + "op": "JUMPDEST", + "gas": 133, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 255, + "op": "PUSH2", + "gas": 132, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 258, + "op": "PUSH2", + "gas": 129, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 261, + "op": "JUMP", + "gas": 126, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x29b" + ] + }, + { + "pc": 667, + "op": "JUMPDEST", + "gas": 118, + "gasCost": 1, + 
"depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 668, + "op": "PUSH2", + "gas": 117, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 671, + "op": "PUSH2", + "gas": 114, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 674, + "op": "JUMP", + "gas": 111, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x368" + ] + }, + { + "pc": 872, + "op": "JUMPDEST", + "gas": 103, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 873, + "op": "PUSH1", + "gas": 102, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 875, + "op": "PUSH32", + "gas": 99, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0" + ] + }, + { + "pc": 908, + "op": "JUMPDEST", + "gas": 96, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ] + }, + { + "pc": 909, + "op": "SLOAD", + "gas": 95, + "gasCost": 2100, + "depth": 1, + "error": "out of gas", + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ], + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + }, + "extraData": { + "proofList": [ + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151, + "storage": { + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "value": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + } + } + ] + } + } + ] + }, + { + "gas": 24000, + "failed": true, + "returnValue": "", + "from": { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 41, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + "to": { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + "accountAfter": [ + { + "address": "0x478cdd110520a8e733e2acf9e543d2c687ea5239", + "nonce": 42, + "balance": "0x0", + "keccakCodeHash": "0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470", + "poseidonCodeHash": "0x2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864", + "codeSize": 0 + }, + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": 
"0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151 + }, + { + "address": "0x5300000000000000000000000000000000000005", + "nonce": 0, + "balance": "0x2aa86921dcd2c0", + "keccakCodeHash": "0x256e306f068f0847c8aab5819879b2ff45c021ce2e2f428be51be663415b1d60", + "poseidonCodeHash": "0x2c49d7de76e39008575f2f090bb3e90912bad475ea8102c8565c249a75575df5", + "codeSize": 1652 + } + ], + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "byteCode": "0x60806040526004361061004e5760003560e01c80633659cfe6146100655780634f1ef286146100855780635c60da1b146100985780638f283970146100c9578063f851a440146100e95761005d565b3661005d5761005b6100fe565b005b61005b6100fe565b34801561007157600080fd5b5061005b6100803660046106f1565b610118565b61005b61009336600461070c565b61015f565b3480156100a457600080fd5b506100ad6101d0565b6040516001600160a01b03909116815260200160405180910390f35b3480156100d557600080fd5b5061005b6100e43660046106f1565b61020b565b3480156100f557600080fd5b506100ad610235565b61010661029b565b61011661011161033a565b610344565b565b610120610368565b6001600160a01b0316336001600160a01b03161415610157576101548160405180602001604052806000815250600061039b565b50565b6101546100fe565b610167610368565b6001600160a01b0316336001600160a01b031614156101c8576101c38383838080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506001925061039b915050565b505050565b6101c36100fe565b60006101da610368565b6001600160a01b0316336001600160a01b03161415610200576101fb61033a565b905090565b6102086100fe565b90565b610213610368565b6001600160a01b0316336001600160a01b0316141561015757610154816103c6565b600061023f610368565b6001600160a01b0316336001600160a01b03161415610200576101fb610368565b6060610285838360405180606001604052806027815260200161080b6027913961041a565b9392505050565b6001600160a01b03163b151590565b6102a3610368565b6001600160a01b0316336001600160a01b031614156101165760405162461bcd60e51b815260206004820152604260248201527f5472616e73706172656e745570677261646561626c6550726f78793a2061646d60448201527f696e2063616e6e6f742066616c6c6261636b20746f2070726f78792074617267606482015261195d60f21b608482015260a4015b60405180910390fd5b60006101fb6104f7565b3660008037600080366000845af43d6000803e808015610363573d6000f35b3d6000fd5b60007fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b546001600160a01b0316919050565b6103a48361051f565b6000825111806103b15750805b156101c3576103c08383610260565b50505050565b7f7e644d79422f17c01e4894b5f4f588d331ebfa28653d42ae832dc59e38c9798f6103ef610368565b604080516001600160a01b03928316815291841660208301520160405180910390a16101548161055f565b60606001600160a01b0384163b6104825760405162461bcd60e51b815260206004820152602660248201527f416464726573733a2064656c65676174652063616c6c20746f206e6f6e2d636f6044820152651b9d1c9858dd60d21b6064820152608401610331565b600080856001600160a01b03168560405161049d91906107bb565b600060405180830381855af49150503d80600081146104d8576040519150601f19603f3d011682016040523d82523d6000602084013e6104dd565b606091505b50915091506104ed828286610608565b9695505050505050565b60007f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc61038c565b61052881610641565b6040516001600160a01b038216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6001600160a01b0381166105c45760405162461bcd60e51b815260206004820152602660248201527f455243313936373a206e65772061646d696e20697320746865207a65726f206160448201526564647265737360d01b6064820152608401610331565b807fb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d61035b80546001
600160a01b0319166001600160a01b039290921691909117905550565b60608315610617575081610285565b8251156106275782518084602001fd5b8160405162461bcd60e51b815260040161033191906107d7565b6001600160a01b0381163b6106ae5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201526c1bdd08184818dbdb9d1c9858dd609a1b6064820152608401610331565b807f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc6105e7565b80356001600160a01b03811681146106ec57600080fd5b919050565b60006020828403121561070357600080fd5b610285826106d5565b60008060006040848603121561072157600080fd5b61072a846106d5565b9250602084013567ffffffffffffffff8082111561074757600080fd5b818601915086601f83011261075b57600080fd5b81358181111561076a57600080fd5b87602082850101111561077c57600080fd5b6020830194508093505050509250925092565b60005b838110156107aa578181015183820152602001610792565b838111156103c05750506000910152565b600082516107cd81846020870161078f565b9190910192915050565b60208152600082518060208401526107f681604085016020870161078f565b601f01601f1916919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564a2646970667358221220366737524a7ac8fa76e3b2cd04bb1e0b8aa75e165c32f59b0076ead59d529de564736f6c634300080a0033", + "structLogs": [ + { + "pc": 0, + "op": "PUSH1", + "gas": 320, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 2, + "op": "PUSH1", + "gas": 317, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x80" + ] + }, + { + "pc": 4, + "op": "MSTORE", + "gas": 314, + "gasCost": 12, + "depth": 1, + "stack": [ + "0x80", + "0x40" + ] + }, + { + "pc": 5, + "op": "PUSH1", + "gas": 302, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 7, + "op": "CALLDATASIZE", + "gas": 299, + "gasCost": 2, + "depth": 1, + "stack": [ + "0x4" + ] + }, + { + "pc": 8, + "op": "LT", + "gas": 297, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x4", + "0x184" + ] + }, + { + "pc": 9, + "op": "PUSH2", + "gas": 294, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 12, + "op": "JUMPI", + "gas": 291, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x0", + "0x4e" + ] + }, + { + "pc": 13, + "op": "PUSH1", + "gas": 281, + "gasCost": 3, + "depth": 1 + }, + { + "pc": 15, + "op": "CALLDATALOAD", + "gas": 278, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x0" + ] + }, + { + "pc": 16, + "op": "PUSH1", + "gas": 275, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592" + ] + }, + { + "pc": 18, + "op": "SHR", + "gas": 272, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e000000000000000000000000ea08a65b1829af779261e768d609e592", + "0xe0" + ] + }, + { + "pc": 19, + "op": "DUP1", + "gas": 269, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 20, + "op": "PUSH4", + "gas": 266, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 25, + "op": "EQ", + "gas": 263, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x3659cfe6" + ] + }, + { + "pc": 26, + "op": "PUSH2", + "gas": 260, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 29, + "op": "JUMPI", + "gas": 257, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x65" + ] + }, + { + "pc": 30, + "op": "DUP1", + "gas": 247, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 31, + "op": "PUSH4", + "gas": 244, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + 
}, + { + "pc": 36, + "op": "EQ", + "gas": 241, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x4f1ef286" + ] + }, + { + "pc": 37, + "op": "PUSH2", + "gas": 238, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 40, + "op": "JUMPI", + "gas": 235, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x85" + ] + }, + { + "pc": 41, + "op": "DUP1", + "gas": 225, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 42, + "op": "PUSH4", + "gas": 222, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 47, + "op": "EQ", + "gas": 219, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x5c60da1b" + ] + }, + { + "pc": 48, + "op": "PUSH2", + "gas": 216, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 51, + "op": "JUMPI", + "gas": 213, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0x98" + ] + }, + { + "pc": 52, + "op": "DUP1", + "gas": 203, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 53, + "op": "PUSH4", + "gas": 200, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 58, + "op": "EQ", + "gas": 197, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0x8f283970" + ] + }, + { + "pc": 59, + "op": "PUSH2", + "gas": 194, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 62, + "op": "JUMPI", + "gas": 191, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xc9" + ] + }, + { + "pc": 63, + "op": "DUP1", + "gas": 181, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 64, + "op": "PUSH4", + "gas": 178, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e" + ] + }, + { + "pc": 69, + "op": "EQ", + "gas": 175, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x8ef1332e", + "0xf851a440" + ] + }, + { + "pc": 70, + "op": "PUSH2", + "gas": 172, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0" + ] + }, + { + "pc": 73, + "op": "JUMPI", + "gas": 169, + "gasCost": 10, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x0", + "0xe9" + ] + }, + { + "pc": 74, + "op": "PUSH2", + "gas": 159, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 77, + "op": "JUMP", + "gas": 156, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5d" + ] + }, + { + "pc": 93, + "op": "JUMPDEST", + "gas": 148, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 94, + "op": "PUSH2", + "gas": 147, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e" + ] + }, + { + "pc": 97, + "op": "PUSH2", + "gas": 144, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 100, + "op": "JUMP", + "gas": 141, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0xfe" + ] + }, + { + "pc": 254, + "op": "JUMPDEST", + "gas": 133, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 255, + "op": "PUSH2", + "gas": 132, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b" + ] + }, + { + "pc": 258, + "op": "PUSH2", + "gas": 129, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 261, + "op": "JUMP", + "gas": 126, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + 
"0x5b", + "0x106", + "0x29b" + ] + }, + { + "pc": 667, + "op": "JUMPDEST", + "gas": 118, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 668, + "op": "PUSH2", + "gas": 117, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106" + ] + }, + { + "pc": 671, + "op": "PUSH2", + "gas": 114, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 674, + "op": "JUMP", + "gas": 111, + "gasCost": 8, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x368" + ] + }, + { + "pc": 872, + "op": "JUMPDEST", + "gas": 103, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 873, + "op": "PUSH1", + "gas": 102, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3" + ] + }, + { + "pc": 875, + "op": "PUSH32", + "gas": 99, + "gasCost": 3, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0" + ] + }, + { + "pc": 908, + "op": "JUMPDEST", + "gas": 96, + "gasCost": 1, + "depth": 1, + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ] + }, + { + "pc": 909, + "op": "SLOAD", + "gas": 95, + "gasCost": 2100, + "depth": 1, + "error": "out of gas", + "stack": [ + "0x8ef1332e", + "0x5b", + "0x106", + "0x2a3", + "0x0", + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103" + ], + "storage": { + "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + }, + "extraData": { + "proofList": [ + { + "address": "0x1a258d17bf244c4df02d40343a7626a9d321e105", + "nonce": 1, + "balance": "0x30644e72e131a029b85045b68181585d2833e84879b9705b0e1847ce11600000", + "keccakCodeHash": "0x31f2125c021fb94759cb1993a2f07eae01792311e13f209441ff8969cf1eb835", + "poseidonCodeHash": "0x1cafbbe8f01ed4c292d9a27be523919a274441a076b20c7d713d192dbe6485c2", + "codeSize": 2151, + "storage": { + "key": "0xb53127684a568b3173ae13b9f8a6016e243e63b6e8ee1178d6a717850b5d6103", + "value": "0x0000000000000000000000008eebfef33eb00149852cadb631838ad9bfcce848" + } + } + ] + } + } + ] + } + ], + "withdraw_trie_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + \ No newline at end of file From 80976adee8d14d73f56447de6f6e8a638b5a7360 Mon Sep 17 00:00:00 2001 From: jonastheis <4181434+jonastheis@users.noreply.github.com> Date: Mon, 10 Feb 2025 15:39:38 +0800 Subject: [PATCH 31/36] go mod tidy --- go.sum | 2 -- 1 file changed, 2 deletions(-) diff --git a/go.sum b/go.sum index 40aef325e255..9c60daf0a630 100644 --- a/go.sum +++ b/go.sum @@ -396,8 +396,6 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f h1:Kh2Tdy3/+ooeeFeZZ2duKeUlSHgKy+sOQ2oxzuuSNZE= -github.com/scroll-tech/da-codec v0.1.3-0.20250203093155-be6b422f605f/go.mod h1:irqXJdRI5fsGkilJCpNTnJb8oV8KR51j68QXIWoth6U= github.com/scroll-tech/da-codec v0.1.3-0.20250210041951-d028c537b995 h1:Zo1p42CUS9pADSKoDD0ZoDxf4dQ3gttqWZlV+RSeImk= github.com/scroll-tech/da-codec 
v0.1.3-0.20250210041951-d028c537b995/go.mod h1:UZhhjzqYsyEhcvY0Y+SP+oMdeOUqFn/UXpbAYuPGzg0=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=

From 402298990e51d51fb9289bab47fa69f317fdf00e Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Tue, 11 Feb 2025 12:25:39 +0800
Subject: [PATCH 32/36] do not log error on shutdown

---
 rollup/rollup_sync_service/rollup_sync_service.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/rollup/rollup_sync_service/rollup_sync_service.go b/rollup/rollup_sync_service/rollup_sync_service.go
index c4db08639cdc..d498288079d5 100644
--- a/rollup/rollup_sync_service/rollup_sync_service.go
+++ b/rollup/rollup_sync_service/rollup_sync_service.go
@@ -146,6 +146,13 @@ func (s *RollupSyncService) Start() {
 		case <-syncTicker.C:
 			err := s.fetchRollupEvents()
 			if err != nil {
+				// Do not log the error if the context is canceled.
+				select {
+				case <-s.ctx.Done():
+					return
+				default:
+				}
+
 				log.Error("failed to fetch rollup events", "err", err)
 			}
 		case <-logTicker.C:
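The shutdown fix in PATCH 32/36 above hinges on a non-blocking receive from the service context's Done channel: once the service is stopped, fetchRollupEvents can return spurious errors from canceled in-flight requests, and the inner select lets the loop exit quietly instead of logging them. A minimal, self-contained sketch of the same pattern (runLoop, work, and the tick interval are illustrative names, not the service's actual API):

    package main

    import (
    	"context"
    	"errors"
    	"log"
    	"time"
    )

    // runLoop polls work on every tick and stops logging its errors as
    // soon as ctx is canceled, mirroring the shutdown-aware logging
    // added in PATCH 32/36.
    func runLoop(ctx context.Context, work func() error) {
    	ticker := time.NewTicker(200 * time.Millisecond)
    	defer ticker.Stop()
    	for {
    		select {
    		case <-ticker.C:
    			if err := work(); err != nil {
    				// Non-blocking check: during shutdown the error is
    				// expected noise (canceled RPCs etc.), so return
    				// without logging it.
    				select {
    				case <-ctx.Done():
    					return
    				default:
    				}
    				log.Println("failed to fetch rollup events:", err)
    			}
    		case <-ctx.Done():
    			return
    		}
    	}
    }

    func main() {
    	ctx, cancel := context.WithCancel(context.Background())
    	go runLoop(ctx, func() error { return errors.New("transient") })
    	time.Sleep(time.Second)
    	cancel() // errors after this point are no longer logged
    	time.Sleep(300 * time.Millisecond)
    }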
From d4cc8978b43920e7e5da43cc494dc59a222c8947 Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Wed, 12 Feb 2025 11:08:12 +0800
Subject: [PATCH 33/36] add sanity check for version to deserialization of
 committedBatchMetaV7

---
 core/rawdb/accessors_rollup_event.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/core/rawdb/accessors_rollup_event.go b/core/rawdb/accessors_rollup_event.go
index 1a840da4cbc7..c48c9c027801 100644
--- a/core/rawdb/accessors_rollup_event.go
+++ b/core/rawdb/accessors_rollup_event.go
@@ -196,6 +196,9 @@ func ReadCommittedBatchMeta(db ethdb.Reader, batchIndex uint64) (*CommittedBatch
 	// Try decoding from the newest format for future proofness, then the older one for old data.
 	cbm7 := new(committedBatchMetaV7)
 	if err = rlp.Decode(bytes.NewReader(data), cbm7); err == nil {
+		if encoding.CodecVersion(cbm7.Version) < encoding.CodecV7 {
+			return nil, fmt.Errorf("unexpected committed batch metadata version: batch index %d, version %d", batchIndex, cbm7.Version)
+		}
 		return &CommittedBatchMeta{
 			Version:          cbm7.Version,
 			ChunkBlockRanges: cbm7.ChunkBlockRanges,
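The version guard in PATCH 33/36 above is needed because RLP decoding is purely structural: bytes written by an older committedBatchMeta layout can decode into committedBatchMetaV7 without error, and a wrong Version field is the only tell. A self-contained sketch of the failure mode (metaOld and metaV7 are simplified stand-ins; the real field layouts in core/rawdb differ):

    package main

    import (
    	"bytes"
    	"fmt"

    	"github.com/scroll-tech/go-ethereum/rlp"
    )

    // Simplified stand-ins for two generations of stored batch metadata.
    type metaOld struct {
    	Version uint8
    	Ranges  [][]byte
    }

    type metaV7 struct {
    	Version          uint8
    	ChunkBlockRanges [][]byte
    }

    func main() {
    	// Bytes written under the old layout...
    	data, err := rlp.EncodeToBytes(&metaOld{Version: 1, Ranges: [][]byte{{0xaa}}})
    	if err != nil {
    		panic(err)
    	}

    	// ...decode into the V7 struct without error, because RLP only
    	// sees "a list of an integer and a list of strings". A successful
    	// decode therefore proves nothing; the Version field must be
    	// checked explicitly.
    	cbm7 := new(metaV7)
    	if err := rlp.Decode(bytes.NewReader(data), cbm7); err == nil {
    		const codecV7 = 7
    		if cbm7.Version < codecV7 {
    			fmt.Printf("rejected: version %d predates V7\n", cbm7.Version)
    			return
    		}
    		fmt.Println("accepted as V7")
    	}
    }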
From 6d5af23e5ea3ff96fcfe4a85124363e5b5f07f4c Mon Sep 17 00:00:00 2001
From: Thegaram
Date: Wed, 12 Feb 2025 13:54:14 +0000
Subject: [PATCH 34/36] chore: auto version bump [bot]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 params/version.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/params/version.go b/params/version.go
index b63a3765575d..2a27774e2890 100644
--- a/params/version.go
+++ b/params/version.go
@@ -24,7 +24,7 @@ import (

 const (
 	VersionMajor = 5         // Major version component of the current release
 	VersionMinor = 8         // Minor version component of the current release
-	VersionPatch = 5         // Patch version component of the current release
+	VersionPatch = 6         // Patch version component of the current release
 	VersionMeta  = "mainnet" // Version metadata to append to the version string
 )

From 3c21f4e87f66a0b6fdd0e0004116e943796342f7 Mon Sep 17 00:00:00 2001
From: jonastheis <4181434+jonastheis@users.noreply.github.com>
Date: Thu, 13 Feb 2025 08:23:00 +0800
Subject: [PATCH 35/36] address review comments

---
 rollup/da_syncer/da/calldata_blob_source.go | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/rollup/da_syncer/da/calldata_blob_source.go b/rollup/da_syncer/da/calldata_blob_source.go
index 77ab34aaaada..c581ae94aae3 100644
--- a/rollup/da_syncer/da/calldata_blob_source.go
+++ b/rollup/da_syncer/da/calldata_blob_source.go
@@ -240,7 +240,7 @@ func (ds *CalldataBlobSource) getCommitBatchDA(commitEvents []*l1.CommitBatchEve
 		if entry, err = NewCommitBatchDAV1(ds.ctx, ds.db, ds.blobClient, codec, commitEvent, args.ParentBatchHeader, args.Chunks, args.SkippedL1MessageBitmap, args.BlobHashes, blockHeader.Time); err != nil {
 			return nil, fmt.Errorf("failed to decode DA, batch index: %d, err: %w", commitEvent.BatchIndex().Uint64(), err)
 		}
-	case 7:
+	default: // CodecVersion 7 and above
 		if i >= len(args.BlobHashes) {
 			return nil, fmt.Errorf("not enough blob hashes for commit transaction: %s, index in tx: %d, batch index: %d, hash: %s", firstCommitEvent.TxHash(), i, commitEvent.BatchIndex().Uint64(), commitEvent.BatchHash().Hex())
 		}
@@ -256,8 +256,6 @@ func (ds *CalldataBlobSource) getCommitBatchDA(commitEvents []*l1.CommitBatchEve
 		if entry, err = NewCommitBatchDAV7(ds.ctx, ds.db, ds.blobClient, codec, commitEvent, blobHash, parentBatchHash, blockHeader.Time); err != nil {
 			return nil, fmt.Errorf("failed to decode DA, batch index: %d, err: %w", commitEvent.BatchIndex().Uint64(), err)
 		}
-	default:
-		return nil, fmt.Errorf("failed to decode DA, codec version is unknown: codec version: %d", args.Version)
 	}

 	previousEvent = commitEvent

From ca408bccdabed92e916be9d2ee3468de5b59554d Mon Sep 17 00:00:00 2001
From: Thegaram
Date: Thu, 13 Feb 2025 09:16:33 +0000
Subject: [PATCH 36/36] chore: auto version bump [bot]
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 params/version.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/params/version.go b/params/version.go
index 2a27774e2890..0612d402f1e8 100644
--- a/params/version.go
+++ b/params/version.go
@@ -24,7 +24,7 @@ import (

 const (
 	VersionMajor = 5         // Major version component of the current release
 	VersionMinor = 8         // Minor version component of the current release
-	VersionPatch = 6         // Patch version component of the current release
+	VersionPatch = 7         // Patch version component of the current release
 	VersionMeta  = "mainnet" // Version metadata to append to the version string
 )
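The switch rewrite in PATCH 35/36 above makes every codec version from 7 upward share one decoding arm, so a future version is not rejected by the switch itself; incompatibilities now surface later, during codec resolution. A reduced sketch of that dispatch shape (the version-to-path mapping is illustrative; the real arms call NewCommitBatchDAV0/V1/V7 with full arguments):

    package main

    import "fmt"

    // decodeCommit mirrors the dispatch shape of getCommitBatchDA after
    // PATCH 35/36: explicit arms for known legacy versions, and a
    // default arm that treats version 7 and above uniformly.
    func decodeCommit(version uint8, blobHashes []string, i int) (string, error) {
    	switch version {
    	case 0:
    		return "calldata path (V0)", nil
    	case 1, 2, 3:
    		return "calldata+blob path (V1)", nil
    	default: // CodecVersion 7 and above
    		if i >= len(blobHashes) {
    			return "", fmt.Errorf("not enough blob hashes: index in tx %d, have %d", i, len(blobHashes))
    		}
    		return "blob path (V7+): " + blobHashes[i], nil
    	}
    }

    func main() {
    	out, _ := decodeCommit(7, []string{"0xblob0"}, 0)
    	fmt.Println(out)

    	// A newer version with no explicit case takes the same path
    	// instead of being rejected by the switch; any real mismatch is
    	// caught downstream when the codec for that version is resolved.
    	out, _ = decodeCommit(8, []string{"0xblob1"}, 0)
    	fmt.Println(out)
    }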