diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go index 5a0aaf011a..1cbf031975 100644 --- a/cmd/geth/dbcmd.go +++ b/cmd/geth/dbcmd.go @@ -106,12 +106,12 @@ Remove blockchain and state databases`, dbInspectTrieCmd = &cli.Command{ Action: inspectTrie, Name: "inspect-trie", - ArgsUsage: " ", + ArgsUsage: " ", Flags: []cli.Flag{ utils.DataDirFlag, utils.SyncModeFlag, }, - Usage: "Inspect the MPT tree of the account and contract.", + Usage: "Inspect the MPT tree of the account and contract. 'blocknum' can be latest/snapshot/number. 'topn' outputs info for the top N storage tries, ranked by the total number of TrieNodes", Description: `This commands iterates the entrie WorldState.`, } dbCheckStateContentCmd = &cli.Command{ @@ -386,6 +386,7 @@ func inspectTrie(ctx *cli.Context) error { blockNumber uint64 trieRootHash common.Hash jobnum uint64 + topN uint64 ) stack, _ := makeConfigNode(ctx) @@ -411,12 +412,25 @@ func inspectTrie(ctx *cli.Context) error { if ctx.NArg() == 1 { jobnum = 1000 + topN = 10 + } else if ctx.NArg() == 2 { + var err error + jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64) + if err != nil { + return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err) + } + topN = 10 } else { var err error jobnum, err = strconv.ParseUint(ctx.Args().Get(1), 10, 64) if err != nil { return fmt.Errorf("failed to Parse jobnum, Args[1]: %v, err: %v", ctx.Args().Get(1), err) } + + topN, err = strconv.ParseUint(ctx.Args().Get(2), 10, 64) + if err != nil { + return fmt.Errorf("failed to Parse topn, Args[2]: %v, err: %v", ctx.Args().Get(2), err) + } } if blockNumber != math.MaxUint64 { @@ -437,6 +451,7 @@ func inspectTrie(ctx *cli.Context) error { if dbScheme == rawdb.PathScheme { config = &triedb.Config{ PathDB: utils.PathDBConfigAddJournalFilePath(stack, pathdb.ReadOnly), + Cache: 0, } } else if dbScheme == rawdb.HashScheme { config = triedb.HashDefaults @@ -448,7 +463,7 @@ func inspectTrie(ctx *cli.Context) error { fmt.Printf("fail to new trie tree, err: %v, rootHash: %v\n", err, trieRootHash.String()) return err } - theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum) + theInspect, err := trie.NewInspector(theTrie, triedb, trieRootHash, blockNumber, jobnum, int(topN)) if err != nil { return err } diff --git a/cmd/jsutils/check_blobtx.js b/cmd/jsutils/check_blobtx.js new file mode 100644 index 0000000000..2e8fe1ed1e --- /dev/null +++ b/cmd/jsutils/check_blobtx.js @@ -0,0 +1,51 @@ +import { ethers } from "ethers"; +import program from "commander"; + +// depends on ethers.js v6.11.0+ for 4844, https://github.com/ethers-io/ethers.js/releases/tag/v6.11.0 +// BSC testnet enabled 4844 on block: 39539137 +// Usage: +// nvm use 20 +// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 +// node check_blobtx.js --rpc https://data-seed-prebsc-1-s1.binance.org:8545 --startNum 39539137 --endNum 40345994 +program.option("--rpc ", "Rpc Server URL"); +program.option("--startNum ", "start block", 0); +program.option("--endNum ", "end block", 0); +program.parse(process.argv); + +const provider = new ethers.JsonRpcProvider(program.rpc); +const main = async () => { + var startBlock = parseInt(program.startNum) + var endBlock = parseInt(program.endNum) + if (isNaN(endBlock) || isNaN(startBlock) || startBlock == 0) { + console.error("invalid input, --startNum", program.startNum, "--endNum", program.endNum) + return + } + // if --endNum is not specified, set it to the latest block number.
+ if (endBlock == 0) { + endBlock = await provider.getBlockNumber(); + } + if (startBlock > endBlock) { + console.error("invalid input, startBlock:",startBlock, " endBlock:", endBlock); + return + } + + for (let i = startBlock; i <= endBlock; i++) { + let blockData = await provider.getBlock(i); + console.log("startBlock:",startBlock, "endBlock:", endBlock, "curBlock", i, "blobGasUsed", blockData.blobGasUsed); + if (blockData.blobGasUsed == 0) { + continue + } + for (let txIndex = 0; txIndex<= blockData.transactions.length - 1; txIndex++) { + let txHash = blockData.transactions[txIndex] + let txData = await provider.getTransaction(txHash); + if (txData.type == 3) { + console.log("BlobTx in block:",i, " txIndex:", txIndex, " txHash:", txHash); + } + } + } +}; +main().then(() => process.exit(0)) + .catch((error) => { + console.error(error); + process.exit(1); + }); \ No newline at end of file diff --git a/core/block_validator.go b/core/block_validator.go index d15e2cd786..b82965a99d 100644 --- a/core/block_validator.go +++ b/core/block_validator.go @@ -66,6 +66,31 @@ func NewBlockValidator(config *params.ChainConfig, blockchain *BlockChain, engin return validator } +// ValidateListsInBody validates that UncleHash, TxHash, and WithdrawalsHash correspond to the lists in the block body, respectively. +func ValidateListsInBody(block *types.Block) error { + header := block.Header() + if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { + return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash) + } + if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { + return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) + } + // Withdrawals are present after the Shanghai fork. + if header.WithdrawalsHash != nil { + // Withdrawals list must be present in body after Shanghai. + if block.Withdrawals() == nil { + return errors.New("missing withdrawals in block body") + } + if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash { + return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash) + } + } else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars + // Withdrawals are not allowed prior to shanghai fork + return errors.New("withdrawals present in block body") + } + return nil +} + // ValidateBody validates the given block's uncles and verifies the block // header's transaction and uncle roots. The headers are assumed to be already // validated at this point. @@ -83,31 +108,12 @@ func (v *BlockValidator) ValidateBody(block *types.Block) error { if err := v.engine.VerifyUncles(v.bc, block); err != nil { return err } - if hash := types.CalcUncleHash(block.Uncles()); hash != header.UncleHash { - return fmt.Errorf("uncle root hash mismatch (header value %x, calculated %x)", header.UncleHash, hash) - } validateFuns := []func() error{ func() error { - if hash := types.DeriveSha(block.Transactions(), trie.NewStackTrie(nil)); hash != header.TxHash { - return fmt.Errorf("transaction root hash mismatch: have %x, want %x", hash, header.TxHash) - } - return nil + return ValidateListsInBody(block) }, func() error { - // Withdrawals are present after the Shanghai fork. - if header.WithdrawalsHash != nil { - // Withdrawals list must be present in body after Shanghai.
- if block.Withdrawals() == nil { - return errors.New("missing withdrawals in block body") - } - if hash := types.DeriveSha(block.Withdrawals(), trie.NewStackTrie(nil)); hash != *header.WithdrawalsHash { - return fmt.Errorf("withdrawals root hash mismatch (header value %x, calculated %x)", *header.WithdrawalsHash, hash) - } - } else if block.Withdrawals() != nil { // Withdrawals turn into empty from nil when BlockBody has Sidecars - // Withdrawals are not allowed prior to shanghai fork - return errors.New("withdrawals present in block body") - } // Blob transactions may be present after the Cancun fork. var blobs int for i, tx := range block.Transactions() { diff --git a/core/blockchain_reader.go b/core/blockchain_reader.go index 8c22d66d11..d440590b8b 100644 --- a/core/blockchain_reader.go +++ b/core/blockchain_reader.go @@ -511,3 +511,12 @@ func (bc *BlockChain) SubscribeBlockProcessingEvent(ch chan<- bool) event.Subscr func (bc *BlockChain) SubscribeFinalizedHeaderEvent(ch chan<- FinalizedHeaderEvent) event.Subscription { return bc.scope.Track(bc.finalizedHeaderFeed.Subscribe(ch)) } + +// AncientTail retrieves the tail of the ancient blocks (the oldest block number still kept in the freezer) +func (bc *BlockChain) AncientTail() (uint64, error) { + tail, err := bc.db.Tail() + if err != nil { + return 0, err + } + return tail, nil +} diff --git a/core/rawdb/freezer.go b/core/rawdb/freezer.go index ba1542294d..e1dc46bde3 100644 --- a/core/rawdb/freezer.go +++ b/core/rawdb/freezer.go @@ -239,7 +239,7 @@ func (f *Freezer) Ancient(kind string, number uint64) ([]byte, error) { // - if maxBytes is not specified, 'count' items will be returned if they are present. func (f *Freezer) AncientRange(kind string, start, count, maxBytes uint64) ([][]byte, error) { if table := f.tables[kind]; table != nil { - return table.RetrieveItems(start, count, maxBytes) + return table.RetrieveItems(start-f.offset, count, maxBytes) } return nil, errUnknownTable } @@ -252,7 +252,7 @@ func (f *Freezer) Ancients() (uint64, error) { func (f *Freezer) TableAncients(kind string) (uint64, error) { f.writeLock.RLock() defer f.writeLock.RUnlock() - return f.tables[kind].items.Load(), nil + return f.tables[kind].items.Load() + f.offset, nil } // ItemAmountInAncient returns the actual length of current ancientDB. diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 7e57153c28..664f775052 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -209,6 +209,9 @@ type BlockChain interface { // UpdateChasingHead update remote best chain head, used by DA check now. UpdateChasingHead(head *types.Header) + + // AncientTail retrieves the tail of the ancient blocks + AncientTail() (uint64, error) } type DownloadOption func(downloader *Downloader) *Downloader @@ -797,6 +800,11 @@ func (d *Downloader) findAncestor(p *peerConnection, localHeight uint64, remoteH // We're above the max reorg threshold, find the earliest fork point floor = int64(localHeight - maxForkAncestry) } + // If we have pruned too much history, raise the floor to the ancient tail + if tail, err := d.blockchain.AncientTail(); err == nil && tail > uint64(floor) { + floor = int64(tail) + } + + // If we're doing a light sync, ensure the floor doesn't go below the CHT, as // all headers before that point will be missing.
if mode == LightSync { diff --git a/eth/handler.go b/eth/handler.go index 4a41276ed1..23dba9e14d 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -320,26 +320,22 @@ func newHandler(config *handlerConfig) (*handler, error) { } broadcastBlockWithCheck := func(block *types.Block, propagate bool) { - // All the block fetcher activities should be disabled - // after the transition. Print the warning log. - if h.merger.PoSFinalized() { - log.Warn("Unexpected validation activity", "hash", block.Hash(), "number", block.Number()) - return - } - // Reject all the PoS style headers in the first place. No matter - // the chain has finished the transition or not, the PoS headers - // should only come from the trusted consensus layer instead of - // p2p network. - if beacon, ok := h.chain.Engine().(*beacon.Beacon); ok { - if beacon.IsPoSHeader(block.Header()) { - log.Warn("unexpected post-merge header") - return - } - } if propagate { - if err := core.IsDataAvailable(h.chain, block); err != nil { - log.Error("Propagating block with invalid sidecars", "number", block.Number(), "hash", block.Hash(), "err", err) - return + checkErrs := make(chan error, 2) + + go func() { + checkErrs <- core.ValidateListsInBody(block) + }() + go func() { + checkErrs <- core.IsDataAvailable(h.chain, block) + }() + + for i := 0; i < cap(checkErrs); i++ { + err := <-checkErrs + if err != nil { + log.Error("Propagating invalid block", "number", block.Number(), "hash", block.Hash(), "err", err) + return + } } } h.BroadcastBlock(block, propagate) diff --git a/trie/inspect_trie.go b/trie/inspect_trie.go index 885d3c7454..89f691a36d 100644 --- a/trie/inspect_trie.go +++ b/trie/inspect_trie.go @@ -4,17 +4,15 @@ import ( "bytes" "errors" "fmt" - "math/big" - "os" "runtime" - "sort" - "strconv" + "strings" "sync" "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" @@ -26,63 +24,113 @@ import ( "golang.org/x/sync/semaphore" ) -type Account struct { - Nonce uint64 - Balance *big.Int - Root common.Hash // merkle root of the storage trie - CodeHash []byte -} - type Database interface { database.Database Scheme() string Cap(limit common.StorageSize) error DiskDB() ethdb.Database } + +const TopN = 3 + type Inspector struct { trie *Trie // traverse trie db Database stateRootHash common.Hash - blocknum uint64 + blockNum uint64 root node // root of triedb - totalNum uint64 - wg sync.WaitGroup - statLock sync.RWMutex - result map[string]*TrieTreeStat sem *semaphore.Weighted eoaAccountNums uint64 + + wg sync.WaitGroup + + results stat + topN int + + totalAccountNum atomic.Uint64 + totalStorageNum atomic.Uint64 + lastTime mclock.AbsTime +} + +type stat struct { + lock sync.RWMutex + account *trieStat + storageTopN []*trieStat + storageTopNTotal []uint64 + storageTotal nodeStat + storageTrieNum uint64 } -type TrieTreeStat struct { - is_account_trie bool - theNodeStatByLevel [15]NodeStat - totalNodeStat NodeStat +type trieStat struct { + owner common.Hash + totalNodeStat nodeStat + nodeStatByLevel [16]nodeStat } -type NodeStat struct { - ShortNodeCnt uint64 - FullNodeCnt uint64 - ValueNodeCnt uint64 +type nodeStat struct { + ShortNodeCnt atomic.Uint64 + FullNodeCnt atomic.Uint64 + ValueNodeCnt atomic.Uint64 +} + +func (ns *nodeStat) IsEmpty() bool { + if ns.FullNodeCnt.Load() == 0 && ns.ShortNodeCnt.Load() == 0 && ns.ValueNodeCnt.Load() == 0 { + return true + } + return 
false +} + +func (s *stat) add(ts *trieStat, topN int) { + s.lock.Lock() + defer s.lock.Unlock() + if ts.owner == (common.Hash{}) { + s.account = ts + return + } + + total := ts.totalNodeStat.ValueNodeCnt.Load() + ts.totalNodeStat.FullNodeCnt.Load() + ts.totalNodeStat.ShortNodeCnt.Load() + if len(s.storageTopNTotal) == 0 || total > s.storageTopNTotal[len(s.storageTopNTotal)-1] { + var ( + i int + t uint64 + ) + for i, t = range s.storageTopNTotal { + if total < t { + continue + } + break + } + s.storageTopNTotal = append(s.storageTopNTotal[:i], append([]uint64{total}, s.storageTopNTotal[i:]...)...) + s.storageTopN = append(s.storageTopN[:i], append([]*trieStat{ts}, s.storageTopN[i:]...)...) + if len(s.storageTopN) > topN { + s.storageTopNTotal = s.storageTopNTotal[:topN] + s.storageTopN = s.storageTopN[:topN] + } + } + + s.storageTotal.ShortNodeCnt.Add(ts.totalNodeStat.ShortNodeCnt.Load()) + s.storageTotal.ValueNodeCnt.Add(ts.totalNodeStat.ValueNodeCnt.Load()) + s.storageTotal.FullNodeCnt.Add(ts.totalNodeStat.FullNodeCnt.Load()) + s.storageTrieNum++ } -func (trieStat *TrieTreeStat) AtomicAdd(theNode node, height uint32) { +func (trieStat *trieStat) add(theNode node, height int) { switch (theNode).(type) { case *shortNode: - atomic.AddUint64(&trieStat.totalNodeStat.ShortNodeCnt, 1) - atomic.AddUint64(&(trieStat.theNodeStatByLevel[height].ShortNodeCnt), 1) + trieStat.totalNodeStat.ShortNodeCnt.Add(1) + trieStat.nodeStatByLevel[height].ShortNodeCnt.Add(1) case *fullNode: - atomic.AddUint64(&trieStat.totalNodeStat.FullNodeCnt, 1) - atomic.AddUint64(&trieStat.theNodeStatByLevel[height].FullNodeCnt, 1) + trieStat.totalNodeStat.FullNodeCnt.Add(1) + trieStat.nodeStatByLevel[height].FullNodeCnt.Add(1) case valueNode: - atomic.AddUint64(&trieStat.totalNodeStat.ValueNodeCnt, 1) - atomic.AddUint64(&((trieStat.theNodeStatByLevel[height]).ValueNodeCnt), 1) - default: - panic(errors.New("Invalid node type to statistics")) + trieStat.totalNodeStat.ValueNodeCnt.Add(1) + trieStat.nodeStatByLevel[height].ValueNodeCnt.Add(1) } } -func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) { - table := tablewriter.NewWriter(os.Stdout) +func (trieStat *trieStat) Display(ownerAddress string, treeType string) string { + sw := new(strings.Builder) + table := tablewriter.NewWriter(sw) table.SetHeader([]string{"-", "Level", "ShortNodeCnt", "FullNodeCnt", "ValueNodeCnt"}) if ownerAddress == "" { table.SetCaption(true, fmt.Sprintf("%v", treeType)) @@ -90,38 +138,27 @@ func (trieStat *TrieTreeStat) Display(ownerAddress string, treeType string) { table.SetCaption(true, fmt.Sprintf("%v-%v", treeType, ownerAddress)) } table.SetAlignment(1) - for i := 0; i < len(trieStat.theNodeStatByLevel); i++ { - nodeStat := trieStat.theNodeStatByLevel[i] - if nodeStat.FullNodeCnt == 0 && nodeStat.ShortNodeCnt == 0 && nodeStat.ValueNodeCnt == 0 { - break + + for i := range trieStat.nodeStatByLevel { + if trieStat.nodeStatByLevel[i].IsEmpty() { + continue } table.AppendBulk([][]string{ - {"-", strconv.Itoa(i), nodeStat.ShortNodeCount(), nodeStat.FullNodeCount(), nodeStat.ValueNodeCount()}, + {"-", fmt.Sprintf("%d", i), + fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ShortNodeCnt.Load()), + fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].FullNodeCnt.Load()), + fmt.Sprintf("%d", trieStat.nodeStatByLevel[i].ValueNodeCnt.Load())}, }) } table.AppendBulk([][]string{ - {"Total", "-", trieStat.totalNodeStat.ShortNodeCount(), trieStat.totalNodeStat.FullNodeCount(), trieStat.totalNodeStat.ValueNodeCount()}, + {"Total", "-", 
fmt.Sprintf("%d", trieStat.totalNodeStat.ShortNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.FullNodeCnt.Load()), fmt.Sprintf("%d", trieStat.totalNodeStat.ValueNodeCnt.Load())}, }) table.Render() -} - -func Uint64ToString(cnt uint64) string { - return fmt.Sprintf("%v", cnt) -} - -func (nodeStat *NodeStat) ShortNodeCount() string { - return Uint64ToString(nodeStat.ShortNodeCnt) -} - -func (nodeStat *NodeStat) FullNodeCount() string { - return Uint64ToString(nodeStat.FullNodeCnt) -} -func (nodeStat *NodeStat) ValueNodeCount() string { - return Uint64ToString(nodeStat.ValueNodeCnt) + return sw.String() } // NewInspector return a inspector obj -func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uint64, jobnum uint64) (*Inspector, error) { +func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blockNum uint64, jobNum uint64, topN int) (*Inspector, error) { if tr == nil { return nil, errors.New("trie is nil") } @@ -131,15 +168,20 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin } ins := &Inspector{ - trie: tr, - db: db, - stateRootHash: stateRootHash, - blocknum: blocknum, - root: tr.root, - result: make(map[string]*TrieTreeStat), - totalNum: (uint64)(0), - wg: sync.WaitGroup{}, - sem: semaphore.NewWeighted(int64(jobnum)), + trie: tr, + db: db, + stateRootHash: stateRootHash, + blockNum: blockNum, + root: tr.root, + results: stat{}, + topN: topN, + totalAccountNum: atomic.Uint64{}, + totalStorageNum: atomic.Uint64{}, + lastTime: mclock.Now(), + sem: semaphore.NewWeighted(int64(jobNum)), + + wg: sync.WaitGroup{}, + eoaAccountNums: 0, } @@ -147,155 +189,123 @@ func NewInspector(tr *Trie, db Database, stateRootHash common.Hash, blocknum uin } // Run statistics, external call -func (inspect *Inspector) Run() { - accountTrieStat := &TrieTreeStat{ - is_account_trie: true, - } - if inspect.db.Scheme() == rawdb.HashScheme { - ticker := time.NewTicker(30 * time.Second) - go func() { - defer ticker.Stop() - for range ticker.C { - inspect.db.Cap(DEFAULT_TRIEDBCACHE_SIZE) +func (s *Inspector) Run() { + ticker := time.NewTicker(30 * time.Second) + go func() { + defer ticker.Stop() + for range ticker.C { + if s.db.Scheme() == rawdb.HashScheme { + s.db.Cap(DEFAULT_TRIEDBCACHE_SIZE) } - }() - } - - if _, ok := inspect.result[""]; !ok { - inspect.result[""] = accountTrieStat - } - log.Info("Find Account Trie Tree", "rootHash: ", inspect.trie.Hash().String(), "BlockNum: ", inspect.blocknum) - - inspect.ConcurrentTraversal(inspect.trie, accountTrieStat, inspect.root, 0, []byte{}) - inspect.wg.Wait() -} + runtime.GC() + } + }() -func (inspect *Inspector) SubConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) { - inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, theNode, height, path) - inspect.wg.Done() -} + log.Info("Find Account Trie Tree", "rootHash: ", s.trie.Hash().String(), "BlockNum: ", s.blockNum) -func (inspect *Inspector) ConcurrentTraversal(theTrie *Trie, theTrieTreeStat *TrieTreeStat, theNode node, height uint32, path []byte) { - // print process progress - total_num := atomic.AddUint64(&inspect.totalNum, 1) - if total_num%100000 == 0 { - fmt.Printf("Complete progress: %v, go routines Num: %v\n", total_num, runtime.NumGoroutine()) + ts := &trieStat{ + owner: common.Hash{}, } + s.traversal(s.trie, ts, s.root, 0, []byte{}) + s.results.add(ts, s.topN) + s.wg.Wait() +} +func (s *Inspector) traversal(trie *Trie, ts *trieStat, n node, height int, path []byte) { // 
nil node - if theNode == nil { + if n == nil { return } - switch current := (theNode).(type) { + ts.add(n, height) + + switch current := (n).(type) { case *shortNode: - inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, current.Val, height, append(path, current.Key...)) + s.traversal(trie, ts, current.Val, height, append(path, current.Key...)) case *fullNode: for idx, child := range current.Children { if child == nil { continue } - childPath := append(path, byte(idx)) - if inspect.sem.TryAcquire(1) { - inspect.wg.Add(1) - dst := make([]byte, len(childPath)) - copy(dst, childPath) - go inspect.SubConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, dst) - } else { - inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, child, height+1, childPath) - } + p := common.CopyBytes(append(path, byte(idx))) + s.traversal(trie, ts, child, height+1, p) } case hashNode: - n, err := theTrie.resloveWithoutTrack(current, path) + tn, err := trie.resloveWithoutTrack(current, path) if err != nil { - fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, theTrie.Hash().String(), height+1, path) + fmt.Printf("Resolve HashNode error: %v, TrieRoot: %v, Height: %v, Path: %v\n", err, trie.Hash().String(), height+1, path) return } - inspect.ConcurrentTraversal(theTrie, theTrieTreeStat, n, height, path) - return + s.PrintProgress(trie) + s.traversal(trie, ts, tn, height, path) case valueNode: if !hasTerm(path) { break } - var account Account + var account types.StateAccount if err := rlp.Decode(bytes.NewReader(current), &account); err != nil { break } if common.BytesToHash(account.CodeHash) == types.EmptyCodeHash { - inspect.eoaAccountNums++ + s.eoaAccountNums++ } if account.Root == (common.Hash{}) || account.Root == types.EmptyRootHash { break } ownerAddress := common.BytesToHash(hexToCompact(path)) - contractTrie, err := New(StorageTrieID(inspect.stateRootHash, ownerAddress, account.Root), inspect.db) + contractTrie, err := New(StorageTrieID(s.stateRootHash, ownerAddress, account.Root), s.db) if err != nil { - fmt.Printf("New contract trie node: %v, error: %v, Height: %v, Path: %v\n", theNode, err, height, path) - break + panic(err) } contractTrie.tracer.reset() - trieStat := &TrieTreeStat{ - is_account_trie: false, - } - inspect.statLock.Lock() - if _, ok := inspect.result[ownerAddress.String()]; !ok { - inspect.result[ownerAddress.String()] = trieStat + if s.sem.TryAcquire(1) { + s.wg.Add(1) + go func() { + t := &trieStat{ + owner: ownerAddress, + } + s.traversal(contractTrie, t, contractTrie.root, 0, []byte{}) + s.results.add(t, s.topN) + s.sem.Release(1) + s.wg.Done() + }() + } else { + t := &trieStat{ + owner: ownerAddress, + } + s.traversal(contractTrie, t, contractTrie.root, 0, []byte{}) + s.results.add(t, s.topN) } - inspect.statLock.Unlock() - - // log.Info("Find Contract Trie Tree, rootHash: ", contractTrie.Hash().String(), "") - inspect.wg.Add(1) - go inspect.SubConcurrentTraversal(contractTrie, trieStat, contractTrie.root, 0, []byte{}) default: - panic(errors.New("Invalid node type to traverse.")) + panic(errors.New("invalid node type to traverse")) } - theTrieTreeStat.AtomicAdd(theNode, height) } -func (inspect *Inspector) DisplayResult() { - // display root hash - if _, ok := inspect.result[""]; !ok { - log.Info("Display result error", "missing account trie") - return +func (s *Inspector) PrintProgress(t *Trie) { + var ( + elapsed = mclock.Now().Sub(s.lastTime) + ) + if t.owner == (common.Hash{}) { + s.totalAccountNum.Add(1) + } else { + 
s.totalStorageNum.Add(1) } - inspect.result[""].Display("", "AccountTrie") - - type SortedTrie struct { - totalNum uint64 - ownerAddress string + if elapsed > 4*time.Second { + log.Info("traversal progress", "TotalAccountNum", s.totalAccountNum.Load(), "TotalStorageNum", s.totalStorageNum.Load(), "Goroutine", runtime.NumGoroutine()) + s.lastTime = mclock.Now() } - // display contract trie - var sortedTriesByNums []SortedTrie - var totalContactsNodeStat NodeStat - var contractTrieCnt uint64 = 0 +} - for ownerAddress, stat := range inspect.result { - if ownerAddress == "" { - continue - } - contractTrieCnt++ - totalContactsNodeStat.ShortNodeCnt += stat.totalNodeStat.ShortNodeCnt - totalContactsNodeStat.FullNodeCnt += stat.totalNodeStat.FullNodeCnt - totalContactsNodeStat.ValueNodeCnt += stat.totalNodeStat.ValueNodeCnt - totalNodeCnt := stat.totalNodeStat.ShortNodeCnt + stat.totalNodeStat.ValueNodeCnt + stat.totalNodeStat.FullNodeCnt - sortedTriesByNums = append(sortedTriesByNums, SortedTrie{totalNum: totalNodeCnt, ownerAddress: ownerAddress}) - } - sort.Slice(sortedTriesByNums, func(i, j int) bool { - return sortedTriesByNums[i].totalNum > sortedTriesByNums[j].totalNum - }) - fmt.Println("EOA accounts num: ", inspect.eoaAccountNums) - // only display top 5 - for i, t := range sortedTriesByNums { - if i > 5 { - break - } - if stat, ok := inspect.result[t.ownerAddress]; !ok { - log.Error("Storage trie stat not found", "ownerAddress", t.ownerAddress) - } else { - stat.Display(t.ownerAddress, "ContractTrie") - } +func (s *Inspector) DisplayResult() { + // display root hash + fmt.Println(s.results.account.Display("", "AccountTrie")) + fmt.Println("EOA accounts num: ", s.eoaAccountNums) + + // display contract trie + for _, st := range s.results.storageTopN { + fmt.Println(st.Display(st.owner.String(), "StorageTrie")) } fmt.Printf("Contract Trie, total trie num: %v, ShortNodeCnt: %v, FullNodeCnt: %v, ValueNodeCnt: %v\n", - contractTrieCnt, totalContactsNodeStat.ShortNodeCnt, totalContactsNodeStat.FullNodeCnt, totalContactsNodeStat.ValueNodeCnt) + s.results.storageTrieNum, s.results.storageTotal.ShortNodeCnt.Load(), s.results.storageTotal.FullNodeCnt.Load(), s.results.storageTotal.ValueNodeCnt.Load()) }
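The NArg branches in the inspectTrie change implement a simple positional-argument fallback: jobnum defaults to 1000 and topN to 10 whenever the corresponding argument is omitted. A hedged, self-contained sketch of the same fallback expressed as a small helper (parseOptionalUint is an illustrative name, not part of the patch):

package main

import (
	"fmt"
	"strconv"
)

// parseOptionalUint returns def when the positional argument at idx is absent,
// mirroring the jobnum (1000) and topN (10) defaults used by inspect-trie.
func parseOptionalUint(args []string, idx int, def uint64) (uint64, error) {
	if idx >= len(args) {
		return def, nil
	}
	v, err := strconv.ParseUint(args[idx], 10, 64)
	if err != nil {
		return 0, fmt.Errorf("failed to parse args[%d] %q: %w", idx, args[idx], err)
	}
	return v, nil
}

func main() {
	args := []string{"latest", "500"} // blocknum and jobnum given, topn omitted
	jobnum, _ := parseOptionalUint(args, 1, 1000)
	topN, _ := parseOptionalUint(args, 2, 10)
	fmt.Println(jobnum, topN) // 500 10
}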
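ValidateListsInBody pulls the uncle-, transaction- and withdrawals-root checks out of ValidateBody so they can be reused on a bare *types.Block outside the validator, which is what the broadcast path in eth/handler.go relies on. A hedged usage sketch against the exported function (quickBodyCheck and the header values are illustrative, not from the patch):

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
)

// quickBodyCheck rejects a block whose body lists do not match the header
// roots, before any heavier processing is attempted.
func quickBodyCheck(block *types.Block) error {
	if err := core.ValidateListsInBody(block); err != nil {
		return fmt.Errorf("body/header mismatch for block %d: %w", block.NumberU64(), err)
	}
	return nil
}

func main() {
	// A header-only block with zeroed roots fails the check: the body lists
	// must be consistent with what the header commits to.
	blk := types.NewBlockWithHeader(&types.Header{Number: big.NewInt(1)})
	fmt.Println(quickBodyCheck(blk))
}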
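The reworked broadcastBlockWithCheck runs the body-list validation and the data-availability check concurrently and refuses to propagate on the first failure. A self-contained sketch of that fan-out pattern, with two placeholder closures standing in for core.ValidateListsInBody and core.IsDataAvailable:

package main

import (
	"errors"
	"fmt"
)

// runChecks fans the validations out to goroutines and returns the first error.
// The channel is buffered to len(checks) so late goroutines never block.
func runChecks(checks ...func() error) error {
	errCh := make(chan error, len(checks))
	for _, check := range checks {
		go func(c func() error) { errCh <- c() }(check)
	}
	for range checks {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := runChecks(
		func() error { return nil },                                 // body/header root checks
		func() error { return errors.New("missing blob sidecar") }, // data-availability check
	)
	fmt.Println(err) // missing blob sidecar
}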
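In the inspector, stat.add keeps storageTopN sorted by total node count in descending order and truncates it to topN entries after each insertion. The same insert-then-truncate idea on plain integers, as a hedged sketch (insertTopN is an illustrative helper, not part of the change):

package main

import "fmt"

// insertTopN inserts v into the descending-sorted slice tops and keeps at most n entries.
func insertTopN(tops []uint64, v uint64, n int) []uint64 {
	i := 0
	for ; i < len(tops); i++ {
		if v >= tops[i] {
			break // found the slot that keeps the slice sorted
		}
	}
	tops = append(tops[:i], append([]uint64{v}, tops[i:]...)...)
	if len(tops) > n {
		tops = tops[:n]
	}
	return tops
}

func main() {
	tops := []uint64{}
	for _, v := range []uint64{5, 42, 7, 19, 3} {
		tops = insertTopN(tops, v, 3)
	}
	fmt.Println(tops) // [42 19 7]
}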
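The traversal only spawns a goroutine for a storage trie when sem.TryAcquire succeeds and otherwise walks it inline, so concurrency stays bounded by jobnum without the walker ever blocking on a worker slot. A compact, runnable sketch of that TryAcquire/WaitGroup pattern (work is a placeholder for the per-trie traversal):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"

	"golang.org/x/sync/semaphore"
)

func main() {
	var (
		sem     = semaphore.NewWeighted(4) // at most 4 concurrent workers
		wg      sync.WaitGroup
		counter atomic.Uint64
	)
	work := func(i int) { counter.Add(uint64(i)) } // placeholder for traversing one trie

	for i := 0; i < 100; i++ {
		if sem.TryAcquire(1) {
			wg.Add(1)
			go func(i int) {
				defer wg.Done()
				defer sem.Release(1)
				work(i)
			}(i)
		} else {
			work(i) // no slot free: do the work inline instead of blocking
		}
	}
	wg.Wait()
	fmt.Println("sum:", counter.Load()) // 4950
}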