From 456a38b140e758d653ae5e7d9ddb3851afc5c76f Mon Sep 17 00:00:00 2001 From: Timothy Wu Date: Fri, 10 Jan 2025 22:33:51 -0500 Subject: [PATCH] feat(internal/client/db) Introduce `api.Backend` interface and `db.Backend` implementation (#4405) Co-authored-by: Haiko Schol --- go.mod | 6 +- go.sum | 8 +- internal/client/api/backend.go | 207 ++ internal/client/api/client.go | 14 + internal/client/api/leaves.go | 48 +- internal/client/api/utils/utils.go | 51 + .../client/consensus/grandpa/authorities.go | 2 +- .../client/consensus/grandpa/justification.go | 15 +- internal/client/db/backend.go | 1832 +++++++++++++ .../client/db/backend_integration_test.go | 54 + internal/client/db/backend_test.go | 2357 +++++++++++++++++ internal/client/db/children.go | 8 +- internal/client/db/columns/columns.go | 17 +- internal/client/db/db.go | 117 +- internal/client/db/db_test.go | 11 +- internal/client/db/metakeys/metakeys.go | 5 +- internal/client/db/offchain/offchain.go | 94 + internal/client/db/offchain/offchain_test.go | 61 + internal/client/db/pinned_blocks_cache.go | 59 +- .../client/db/pinned_blocks_cache_test.go | 28 +- internal/client/db/utils.go | 115 +- internal/client/state-db/noncanonical.go | 66 +- internal/client/state-db/noncanonical_test.go | 2 +- internal/client/state-db/pruning.go | 44 +- internal/client/state-db/pruning_test.go | 4 +- internal/client/state-db/statedb.go | 59 +- internal/client/state-db/statedb_test.go | 6 +- internal/cost-lru/cost_lru.go | 10 +- internal/hash-db/hash_db.go | 5 +- internal/kvdb/kvdb.go | 148 ++ internal/kvdb/kvdb_test.go | 26 + internal/kvdb/memory-kvdb/memory_kvdb.go | 173 ++ internal/kvdb/memory-kvdb/memory_kvdb_test.go | 323 +++ internal/memory-db/memory_db.go | 10 +- internal/primitives/blockchain/backend.go | 88 +- internal/primitives/blockchain/error.go | 17 + .../primitives/blockchain/header_metadata.go | 203 +- .../primitives/consensus/grandpa/grandpa.go | 2 +- internal/primitives/core/crypto/crypto.go | 28 +- internal/primitives/core/ed25519/ed25519.go | 55 +- internal/primitives/core/hash/hash.go | 4 + internal/primitives/core/offchain/offchain.go | 35 + internal/primitives/database/database.go | 26 +- internal/primitives/database/kvdb.go | 116 + internal/primitives/database/mem.go | 10 +- internal/primitives/runtime/digest.go | 17 +- internal/primitives/runtime/generic/block.go | 36 +- internal/primitives/runtime/generic/header.go | 4 +- internal/primitives/runtime/runtime.go | 22 +- .../primitives/runtime/testing/testing.go | 13 + internal/primitives/state-machine/backend.go | 60 +- .../state-machine/in_memory_backend.go | 2 +- .../state-machine/overlayed_changes.go | 56 +- .../primitives/state-machine/trie_backend.go | 14 +- .../state-machine/trie_backend_essence.go | 2 +- .../state-machine/trie_backend_test.go | 4 +- internal/primitives/storage/keys/keys.go | 23 + internal/primitives/storage/keys/keys_test.go | 19 + internal/primitives/storage/storage.go | 68 +- internal/primitives/trie/cache/cache.go | 34 +- .../primitives/trie/cache/shared_cache.go | 57 +- internal/primitives/trie/recorder/recorder.go | 33 +- internal/primitives/trie/storage_proof.go | 10 +- internal/primitives/trie/trie.go | 28 +- internal/saturating/saturating.go | 67 + internal/saturating/saturating_test.go | 34 + lib/blocktree/blocktree_test.go | 3 +- pkg/finality-grandpa/dummy_chain_test.go | 4 +- pkg/finality-grandpa/voter_test.go | 2 +- pkg/scale/varying_data_type_test.go | 2 +- pkg/trie/triedb/cache.go | 2 +- pkg/trie/triedb/lookup.go | 10 +- 72 files changed, 6627 
insertions(+), 568 deletions(-) create mode 100644 internal/client/api/backend.go create mode 100644 internal/client/api/client.go create mode 100644 internal/client/api/utils/utils.go create mode 100644 internal/client/db/backend.go create mode 100644 internal/client/db/backend_integration_test.go create mode 100644 internal/client/db/backend_test.go create mode 100644 internal/client/db/offchain/offchain.go create mode 100644 internal/client/db/offchain/offchain_test.go create mode 100644 internal/kvdb/kvdb.go create mode 100644 internal/kvdb/kvdb_test.go create mode 100644 internal/kvdb/memory-kvdb/memory_kvdb.go create mode 100644 internal/kvdb/memory-kvdb/memory_kvdb_test.go create mode 100644 internal/primitives/blockchain/error.go create mode 100644 internal/primitives/core/offchain/offchain.go create mode 100644 internal/primitives/database/kvdb.go create mode 100644 internal/primitives/runtime/testing/testing.go create mode 100644 internal/primitives/storage/keys/keys.go create mode 100644 internal/primitives/storage/keys/keys_test.go create mode 100644 internal/saturating/saturating.go create mode 100644 internal/saturating/saturating_test.go diff --git a/go.mod b/go.mod index 03426abf78..14860b50d2 100644 --- a/go.mod +++ b/go.mod @@ -30,7 +30,6 @@ require ( github.com/jpillora/ipfilter v1.2.9 github.com/karlseguin/ccache/v3 v3.0.6 github.com/klauspost/compress v1.17.11 - github.com/li1234yun/gods-generic v0.0.0-20230420031820-5b9f08f4846b github.com/libp2p/go-libp2p v0.38.2 github.com/libp2p/go-libp2p-kad-dht v0.29.0 github.com/minio/sha256-simd v1.0.1 @@ -45,6 +44,7 @@ require ( github.com/tetratelabs/wazero v1.1.0 github.com/tidwall/btree v1.7.0 github.com/tyler-smith/go-bip39 v1.1.0 + github.com/ugurcsen/gods-generic v0.10.4 go.uber.org/mock v0.5.0 golang.org/x/crypto v0.33.0 golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 @@ -229,3 +229,7 @@ toolchain go1.23.2 replace github.com/tetratelabs/wazero => github.com/ChainSafe/wazero v0.0.0-20240319130522-78b21a59bd5f replace github.com/centrifuge/go-substrate-rpc-client/v4 => github.com/timwu20/go-substrate-rpc-client/v4 v4.0.0-20231110032757-3d8e441b7303 + +replace github.com/elastic/go-freelru => github.com/timwu20/go-freelru v0.0.0-20241023201517-deb64adeae4c + +replace github.com/ugurcsen/gods-generic => github.com/timwu20/gods-generic v0.0.0-20241206024616-791a209639f8 diff --git a/go.sum b/go.sum index 3d8ddf1931..1f53cec303 100644 --- a/go.sum +++ b/go.sum @@ -129,8 +129,6 @@ github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9pu github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/elastic/go-freelru v0.15.0 h1:Jo1aY8JAvpyxbTDJEudrsBfjFDaALpfVv8mxuh9sfvI= -github.com/elastic/go-freelru v0.15.0/go.mod h1:bSdWT4M0lW79K8QbX6XY2heQYSCqD7THoYf82pT/H3I= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.3 h1:xwkKwPia+hSfg9GqrCUKYdId102m9qTJIIr7egmK/uo= github.com/elastic/gosigar v0.14.3/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= @@ -344,8 +342,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.4.0 
h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= -github.com/li1234yun/gods-generic v0.0.0-20230420031820-5b9f08f4846b h1:qMDBhRBx0FqFOH9qVZT2Y/7nXjFPqBYHtDdVjnasFFE= -github.com/li1234yun/gods-generic v0.0.0-20230420031820-5b9f08f4846b/go.mod h1:DZQmZyoCj20lR+owZ4HFwOmJ9pMiQtXpGYln6DGuv6Q= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= @@ -630,8 +626,12 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/timwu20/go-freelru v0.0.0-20241023201517-deb64adeae4c h1:WKEqrNAA0DIVpTHGV70TonOX7pN0tB8CSkT8Z8Jbxjw= +github.com/timwu20/go-freelru v0.0.0-20241023201517-deb64adeae4c/go.mod h1:bSdWT4M0lW79K8QbX6XY2heQYSCqD7THoYf82pT/H3I= github.com/timwu20/go-substrate-rpc-client/v4 v4.0.0-20231110032757-3d8e441b7303 h1:FX7wMjDD0sWGWsC9k+stJaYwThbaq6BDT7ArlInU0KI= github.com/timwu20/go-substrate-rpc-client/v4 v4.0.0-20231110032757-3d8e441b7303/go.mod h1:1p5145LS4BacYYKFstnHScydK9MLjZ15l72v8mbngPQ= +github.com/timwu20/gods-generic v0.0.0-20241206024616-791a209639f8 h1:woES76T+jY3c6JCKAsJGi4X7DdNoBzhnK3xVm3kqvSM= +github.com/timwu20/gods-generic v0.0.0-20241206024616-791a209639f8/go.mod h1:mGYOa88Y5sbw+ADXLpScxjJ7s5iHoWya/YHyeQ4f6c4= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= diff --git a/internal/client/api/backend.go b/internal/client/api/backend.go new file mode 100644 index 0000000000..173722362b --- /dev/null +++ b/internal/client/api/backend.go @@ -0,0 +1,207 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package api + +import ( + "sync" + + "github.com/ChainSafe/gossamer/internal/primitives/blockchain" + "github.com/ChainSafe/gossamer/internal/primitives/core/offchain" + "github.com/ChainSafe/gossamer/internal/primitives/runtime" + statemachine "github.com/ChainSafe/gossamer/internal/primitives/state-machine" + "github.com/ChainSafe/gossamer/internal/primitives/storage" +) + +// NewBlockState is the state of a new block. +type NewBlockState uint8 + +const ( + // NewBlockStateNormal is a normal block. + NewBlockStateNormal NewBlockState = iota + // NewBlockStateBest is a new best block. + NewBlockStateBest + // NewBlockStateFinal is a newly finalized block. + NewBlockStateFinal +) + +// IsBest returns true if equal to [NewBlockStateBest] +func (nbs NewBlockState) IsBest() bool { + return nbs == NewBlockStateBest +} + +// IsFinal returns true if equal to [NewBlockStateFinal] +func (nbs NewBlockState) IsFinal() bool { + return nbs == NewBlockStateFinal +} + +// BlockImportOperation keeps hold of the inserted block state and data. 
+type BlockImportOperation[ + N runtime.Number, + H runtime.Hash, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +] interface { + // State returns the pending state. + // Returns nil for backends with locally-unavailable state data. + State() (statemachine.Backend[H, Hasher], error) + + // SetBlockData will add block data to the transaction. + SetBlockData( + header Header, + body []E, + indexedBody [][]byte, + justifications runtime.Justifications, + state NewBlockState, + ) error + + // UpdateDBStorage will inject storage data into the database. + UpdateDBStorage(update statemachine.BackendTransaction[H, Hasher]) error + + // SetGenesisState will set genesis state. If commit is false the state is saved in memory, but is not written + // to the database. + SetGenesisState(storage storage.Storage, commit bool, stateVersion storage.StateVersion) (H, error) + + // ResetStorage will inject storage data into the database, replacing any existing data. + ResetStorage(storage storage.Storage, stateVersion storage.StateVersion) (H, error) + + // UpdateStorage will set storage changes. + UpdateStorage(update statemachine.StorageCollection, childUpdate statemachine.ChildStorageCollection) error + + // UpdateOffchainStorage will write offchain storage changes to the database. + UpdateOffchainStorage(offchainUpdate statemachine.OffchainChangesCollection) error + + // InsertAux will insert auxiliary keys. + // Values that are nil indicate that the corresponding keys should be deleted. + InsertAux(ops AuxDataOperations) error + + // MarkFinalized marks a block as finalized. + MarkFinalized(hash H, justification *runtime.Justification) error + + // MarkHead marks a block as the new head. If the block import changes the head and MarkHead is called with + // a different block hash, MarkHead will override the changed head as a result of the block import. + MarkHead(hash H) error + + // UpdateTransactionIndex adds a transaction index operation. + UpdateTransactionIndex(index []statemachine.IndexOperation) error +} + +type KeyValue struct { + Key []byte + Value []byte +} + +// AuxStore provides access to an auxiliary database. +// +// This is a simple global database not aware of forks. It can be used for storing auxiliary +// information like total block weight/difficulty for fork resolution purposes as a common use +// case. +type AuxStore interface { + // Insert auxiliary data into key-value store. + // + // Deletions occur after insertions. + InsertAux(insert []KeyValue, delete [][]byte) error + + // Query auxiliary data from key-value store. + GetAux(key []byte) ([]byte, error) +} + +// Backend is the client backend. +// +// Manages the data layer. +// +// # State Pruning +// +// While an object from StateAt is alive, the state +// should not be pruned. The backend should internally reference-count +// its state objects. +// +// The same applies for live BlockImportOperation instances: while an import operation building on a +// parent P is alive, the state for P should not be pruned. +// +// # Block Pruning +// +// Users can pin blocks in memory by calling PinBlock. When +// a block would be pruned, its value is kept in an in-memory cache +// until it is unpinned via UnpinBlock. +// +// While a block is pinned, its state is also preserved. +// +// The backend should internally reference count the number of pin / unpin calls.
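+//
+// A minimal import-flow sketch (hypothetical caller code; the backend, header,
+// body and parentHash values are assumptions, with error handling elided):
+//
+//	op, _ := backend.BeginOperation()
+//	// Anchor the operation's state transition on the parent block.
+//	_ = backend.BeginStateOperation(op, parentHash)
+//	// Attach the block itself and mark it as the new best block.
+//	_ = op.SetBlockData(header, body, nil, nil, NewBlockStateBest)
+//	// Commit everything atomically.
+//	_ = backend.CommitOperation(op)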
+type Backend[ + H runtime.Hash, + N runtime.Number, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], + E runtime.Extrinsic, +] interface { + AuxStore // Insert auxiliary data into key-value store. + + // BeginOperation begins a new block insertion transaction with given parent block id. + // When constructing the genesis, this is called with an all-zero hash. + BeginOperation() (BlockImportOperation[N, H, Hasher, Header, E], error) + + // BeginStateOperation notes an operation to contain state transition. + BeginStateOperation(operation BlockImportOperation[N, H, Hasher, Header, E], block H) error + + // CommitOperation will commit block insertion. + CommitOperation(transaction BlockImportOperation[N, H, Hasher, Header, E]) error + + // FinalizeBlock will finalize the block with the given hash. + // + // This should only be called if the parent of the given block has been finalized. + FinalizeBlock(hash H, justification *runtime.Justification) error + + // AppendJustification appends justification to the block with the given hash. + // + // This should only be called for blocks that are already finalized. + AppendJustification(hash H, justification runtime.Justification) error + + // Blockchain returns reference to blockchain backend. + Blockchain() blockchain.Backend[H, N, Header, E] + + // OffchainStorage returns a pointer to offchain storage. + OffchainStorage() offchain.OffchainStorage + + // PinBlock pins the block to keep body, justification and state available after pruning. + // The number of pins is reference counted. Users need to make sure to perform + // one call to UnpinBlock per call to PinBlock. + PinBlock(hash H) error + + // UnpinBlock unpins the block to allow pruning. + UnpinBlock(hash H) + + // HaveStateAt returns true if state for the given block is available. + HaveStateAt(hash H, number N) bool + + // StateAt returns a state backend with the post-state of the given block. + StateAt(hash H) (statemachine.Backend[H, Hasher], error) + + // Revert attempts to revert the chain by n blocks. If revertFinalized is set it will attempt to + // revert past any finalized block. This is unsafe and can potentially leave the node in an + // inconsistent state. All blocks higher than the best block are also reverted and do not count + // towards n. + // + // Returns the number of blocks that were successfully reverted and the list of finalized + // blocks that have been reverted. + Revert(n N, revertFinalized bool) (N, map[H]any, error) + + // RemoveLeafBlock discards a non-best, unfinalized leaf block. + RemoveLeafBlock(hash H) error + + // GetImportLock returns access to the import lock for this backend. + // + // NOTE: Backend isn't expected to acquire the lock by itself ever. Rather + // the using components should acquire and hold the lock whenever they do + // something that the import of a block would interfere with, e.g. importing + // a new block or calculating the best head. + GetImportLock() *sync.RWMutex + + // RequiresFullSync tells whether the backend requires full-sync mode. + RequiresFullSync() bool + + // UsageInfo returns current usage statistics. + // TODO: implement UsageInfo if we require it + // UsageInfo() *UsageInfo +} diff --git a/internal/client/api/client.go b/internal/client/api/client.go new file mode 100644 index 0000000000..133bd2db20 --- /dev/null +++ b/internal/client/api/client.go @@ -0,0 +1,14 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package api + +// AuxDataOperation is a single operation to be performed on storage aux data.
+// Key is the encoded data key. +// Data is the encoded optional data to write. +// If Data is nil, the key and the associated data are deleted from storage. +type AuxDataOperation struct { + Key []byte + Data []byte +} +type AuxDataOperations []AuxDataOperation diff --git a/internal/client/api/leaves.go b/internal/client/api/leaves.go index 3bd388bfef..2da440a911 100644 --- a/internal/client/api/leaves.go +++ b/internal/client/api/leaves.go @@ -18,13 +18,13 @@ type leafSetItem[H comparable, N runtime.Number] struct { number N } -// ImportOutcome privately contains the inserted and removed leaves after an import action. +// ImportOutcome contains the inserted and removed leaves after an import action. type ImportOutcome[H comparable, N runtime.Number] struct { inserted leafSetItem[H, N] removed *H } -// RemoveOutcome privates contains the inserted and removed leaves after a remove action. +// RemoveOutcome contains the inserted and removed leaves after a remove action. type RemoveOutcome[H comparable, N runtime.Number] struct { inserted *H removed leafSetItem[H, N] } @@ -38,20 +38,20 @@ type FinalizationOutcome[H comparable, N runtime.Number] struct { // Leaves returns the leaves that were removed after a finalization action. func (fo FinalizationOutcome[H, N]) Leaves() []H { leaves := make([]H, 0) - for _, hashes := range fo.removed.Values() { - leaves = append(leaves, hashes...) - } + fo.removed.Reverse(func(key N, value []H) bool { + leaves = append(leaves, value...) + return true + }) return leaves } -// list of leaf hashes ordered by number (descending). -// stored in memory for fast access. -// this allows very fast checking and modification of active leaves. +// LeafSet is the list of leaf hashes ordered by number (descending) stored in memory for fast access. +// This allows very fast checking and modification of active leaves. type LeafSet[H comparable, N runtime.Number] struct { storage btree.Map[N, []H] } -// NewLeafSet is constructor for a new, blank `LeafSet`. +// NewLeafSet is the constructor for a new, blank [LeafSet]. func NewLeafSet[H comparable, N runtime.Number]() LeafSet[H, N] { return LeafSet[H, N]{ storage: *btree.NewMap[N, []H](0), } } @@ -104,11 +104,11 @@ func (ls *LeafSet[H, N]) Import(hash H, number N, parentHash H) ImportOutcome[H, // Remove will update the leaf list on removal. // // Note that the leaves set structure doesn't have the information to decide if the -// leaf we're removing is the last children of the parent. Follows that this method requires -// the caller to check this condition and optionally pass the `parentHash` if `hash` is +// leaf we're removing is the last child of the parent. It follows that this method requires +// the caller to check this condition and optionally pass the parentHash if hash is // its last child. // -// Returns `nil` if no modifications are applied. +// Returns nil if no modifications are applied. func (ls *LeafSet[H, N]) Remove(hash H, number N, parentHash *H) *RemoveOutcome[H, N] { if !ls.removeLeaf(number, hash) { return nil } @@ -132,10 +132,10 @@ func (ls *LeafSet[H, N]) Remove(hash H, number N, parentHash *H) *RemoveOutcome[ } // FinalizeHeight will note a block height finalized, displacing all leaves with number less than the finalized -// block's. +
// // Although it would be more technically correct to also prune out leaves at the -// same number as the finalized block, but with different hashes, the current behavior +// same number as the finalized block with different hashes, the current behavior // is simpler and our assumptions about how finalization works means that those leaves // will be pruned soon afterwards anyway. func (ls *LeafSet[H, N]) FinalizeHeight(number N) FinalizationOutcome[H, N] { @@ -145,15 +145,19 @@ func (ls *LeafSet[H, N]) FinalizeHeight(number N) FinalizationOutcome[H, N] { } boundary := number - 1 belowBoundary := btree.NewMap[N, []H](0) - ls.storage.Ascend(boundary, func(key N, value []H) bool { - belowBoundary.Set(key, value) - ls.storage.Delete(key) - return false + + ls.storage.Reverse(func(key N, value []H) bool { + if key <= boundary { + belowBoundary.Set(key, value) + ls.storage.Delete(key) + } + return true }) + return FinalizationOutcome[H, N]{removed: *belowBoundary} } -// DisplacedByFinalHeight is the same as `FinalizeHeight()`, but it only simulates the operation. +// DisplacedByFinalHeight is the same as FinalizeHeight(), but it only simulates the operation. // // This means that no changes are done. // @@ -165,16 +169,16 @@ func (ls *LeafSet[H, N]) DisplacedByFinalHeight(number N) FinalizationOutcome[H, } boundary := number - 1 belowBoundary := btree.NewMap[N, []H](0) - ls.storage.Ascend(boundary, func(key N, value []H) bool { + ls.storage.Descend(boundary, func(key N, value []H) bool { belowBoundary.Set(key, value) - return false + return true }) return FinalizationOutcome[H, N]{removed: *belowBoundary} } // Undo all pending operations. // -// This returns an `Undo` struct, where any +// This returns an [Undo] struct, where any // outcomes objects that have returned by previous method calls // should be passed to via the appropriate methods. Otherwise, // the on-disk state may get out of sync with in-memory state. diff --git a/internal/client/api/utils/utils.go b/internal/client/api/utils/utils.go new file mode 100644 index 0000000000..6a65509f8d --- /dev/null +++ b/internal/client/api/utils/utils.go @@ -0,0 +1,51 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package utils + +import ( + "github.com/ChainSafe/gossamer/internal/primitives/blockchain" + "github.com/ChainSafe/gossamer/internal/primitives/runtime" +) + +// HashParent is the hash and parent hash of a block +type HashParent[H runtime.Hash] struct { + Hash H + Parent H +} + +// IsDescendantOf returns a function for checking block ancestry, the returned function will return true if the given +// hash (second parameter) is a descendent of the base (first parameter). If current is defined, it should represent +// the current block hash and its parent hash. if current is given, the function that is returned will assume that +// current.Hash isn't part of the local DB yet, and all searches in the DB will instead reference the parent. 
+func IsDescendantOf[H runtime.Hash, N runtime.Number, Header runtime.Header[N, H], E runtime.Extrinsic]( + client blockchain.Backend[H, N, Header, E], current *HashParent[H], +) func(a H, b H) (bool, error) { + return func(base H, hash H) (bool, error) { + if base == hash { + return false, nil + } + + if current != nil { + currentHash := current.Hash + currentParentHash := current.Parent + if base == currentHash { + return false, nil + } + if hash == currentHash { + if base == currentParentHash { + return true, nil + } else { + hash = currentParentHash + } + } + } + + ancestor, err := blockchain.LowestCommonAncestor(client, hash, base) + if err != nil { + return false, err + } + + return ancestor.Hash == base, nil + } +} diff --git a/internal/client/consensus/grandpa/authorities.go b/internal/client/consensus/grandpa/authorities.go index 261042fe9a..4accb53dd5 100644 --- a/internal/client/consensus/grandpa/authorities.go +++ b/internal/client/consensus/grandpa/authorities.go @@ -3,7 +3,7 @@ package grandpa -// generic representation of hash and number tuple +// HashNumber is a generic representation of a hash and number tuple type HashNumber[H, N any] struct { Hash H Number N diff --git a/internal/client/consensus/grandpa/justification.go b/internal/client/consensus/grandpa/justification.go index 00bcdf6bf1..584abaf3c9 100644 --- a/internal/client/consensus/grandpa/justification.go +++ b/internal/client/consensus/grandpa/justification.go @@ -22,14 +22,13 @@ var ( errBlockNotDescendentOfBase = errors.New("block not descendent of base") ) -// A GRANDPA justification for block finality, it includes a commit message and -// an ancestry proof including all headers routing all precommit target blocks -// to the commit target block. Due to the current voting strategy the precommit -// targets should be the same as the commit target, since honest voters don't -// vote past authority set change blocks. +// GrandpaJustification is a GRANDPA justification for block finality; it includes a commit message and an ancestry +// proof including all headers routing all precommit target blocks to the commit target block. Due to the current +// voting strategy the precommit targets should be the same as the commit target, since honest voters don't vote past +// authority set change blocks. // -// This is meant to be stored in the db and passed around the network to other -// nodes, and are used by syncing nodes to prove authority set handoffs. +// This is meant to be stored in the db and passed around the network to other nodes, and is used by syncing nodes +// to prove authority set handoffs. type GrandpaJustification[Hash runtime.Hash, N runtime.Number] struct { // The GRANDPA justification for block finality. Justification primitives.GrandpaJustification[Hash, N] @@ -248,7 +247,7 @@ func (j *GrandpaJustification[Hash, N]) Target() HashNumber[Hash, N] { } } -// ancestryChain a utility trait implementing `grandpa.Chain` using a given set of headers. +// ancestryChain is a utility type implementing grandpa.Chain using a given set of headers. // This is useful when validating commits, using the given set of headers to // verify a valid ancestry route to the target commit block.
type ancestryChain[Hash runtime.Hash, N runtime.Number] struct { diff --git a/internal/client/db/backend.go b/internal/client/db/backend.go new file mode 100644 index 0000000000..42bb589681 --- /dev/null +++ b/internal/client/db/backend.go @@ -0,0 +1,1832 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package db + +import ( + "errors" + "fmt" + "maps" + "sync" + + "github.com/ChainSafe/gossamer/internal/client/api" + "github.com/ChainSafe/gossamer/internal/client/api/utils" + "github.com/ChainSafe/gossamer/internal/client/db/columns" + "github.com/ChainSafe/gossamer/internal/client/db/metakeys" + "github.com/ChainSafe/gossamer/internal/client/db/offchain" + statedb "github.com/ChainSafe/gossamer/internal/client/state-db" + hashdb "github.com/ChainSafe/gossamer/internal/hash-db" + "github.com/ChainSafe/gossamer/internal/log" + memorydb "github.com/ChainSafe/gossamer/internal/memory-db" + "github.com/ChainSafe/gossamer/internal/primitives/blockchain" + "github.com/ChainSafe/gossamer/internal/primitives/core/hash" + p_offchain "github.com/ChainSafe/gossamer/internal/primitives/core/offchain" + "github.com/ChainSafe/gossamer/internal/primitives/database" + "github.com/ChainSafe/gossamer/internal/primitives/runtime" + "github.com/ChainSafe/gossamer/internal/primitives/runtime/generic" + statemachine "github.com/ChainSafe/gossamer/internal/primitives/state-machine" + "github.com/ChainSafe/gossamer/internal/primitives/storage" + "github.com/ChainSafe/gossamer/internal/primitives/storage/keys" + "github.com/ChainSafe/gossamer/internal/primitives/trie" + "github.com/ChainSafe/gossamer/internal/primitives/trie/cache" + "github.com/ChainSafe/gossamer/internal/saturating" + "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/ChainSafe/gossamer/pkg/trie/triedb" + "golang.org/x/exp/slices" +) + +var logger = log.NewFromGlobal(log.AddContext("pkg", "client/db")) + +// BlocksPruning represents block pruning settings. +type BlocksPruning interface { + isBlocksPruning() +} +type BlocksPruningValues interface { + BlocksPruningKeepAll | BlocksPruningKeepFinalized | BlocksPruningSome +} + +// BlocksPruningKeepAll keeps the full block history of every block that was ever imported. +type BlocksPruningKeepAll struct{} + +// BlocksPruningKeepFinalized keeps the full finalized block history. +type BlocksPruningKeepFinalized struct{} + +// BlocksPruningSome keeps a defined number of recent finalized blocks. +type BlocksPruningSome uint32 + +func (BlocksPruningKeepAll) isBlocksPruning() {} +func (BlocksPruningKeepFinalized) isBlocksPruning() {} +func (BlocksPruningSome) isBlocksPruning() {} + +// DatabaseSource is the source of the database. +// NOTE: only uses a custom already-open database. +type DatabaseSource struct { + // the handle to the custom storage + DB database.Database[hash.H256] + // if set, the create flag will be required to open such a datasource + RequireCreateFlag bool +} + +// DBState is a db-backed patricia trie state; the transaction type is an overlay of changes to commit. +type DBState[H runtime.Hash, Hasher runtime.Hasher[H]] struct { + *statemachine.TrieBackend[H, Hasher] +} + +// refTrackingState is a reference tracking state. +// +// It makes sure that the hash we are using stays pinned in storage +// until this structure is dropped.
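+//
+// Drop is expected to be called exactly once when the state is no longer
+// needed; it unpins the parent state hash in the backing StateDB.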
+type refTrackingState[H runtime.Hash, Hasher runtime.Hasher[H]] struct { + state DBState[H, Hasher] + storage storageDB[H] + parentHash *H // can be nil +} + +func (rts *refTrackingState[H, Hasher]) Drop() { + if rts.parentHash != nil { + rts.storage.StateDB.Unpin(*rts.parentHash) + } +} + +// DatabaseConfig is the database configuration. +type DatabaseConfig struct { + // TrieCacheMaximumSize is the maximum trie cache size in bytes. + // If nil is given, the cache is disabled. + TrieCacheMaximumSize *uint + // StatePruning is the requested state pruning mode. + StatePruning statedb.PruningMode + // Source is the source of the database. + Source DatabaseSource + // BlocksPruning is the block pruning mode. + // NOTE: only finalized blocks are subject for removal! + BlocksPruning BlocksPruning +} + +// stateMetaDB is a wrapper around [database.Database] that implements [statedb.MetaDB]. +type stateMetaDB struct { + db database.Database[hash.H256] +} + +func (smdb stateMetaDB) GetMeta(key []byte) (statedb.DBValue, error) { + val := smdb.db.Get(columns.StateMeta, key) + if val == nil { + return nil, nil + } + dbVal := statedb.DBValue(val) + return dbVal, nil +} + +type finalizedBlock[H runtime.Hash] struct { + Hash H + *runtime.Justification +} + +// BlockImportOperation is a [Backend] block import operation which represents a transaction. +type BlockImportOperation[ + H runtime.Hash, + Hasher runtime.Hasher[H], + N runtime.Number, + Header runtime.Header[N, H], + E runtime.Extrinsic, +] struct { + oldState refTrackingState[H, Hasher] + dbUpdates trie.PrefixedMemoryDB[H, Hasher] + storageUpdates statemachine.StorageCollection + childStorageUpdates statemachine.ChildStorageCollection + offchainStorageUpdates statemachine.OffchainChangesCollection + pendingBlock *pendingBlock[H, N, Header, E] // can be nil to represent no pending block + auxOps api.AuxDataOperations + finalizedBlocks []finalizedBlock[H] + setHead *H // can be nil to represent no head + commitState bool + indexOps []statemachine.IndexOperation +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) applyOffchain(transaction *database.Transaction[hash.H256]) { + var count uint32 + offchainStorageUpdates := bio.offchainStorageUpdates + bio.offchainStorageUpdates = nil + for _, update := range offchainStorageUpdates { + prefix := update.PrefixKey.Prefix + key := update.PrefixKey.Key + valueOperation := update.ValueOperation + count += 1 + key = offchain.ConcatenatePrefixAndKey(prefix, key) + switch valueOperation := valueOperation.(type) { + case p_offchain.OffchainOverlayedChangeSetValue: + transaction.Set(columns.Offchain, key, valueOperation) + case p_offchain.OffchainOverlayedChangeRemove: + transaction.Remove(columns.Offchain, key) + default: + panic("unreachable") + } + } + + if count > 0 { + logger.Debugf("Applied %d offchain indexing changes.", count) + } +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) applyAux(transaction *database.Transaction[hash.H256]) { + auxOps := bio.auxOps + bio.auxOps = nil + for _, op := range auxOps { + switch op.Data { + case nil: + transaction.Remove(columns.Aux, op.Key) + default: + transaction.Set(columns.Aux, op.Key, op.Data) + } + } +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) applyNewState( + storage storage.Storage, stateVersion storage.StateVersion, +) (root H, err error) { + contains := slices.ContainsFunc(storage.Top.Keys(), func(key string) bool { + return keys.IsChildStorageKey([]byte(key)) + }) + if contains { + return root, blockchain.ErrInvalidState + } +
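+	// Collect the deltas for every default child trie, then the top-trie
+	// deltas, so the full storage root can be computed in one pass below.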
var childDeltas []statemachine.ChildDelta + for childContent := range maps.Values(storage.ChildrenDefault) { + var deltas []statemachine.Delta + childContent.Data.Scan(func(key string, value []byte) bool { + deltas = append(deltas, statemachine.Delta{ + Key: []byte(key), + Value: value, + }) + return true + }) + childDeltas = append(childDeltas, statemachine.ChildDelta{ + ChildInfo: childContent.ChildInfo, + Deltas: deltas, + }) + } + + var deltas []statemachine.Delta + storage.Top.Scan(func(key string, value []byte) bool { + deltas = append(deltas, statemachine.Delta{ + Key: []byte(key), + Value: value, + }) + return true + }) + + root, transaction := bio.oldState.state.FullStorageRoot(deltas, childDeltas, stateVersion) + bio.dbUpdates = *transaction.PrefixedMemoryDB + return root, nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) State() (statemachine.Backend[H, Hasher], error) { + return &bio.oldState.state, nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) SetBlockData( + header Header, + body []E, + indexedBody [][]byte, + justifications runtime.Justifications, + leafState api.NewBlockState, +) error { + if bio.pendingBlock != nil { + return fmt.Errorf("only one block per operation is allowed") + } + bio.pendingBlock = &pendingBlock[H, N, Header, E]{ + header: header, + body: body, + indexedBody: indexedBody, + justifications: justifications, + leafState: leafState, + } + return nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) UpdateDBStorage( + update statemachine.BackendTransaction[H, Hasher], +) error { + bio.dbUpdates = *update.PrefixedMemoryDB + return nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) SetGenesisState( + storage storage.Storage, commit bool, stateVersion storage.StateVersion, +) (H, error) { + root, err := bio.applyNewState(storage, stateVersion) + if err != nil { + return root, err + } + bio.commitState = commit + return root, err +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) ResetStorage( + storage storage.Storage, stateVersion storage.StateVersion, +) (H, error) { + root, err := bio.applyNewState(storage, stateVersion) + if err != nil { + return root, err + } + bio.commitState = true + return root, err +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) UpdateStorage( + update statemachine.StorageCollection, childUpdate statemachine.ChildStorageCollection, +) error { + bio.storageUpdates = update + bio.childStorageUpdates = childUpdate + return nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) UpdateOffchainStorage( + offchainUpdate statemachine.OffchainChangesCollection, +) error { + bio.offchainStorageUpdates = offchainUpdate + return nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) InsertAux(ops api.AuxDataOperations) error { + bio.auxOps = append(bio.auxOps, ops...) 
+	return nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) MarkFinalized( + hash H, justification *runtime.Justification, +) error { + bio.finalizedBlocks = append(bio.finalizedBlocks, finalizedBlock[H]{hash, justification}) + return nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) MarkHead(hash H) error { + if bio.setHead != nil { + panic("only one set head per operation is allowed") + } + bio.setHead = &hash + return nil +} + +func (bio *BlockImportOperation[H, Hasher, N, Header, E]) UpdateTransactionIndex( + indexOps []statemachine.IndexOperation, +) error { + bio.indexOps = indexOps + return nil +} + +type pendingBlock[H runtime.Hash, N runtime.Number, Header runtime.Header[N, H], E runtime.Extrinsic] struct { + header Header + justifications runtime.Justifications // can be nil + body []E // can be nil to represent no body + indexedBody [][]byte // can be nil to represent no indexed body + leafState api.NewBlockState +} + +type nodeDBStorageDB[H runtime.Hash] struct { + *storageDB[H] +} + +func (ndbsdb nodeDBStorageDB[H]) Get(key string) (statedb.DBValue, error) { + return ndbsdb.storageDB.db.Get(columns.State, []byte(key)), nil +} + +type storageDB[H runtime.Hash] struct { + db database.Database[hash.H256] + StateDB *statedb.StateDB[H, string] + prefixKeys bool +} + +func (sdb *storageDB[H]) Get(key H, prefix hashdb.Prefix) ([]byte, error) { + if sdb.prefixKeys { + key := memorydb.NewPrefixedKey[H](key, prefix) + return sdb.StateDB.Get(string(key), nodeDBStorageDB[H]{sdb}) + } else { + return sdb.StateDB.Get(string(key.Bytes()), nodeDBStorageDB[H]{sdb}) + } +} + +type dbGenesisStorage[H runtime.Hash, Hasher runtime.Hasher[H]] struct { + root H + storage trie.PrefixedMemoryDB[H, Hasher] +} + +type emptyStorage[H runtime.Hash] struct { + root H +} + +func newEmptyStorage[H runtime.Hash, Hasher runtime.Hasher[H]]() emptyStorage[H] { + var root H + mdb := trie.NewMemoryDB[H, Hasher]() + trie := triedb.NewEmptyTrieDB[H, Hasher](mdb) + trie.SetVersion(triedb.V1) + root = trie.MustHash() + return emptyStorage[H]{root} +} + +// Backend keeps data in a key-value store. In archive mode, trie nodes are kept from all +// blocks. Otherwise, trie nodes are kept only from some recent blocks. +type Backend[ + H runtime.Hash, + Hasher runtime.Hasher[H], + N runtime.Number, + E runtime.Extrinsic, + Header runtime.Header[N, H], +] struct { + storage storageDB[H] + offchainStorage *offchain.LocalStorage + blockchain *blockchainDB[H, N, E, Header] + canonicalizationDelay uint64 + importLock sync.RWMutex + isArchive bool + blocksPruning BlocksPruning + genesisState *dbGenesisStorage[H, Hasher] // can be nil to represent no genesisState + genesisStateMtx sync.RWMutex + sharedTrieCache *cache.SharedTrieCache[H] // can be nil to represent no shared trie cache +} + +// NewBackend creates a new instance of database backend. +// +// dbConfig is of type [DatabaseConfig] and contains both state and block history pruning settings. +// canonicalizationDelay represents the number of blocks the backend waits before canonicalizing a +// block and initiating pruning based on canonicalization.
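+//
+// A construction sketch (the concrete type arguments, db handle and pruning
+// values are illustrative assumptions):
+//
+//	cacheSize := uint(64 * 1024 * 1024)
+//	backend, err := NewBackend[hash.H256, uint64, extrinsic, hasher, header](
+//		DatabaseConfig{
+//			TrieCacheMaximumSize: &cacheSize,
+//			StatePruning:         statePruning, // some statedb.PruningMode
+//			Source:               DatabaseSource{DB: db},
+//			BlocksPruning:        BlocksPruningSome(256),
+//		},
+//		4096, // canonicalizationDelay
+//	)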
+func NewBackend[ + H runtime.Hash, + N runtime.Number, + E runtime.Extrinsic, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], +]( + dbConfig DatabaseConfig, + canonicalizationDelay uint64, +) (*Backend[H, Hasher, N, E, Header], error) { + var ( + needsInit bool + db database.Database[hash.H256] + ) + dbSource := dbConfig.Source + db, err := openDatabase(dbSource, false) + if err != nil { + if errors.Is(err, errDoesNotExist) { + db, err = openDatabase(dbSource, true) + if err != nil { + return nil, err + } + needsInit = true + } else { + return nil, err + } + } else { + needsInit = false + } + + return newBackendFromDatabase[H, N, E, Hasher, Header](db, canonicalizationDelay, dbConfig, needsInit) +} + +func newBackendFromDatabase[ + H runtime.Hash, + N runtime.Number, + E runtime.Extrinsic, + Hasher runtime.Hasher[H], + Header runtime.Header[N, H], +]( + db database.Database[hash.H256], + canonicalizationDelay uint64, + config DatabaseConfig, + shouldInit bool, +) (*Backend[H, Hasher, N, E, Header], error) { + var dbInitTransaction database.Transaction[hash.H256] + + requestedStatePruning := config.StatePruning + stateMetaDB := stateMetaDB{db} + + stateDBInitCommitSet, stateDB, err := statedb.NewStateDB[H, string](stateMetaDB, requestedStatePruning, shouldInit) + if err != nil { + return nil, fmt.Errorf("%w: %v", blockchain.ErrStateDatabase, err) + } + + applyStateCommit(&dbInitTransaction, stateDBInitCommitSet) + + statePruningUsed := stateDB.PruningMode() + isArchivePruning := statePruningUsed.IsArchive() + blockchain, err := newBlockchainDB[H, N, Hasher, E, Header](db) + if err != nil { + return nil, err + } + + storageDB := storageDB[H]{ + db: db, + StateDB: stateDB, + prefixKeys: true, + } + + backend := Backend[H, Hasher, N, E, Header]{ + storage: storageDB, + offchainStorage: offchain.NewLocalStorage(db), + blockchain: blockchain, + canonicalizationDelay: canonicalizationDelay, + isArchive: isArchivePruning, + blocksPruning: config.BlocksPruning, + } + if config.TrieCacheMaximumSize != nil { + backend.sharedTrieCache = cache.NewSharedTrieCache[H](*config.TrieCacheMaximumSize) + } + + // Older DB versions have no last state key. Check if the state is available and set it. + info := backend.blockchain.Info() + if info.FinalizedState == nil && info.FinalizedHash != *new(H) && + backend.HaveStateAt(info.FinalizedHash, info.FinalizedNumber) { + backend.blockchain.updateMeta(metaUpdate[H, N]{ + Hash: info.FinalizedHash, + Number: info.FinalizedNumber, + IsBest: info.FinalizedHash == info.BestHash, + IsFinalized: true, + WithState: true, + }) + } + + err = db.Commit(dbInitTransaction) + if err != nil { + return nil, err + } + + return &backend, nil +} + +// ResetTrieCache resets the shared trie cache. +func (b *Backend[H, Hasher, N, E, Header]) ResetTrieCache() { + if b.sharedTrieCache != nil { + b.sharedTrieCache.Reset() + } +} + +type numberHash[H, N any] struct { + Number N + Hash H +} + +// setHeadWithTransaction handles setting the head within a transaction. routeTo should be the last +// block that existed in the database. bestTo should be the best block +// to be set. +// +// In the case where the new best block is a block to be imported, routeTo +// should be the parent of bestTo. In the case where we set an existing block +// to be best, routeTo should equal bestTo.
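+//
+// For example (hypothetical values): when importing a new best block, route to its parent,
+//
+//	b.setHeadWithTransaction(&tx, parentHash, numberHash[H, N]{Number: number, Hash: hash})
+//
+// and when re-targeting the head to an already-imported block, route to the block itself,
+//
+//	b.setHeadWithTransaction(&tx, hash, numberHash[H, N]{Number: number, Hash: hash})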
+func (b *Backend[H, Hasher, N, E, Header]) setHeadWithTransaction( + transaction *database.Transaction[hash.H256], routeTo H, bestTo numberHash[H, N], +) ([2][]H, error) { + var ( + enacted []H + retracted []H + ) + bestNumber := bestTo.Number + bestHash := bestTo.Hash + + b.blockchain.metaMtx.RLock() + defer b.blockchain.metaMtx.RUnlock() + meta := b.blockchain.meta + if saturating.Into[N, uint64](saturating.Sub(meta.BestNumber, bestNumber)) > b.canonicalizationDelay { + return [2][]H{}, blockchain.ErrSetHeadTooOld + } + + var parentExists bool + status, err := b.blockchain.Status(routeTo) + if err != nil { + return [2][]H{}, err + } + parentExists = status == blockchain.BlockStatusInChain + + // Cannot find the tree route with an empty DB or when a detached block is imported. + if meta.BestHash != (*new(H)) && parentExists { + treeRoute, err := blockchain.NewTreeRoute[H, N](b.blockchain, meta.BestHash, routeTo) + if err != nil { + return [2][]H{}, err + } + + // uncanonicalize: check for safety violations and ensure the numbers no longer + // point to these block hashes in the key mapping. + for _, r := range treeRoute.Retracted() { + if r.Hash == meta.FinalizedHash { + logger.Warnf("Potential safety failure: reverting finalized block %+v", r) + + return [2][]H{}, blockchain.ErrNotInFinalizedChain + } + + retracted = append(retracted, r.Hash) + err := removeNumberToKeyMapping(transaction, uint32(columns.KeyLookup), r.Number) + if err != nil { + return [2][]H{}, err + } + } + + // canonicalize: set the number lookup to map to this block's hash. + for _, e := range treeRoute.Enacted() { + enacted = append(enacted, e.Hash) + err := insertNumberToKeyMapping(transaction, uint32(columns.KeyLookup), e.Number, e.Hash) + if err != nil { + return [2][]H{}, err + } + } + } + + lookupKey, err := newLookupKey(bestNumber, bestHash) + if err != nil { + return [2][]H{}, err + } + transaction.Set(columns.Meta, metakeys.BestBlock, lookupKey) + err = insertNumberToKeyMapping(transaction, uint32(columns.KeyLookup), bestNumber, bestHash) + if err != nil { + return [2][]H{}, err + } + + return [2][]H{enacted, retracted}, nil +} + +func (b *Backend[H, Hasher, N, E, Header]) ensureSequentialFinalization(header Header, lastFinalized *H) error { + if lastFinalized == nil { + b.blockchain.metaMtx.RLock() + lf := b.blockchain.meta.FinalizedHash + lastFinalized = &lf + b.blockchain.metaMtx.RUnlock() + } + b.blockchain.metaMtx.RLock() + defer b.blockchain.metaMtx.RUnlock() + if *lastFinalized != b.blockchain.meta.FinalizedHash && header.ParentHash() != *lastFinalized { + return fmt.Errorf("%w: Last finalized %s not parent of %s", + blockchain.ErrNonSequentialFinalization, *lastFinalized, header.Hash()) + } + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) finalizeBlockWithTransaction( + transaction *database.Transaction[hash.H256], + hash H, + header *Header, + lastFinalized *H, + justification *runtime.Justification, + currentTransactionJustifications map[H]runtime.Justification, +) (metaUpdate[H, N], error) { + // TODO: ensure best chain contains this block.
(from substrate as well) + number := (*header).Number() + err := b.ensureSequentialFinalization(*header, lastFinalized) + if err != nil { + return metaUpdate[H, N]{}, err + } + withState := b.HaveStateAt(hash, number) + + err = b.noteFinalized(transaction, *header, hash, withState, currentTransactionJustifications) + if err != nil { + return metaUpdate[H, N]{}, err + } + + if justification != nil { + lookupKey, err := newLookupKey(number, hash) + if err != nil { + return metaUpdate[H, N]{}, err + } + justifications := runtime.Justifications{*justification} + transaction.Set(columns.Justifications, lookupKey, scale.MustMarshal(justifications)) + currentTransactionJustifications[hash] = *justification + } + return metaUpdate[H, N]{ + Hash: hash, + Number: number, + IsBest: false, + IsFinalized: true, + WithState: withState, + }, nil +} + +// forceDelayedCanonicalize performs forced canonicalization with a delay after importing a non-finalized block. +func (b *Backend[H, Hasher, N, E, Header]) forceDelayedCanonicalize( + transaction *database.Transaction[hash.H256], +) error { + var bestCanonical uint64 + switch lc := b.storage.StateDB.LastCanonicalized().(type) { + case statedb.LastCanonicalizedNone: + bestCanonical = 0 + case statedb.LastCanonicalizedBlock: + bestCanonical = uint64(lc) + case statedb.LastCanonicalizedNotCanonicalizing: + // Nothing needs to be done when canonicalization is not happening. + return nil + } + + info := b.blockchain.Info() + bestNumber := saturating.Into[N, uint64](info.BestNumber) + + end := saturating.Sub(bestNumber, b.canonicalizationDelay) + for toCanonicalize := bestCanonical + 1; toCanonicalize <= end; toCanonicalize++ { + hashToCanonicalize, err := b.blockchain.Hash(saturating.Into[uint64, N](toCanonicalize)) + if err != nil { + return err + } + if hashToCanonicalize == nil { + bestHash := info.BestHash + return fmt.Errorf("%w: Can't canonicalize missing block number %d for best block %s (%d)", + blockchain.ErrBackend, toCanonicalize, bestHash, bestNumber) + } + + if !b.HaveStateAt(*hashToCanonicalize, saturating.Into[uint64, N](toCanonicalize)) { + return nil + } + + logger.Tracef("Canonicalize block #%d (%s)", toCanonicalize, *hashToCanonicalize) + commit, err := b.storage.StateDB.CanonicalizeBlock(*hashToCanonicalize) + if err != nil { + return fmt.Errorf("%w: %v", blockchain.ErrStateDatabase, err) + } + applyStateCommit(transaction, commit) + } + + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) tryCommitOperation( //nolint:gocyclo + operation *BlockImportOperation[H, Hasher, N, Header, E], +) error { + var transaction database.Transaction[hash.H256] + + operation.applyAux(&transaction) + operation.applyOffchain(&transaction) + + var metaUpdates []metaUpdate[H, N] + + b.blockchain.metaMtx.RLock() + + meta := b.blockchain.meta + bestNum := meta.BestNumber + lastFinalizedHash := meta.FinalizedHash + lastFinalizedNumber := meta.FinalizedNumber + blockGap := meta.BlockGap + + b.blockchain.metaMtx.RUnlock() + + currentTransactionJustifications := make(map[H]runtime.Justification) + for _, finalizedBlock := range operation.finalizedBlocks { + blockHeader, err := b.blockchain.header(finalizedBlock.Hash) + if err != nil { + return err + } + meta, err := b.finalizeBlockWithTransaction( + &transaction, + finalizedBlock.Hash, + blockHeader, + &lastFinalizedHash, + finalizedBlock.Justification, + currentTransactionJustifications, + ) + if err != nil { + return err + } + metaUpdates = append(metaUpdates, meta) + lastFinalizedHash = finalizedBlock.Hash +
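+		// Subsequent finalizations in this transaction must chain off this block.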
lastFinalizedNumber = (*blockHeader).Number() + } + + type headerHash struct { + Header Header + Hash H + } + var imported *headerHash + if pendingBlock := operation.pendingBlock; pendingBlock != nil { + hash := pendingBlock.header.Hash() + + parentHash := pendingBlock.header.ParentHash() + number := pendingBlock.header.Number() + b.blockchain.leavesMtx.RLock() + highestLeaf := b.blockchain.leaves.HighestLeaf() + b.blockchain.leavesMtx.RUnlock() + var highestLeafNumber N + if highestLeaf != nil { + highestLeafNumber = highestLeaf.Number + } + var existingHeader bool + if number <= highestLeafNumber { + header, err := b.blockchain.Header(hash) + if err != nil { + return err + } + existingHeader = header != nil + } + + // blocks are keyed by number + hash. + lookupKey, err := newLookupKey(number, hash) + if err != nil { + return err + } + + if pendingBlock.leafState.IsBest() { + _, err := b.setHeadWithTransaction(&transaction, parentHash, numberHash[H, N]{ + Number: number, + Hash: hash, + }) + if err != nil { + return err + } + } + + err = insertHashToKeyMapping(&transaction, uint32(columns.KeyLookup), number, hash) + if err != nil { + return err + } + + transaction.Set(columns.Header, lookupKey, scale.MustMarshal(pendingBlock.header)) + if body := pendingBlock.body; body != nil { + // If we have any index operations, we save the block in the new format with indexed + // extrinsic headers. Otherwise we save the body as a single blob. + if len(operation.indexOps) == 0 { + transaction.Set(columns.Body, lookupKey, scale.MustMarshal(body)) + } else { + body := applyIndexOps(&transaction, body, operation.indexOps) + transaction.Set(columns.BodyIndex, lookupKey, body) + } + } + if body := pendingBlock.indexedBody; body != nil { + applyIndexedBody(&transaction, body) + } + if justifications := pendingBlock.justifications; justifications != nil { + transaction.Set(columns.Justifications, lookupKey, scale.MustMarshal(justifications)) + } + + if number == 0 { + transaction.Set(columns.Meta, metakeys.GenesisHash, hash.Bytes()) + + if operation.commitState { + transaction.Set(columns.Meta, metakeys.FinalizedState, lookupKey) + } else { + // When we don't want to commit the genesis state, we still preserve it in + // memory to bootstrap consensus. It is queried for an initial list of + // authorities, etc.
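+				// The preserved copy holds the genesis state root and its trie
+				// node updates, guarded by genesisStateMtx.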
+ b.genesisStateMtx.Lock() + b.genesisState = &dbGenesisStorage[H, Hasher]{ + root: pendingBlock.header.StateRoot(), + storage: operation.dbUpdates, + } + b.genesisStateMtx.Unlock() + } + } + + var finalized bool + if operation.commitState { + var ( + changeset statedb.ChangeSet[string] + ops uint64 + bytes uint64 + removal uint64 + bytesRemoval uint64 + ) + for key, update := range operation.dbUpdates.Drain() { + if rc := update.RC; rc > 0 { + ops += 1 + bytes += uint64(len(key) + len(update.Data)) + if rc == 1 { + changeset.Inserted = append(changeset.Inserted, statedb.HashDBValue[string]{ + Hash: key, + DBValue: update.Data, + }) + } else { + changeset.Inserted = append(changeset.Inserted, statedb.HashDBValue[string]{ + Hash: key, + DBValue: update.Data, + }) + for i := int32(0); i < rc-1; i++ { + changeset.Inserted = append(changeset.Inserted, statedb.HashDBValue[string]{ + Hash: key, + DBValue: make([]byte, 0), + }) + } + } + } else if rc < 0 { + removal += 1 + bytesRemoval += uint64(len(key)) + if rc == -1 { + changeset.Deleted = append(changeset.Deleted, key) + } else { + for i := int32(0); i < (rc * -1); i++ { + changeset.Deleted = append(changeset.Deleted, key) + } + } + } + } + + numberU64 := saturating.Into[N, uint64](number) + commit, err := b.storage.StateDB.InsertBlock(hash, numberU64, pendingBlock.header.ParentHash(), changeset) + if err != nil { + return fmt.Errorf("%w: %v", blockchain.ErrStateDatabase, err) + } + applyStateCommit(&transaction, commit) + if number <= lastFinalizedNumber { + // Canonicalize in the db when re-importing existing blocks with state. + commit, err := b.storage.StateDB.CanonicalizeBlock(hash) + if err != nil { + return fmt.Errorf("%w: %v", blockchain.ErrStateDatabase, err) + } + applyStateCommit(&transaction, commit) + metaUpdates = append(metaUpdates, metaUpdate[H, N]{ + Hash: hash, + Number: number, + IsBest: false, + IsFinalized: true, + WithState: true, + }) + } + + // Check if need to finalize. Genesis is always finalized instantly. + finalized = numberU64 == 0 || pendingBlock.leafState.IsFinal() + } else { + finalized = number == 0 && lastFinalizedNumber == 0 || pendingBlock.leafState.IsFinal() + } + + header := pendingBlock.header + isBest := pendingBlock.leafState.IsBest() + logger.Debugf("DB commit %s (%d), best=%v, state=%+v, existing=%+v, finalized=%v", + hash, number, isBest, operation.commitState, existingHeader, finalized, + ) + + // VERY IMPORTANT: drop state reference so that it can be finalized + // NOTE: this is supposed to merge the state usage stats as well if we decide to implement that + operation.oldState.Drop() + + if finalized { + // TODO: ensure best chain contains this block. (from substrate as well) + err := b.ensureSequentialFinalization(header, &lastFinalizedHash) + if err != nil { + return err + } + currentTransactionJustifications := make(map[H]runtime.Justification) + err = b.noteFinalized(&transaction, header, hash, operation.commitState, currentTransactionJustifications) + if err != nil { + return err + } + } else { + err := b.forceDelayedCanonicalize(&transaction) + if err != nil { + return err + } + } + + if !existingHeader { + // Add a new leaf if the block has the potential to be finalized. 
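+			// That is, it is above the last finalized block, or nothing has
+			// been finalized yet.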
+ if number > lastFinalizedNumber || lastFinalizedNumber == 0 { + b.blockchain.leavesMtx.Lock() + b.blockchain.leaves.Import(hash, number, parentHash) + b.blockchain.leaves.PrepareTransaction(&transaction, uint32(columns.Meta), metakeys.LeafPrefix) + b.blockchain.leavesMtx.Unlock() + } + + children, err := readChildren(b.storage.db, columns.Meta, metakeys.ChildrenPrefix, parentHash) + if err != nil { + return err + } + if !slices.Contains(children, hash) { + children = append(children, hash) + writeChildren(&transaction, columns.Meta, metakeys.ChildrenPrefix, parentHash, children) + } + + if blockGap != nil { + start := blockGap[0] + end := blockGap[1] + if number == start { + start += 1 + err := insertNumberToKeyMapping(&transaction, uint32(columns.KeyLookup), number, hash) + if err != nil { + return err + } + } + if start > end { + transaction.Remove(columns.Meta, metakeys.BlockGap) + blockGap = nil + logger.Debugf("Removed block gap.") + } else { + blockGap = &[2]N{start, end} + logger.Debugf("Update block gap. %v", *blockGap) + transaction.Set(columns.Meta, metakeys.BlockGap, scale.MustMarshal(*blockGap)) + } + } else if number > bestNum+1 && number > 1 { + header, err := b.blockchain.Header(parentHash) + if err != nil { + return err + } + if header == nil { + gap := [2]N{bestNum + 1, number - 1} + transaction.Set(columns.Meta, metakeys.BlockGap, scale.MustMarshal(gap)) + blockGap = &gap + logger.Debugf("Detected block gap. %v", *blockGap) + } + } + } + + metaUpdates = append(metaUpdates, metaUpdate[H, N]{ + Hash: hash, + Number: number, + IsBest: pendingBlock.leafState.IsBest(), + IsFinalized: finalized, + WithState: operation.commitState, + }) + imported = &headerHash{ + Header: pendingBlock.header, + Hash: hash, + } + } + + if setHead := operation.setHead; setHead != nil { + header, err := b.blockchain.header(*setHead) + if err != nil { + return err + } + if header != nil { + number := (*header).Number() + hash := (*header).Hash() + + _, err := b.setHeadWithTransaction(&transaction, hash, numberHash[H, N]{Number: number, Hash: hash}) + if err != nil { + return err + } + + metaUpdates = append(metaUpdates, metaUpdate[H, N]{ + Hash: hash, + Number: number, + IsBest: true, + IsFinalized: false, + WithState: false, + }) + } else { + return fmt.Errorf("%w Cannot set head %s", blockchain.ErrUnknownBlock, setHead) + } + } + + err := b.storage.db.Commit(transaction) + if err != nil { + return err + } + + // Apply all in-memory state changes. + // Code beyond this point can't fail. + if imported != nil { + header := &(imported.Header) + hash := imported.Hash + logger.Tracef("DB commit done %s", hash) + headerMetadata := blockchain.NewCachedHeaderMetadata(*header) + b.blockchain.InsertHeaderMetadata(headerMetadata.Hash, headerMetadata) + b.blockchain.headerCacheMtx.Lock() + b.blockchain.cacheHeader(hash, header) + b.blockchain.headerCacheMtx.Unlock() + } + + for _, m := range metaUpdates { + b.blockchain.updateMeta(m) + } + b.blockchain.updateBlockGap(blockGap) + + return nil +} + +// Write to a transaction after a new block is finalized. +// This canonicalizes finalized blocks. Fails if called with a block which +// is not a child of the last finalized block. 
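+// Canonicalization is only attempted when the finalized block's state is
+// locally available (per HaveStateAt); otherwise it is skipped for this block.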
+func (b *Backend[H, Hasher, N, E, Header]) noteFinalized( + transaction *database.Transaction[hash.H256], + fHeader Header, + fHash H, + withState bool, + currentTransactionJustifications map[H]runtime.Justification, +) error { + fNum := fHeader.Number() + + lookupKey, err := newLookupKey(fNum, fHash) + if err != nil { + return err + } + if withState { + transaction.Set(columns.Meta, metakeys.FinalizedState, lookupKey) + } + transaction.Set(columns.Meta, metakeys.FinalizedBlock, lookupKey) + + var requiresCanonicalization bool + switch lc := b.storage.StateDB.LastCanonicalized().(type) { + case statedb.LastCanonicalizedNone: + requiresCanonicalization = true + case statedb.LastCanonicalizedBlock: + requiresCanonicalization = saturating.Into[N, uint64](fNum) > uint64(lc) + case statedb.LastCanonicalizedNotCanonicalizing: + requiresCanonicalization = false + default: + panic("unreachable") + } + + if requiresCanonicalization && b.HaveStateAt(fHash, fNum) { + commit, err := b.storage.StateDB.CanonicalizeBlock(fHash) + if err != nil { + return fmt.Errorf("%w: %v", blockchain.ErrStateDatabase, err) + } + applyStateCommit(transaction, commit) + } + + b.blockchain.leavesMtx.Lock() + defer b.blockchain.leavesMtx.Unlock() + newDisplaced := b.blockchain.leaves.FinalizeHeight(fNum) + err = b.pruneBlocks(transaction, fNum, fHash, newDisplaced, currentTransactionJustifications) + if err != nil { + return err + } + + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) pruneBlocks( + transaction *database.Transaction[hash.H256], + finalizedNumber N, + finalizedHash H, + displaced api.FinalizationOutcome[H, N], + currentTransactionJustifications map[H]runtime.Justification, +) error { + switch blocksPruning := b.blocksPruning.(type) { + case BlocksPruningKeepAll: + case BlocksPruningSome: + // Always keep the last finalized block + keep := uint32(1) + if uint32(blocksPruning) > keep { + keep = uint32(blocksPruning) + } + if finalizedNumber >= N(keep) { + number := saturating.Sub(finalizedNumber, keep) + + // Before we prune a block, check if it is pinned + hash, err := b.blockchain.Hash(number) + if err != nil { + return err + } + if hash != nil { + err := b.blockchain.insertPersistedBodyIfPinned(*hash) + if err != nil { + return err + } + + // If the block was finalized in this transaction, it will not be in the db + // yet. + justification, ok := currentTransactionJustifications[*hash] + if ok { + delete(currentTransactionJustifications, *hash) + b.blockchain.insertJustifcationsIfPinned(*hash, justification) + } else { + err := b.blockchain.insertPersistedJustificationsIfPinned(*hash) + if err != nil { + return err + } + } + } + err = b.pruneBlock(transaction, generic.BlockIDNumber[N]{Number: number}) + if err != nil { + return err + } + } + err := b.pruneDisplacedBranches(transaction, finalizedHash, displaced) + if err != nil { + return err + } + case BlocksPruningKeepFinalized: + err := b.pruneDisplacedBranches(transaction, finalizedHash, displaced) + if err != nil { + return err + } + default: + panic("unreachable") + } + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) pruneDisplacedBranches( + transaction *database.Transaction[hash.H256], + finalized H, + displaced api.FinalizationOutcome[H, N], +) error { + // Discard all blocks from displaced branches + for _, h := range displaced.Leaves() { + treeRoute, err := blockchain.NewTreeRoute(b.blockchain, h, finalized) + if err != nil { + if errors.Is(err, blockchain.ErrUnknownBlock) { + // Sometimes routes can't be calculated. eg. 
after warp sync. + return nil + } + return err + } + for _, r := range treeRoute.Retracted() { + err := b.blockchain.insertPersistedBodyIfPinned(r.Hash) + if err != nil { + return err + } + err = b.pruneBlock(transaction, generic.BlockIDHash[H]{Hash: r.Hash}) + if err != nil { + return err + } + } + } + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) pruneBlock( + transaction *database.Transaction[hash.H256], + id generic.BlockID, +) error { + logger.Debugf("Removing block %s", id) + err := removeFromDB[H, N](transaction, b.storage.db, uint32(columns.KeyLookup), uint32(columns.Body), id) + if err != nil { + return err + } + err = removeFromDB[H, N](transaction, b.storage.db, uint32(columns.KeyLookup), uint32(columns.Justifications), id) + if err != nil { + return err + } + index, err := readDB[H, N](b.storage.db, columns.KeyLookup, columns.BodyIndex, id) + if err != nil { + return err + } + if index != nil { + err := removeFromDB[H, N](transaction, b.storage.db, uint32(columns.KeyLookup), uint32(columns.BodyIndex), id) + if err != nil { + return err + } + var dbExtrinsics []dbExtrinsic[E] + err = scale.Unmarshal(index, &dbExtrinsics) + if err != nil { + return fmt.Errorf("%w: Error decoding body list: %v", blockchain.ErrBackend, err) + } + for _, ex := range dbExtrinsics { + val, err := ex.Value() + if err != nil { + return err + } + indexed, ok := val.(dbExtrinsicIndexed) + if ok { + transaction.Release(columns.Transaction, indexed.Hash) + } + } + } + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) emptyState() refTrackingState[H, Hasher] { + root := newEmptyStorage[H, Hasher]().root + var localCache *cache.LocalTrieCache[H] + if b.sharedTrieCache != nil { + lcc := b.sharedTrieCache.LocalTrieCache() + localCache = &lcc + } + dbState := statemachine.NewTrieBackend[H, Hasher](&b.storage, root, localCache, nil) + state := refTrackingState[H, Hasher]{ + state: DBState[H, Hasher]{dbState}, + storage: b.storage, + parentHash: nil, + } + return state +} + +func applyStateCommit(transaction *database.Transaction[hash.H256], commit statedb.CommitSet[string]) { + for _, hdbv := range commit.Data.Inserted { + transaction.Set(columns.State, []byte(hdbv.Hash), hdbv.DBValue) + } + for _, key := range commit.Data.Deleted { + transaction.Remove(columns.State, []byte(key)) + } + for _, hdbv := range commit.Meta.Inserted { + transaction.Set(columns.StateMeta, hdbv.Hash, hdbv.DBValue) + } + for _, key := range commit.Meta.Deleted { + transaction.Remove(columns.StateMeta, key) + } +} + +func applyIndexOps[E runtime.Extrinsic]( + transaction *database.Transaction[hash.H256], body []E, ops []statemachine.IndexOperation, +) []byte { + var extrinsicIndex []dbExtrinsic[E] + indexMap := make(map[uint32]struct { + Hash []byte + Size uint32 + }) + renewedMap := make(map[uint32]hash.H256) + for _, op := range ops { + switch op := op.(type) { + case statemachine.IndexOperationInsert: + indexMap[op.Extrinsic] = struct { + Hash []byte + Size uint32 + }{Hash: op.Hash, Size: op.Size} + case statemachine.IndexOperationRenew: + renewedMap[op.Extrinsic] = hash.H256(op.Hash) + default: + panic("unreachable") + } + } + for index, extrinsic := range body { + var dbExtrinsic dbExtrinsic[E] + hash, ok := renewedMap[uint32(index)] + if ok { + // Bump ref counter + encoded := scale.MustMarshal(extrinsic) + transaction.Reference(columns.Transaction, hash) + dbExtrinsic = newDbExtrinsic[E](dbExtrinsicIndexed{Hash: hash, Header: encoded}) + } else { + i, ok := indexMap[uint32(index)] + if ok { + encoded := 
scale.MustMarshal(extrinsic) + if int(i.Size) <= len(encoded) { + offset := len(encoded) - int(i.Size) + transaction.Store(columns.Transaction, dbHash(i.Hash), encoded[offset:]) + dbExtrinsic = newDbExtrinsic[E](dbExtrinsicIndexed{Hash: dbHash(i.Hash), Header: encoded[:offset]}) + } else { + // Invalid indexed slice. Just store full data and don't index anything. + dbExtrinsic = newDbExtrinsic[E](dbExtrinsicFull[E]{Extrinsic: extrinsic}) + } + } else { + dbExtrinsic = newDbExtrinsic[E](dbExtrinsicFull[E]{Extrinsic: extrinsic}) + } + } + extrinsicIndex = append(extrinsicIndex, dbExtrinsic) + } + logger.Debugf("DB transaction index: %d inserted, %d renewed, %d full", + len(indexMap), len(renewedMap), len(extrinsicIndex)-len(indexMap)-len(renewedMap)) + + return scale.MustMarshal(extrinsicIndex) +} + +func applyIndexedBody(transaction *database.Transaction[hash.H256], body [][]byte) { + for _, extrinsic := range body { + hash := runtime.BlakeTwo256{}.Hash(extrinsic) + transaction.Store(columns.Transaction, hash, extrinsic) + } +} + +func (b *Backend[H, Hasher, N, E, Header]) InsertAux(insert []api.KeyValue, delete [][]byte) error { + var transaction database.Transaction[dbHash] + for _, kv := range insert { + transaction.Set(columns.Aux, kv.Key, kv.Value) + } + for _, h := range delete { + transaction.Remove(columns.Aux, h) + } + err := b.storage.db.Commit(transaction) + if err != nil { + return err + } + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) GetAux(key []byte) ([]byte, error) { + return b.storage.db.Get(columns.Aux, key), nil +} + +func (b *Backend[H, Hasher, N, E, Header]) beginOperation() *BlockImportOperation[H, Hasher, N, Header, E] { + return &BlockImportOperation[H, Hasher, N, Header, E]{ + pendingBlock: nil, + oldState: b.emptyState(), + dbUpdates: *trie.NewPrefixedMemoryDB[H, Hasher](), + storageUpdates: make(statemachine.StorageCollection, 0), + childStorageUpdates: make(statemachine.ChildStorageCollection, 0), + offchainStorageUpdates: make(statemachine.OffchainChangesCollection, 0), + auxOps: make(api.AuxDataOperations, 0), + finalizedBlocks: make([]finalizedBlock[H], 0), + setHead: nil, + commitState: false, + indexOps: make([]statemachine.IndexOperation, 0), + } +} + +func (b *Backend[H, Hasher, N, E, Header]) BeginOperation() ( + api.BlockImportOperation[N, H, Hasher, Header, E], error, +) { + return b.beginOperation(), nil +} + +func (b *Backend[H, Hasher, N, E, Header]) BeginStateOperation( + operation api.BlockImportOperation[N, H, Hasher, Header, E], block H, +) error { + op := operation.(*BlockImportOperation[H, Hasher, N, Header, E]) + if block == *(new(H)) { + op.oldState = b.emptyState() + } else { + state, err := b.stateAt(block) + if err != nil { + return err + } + op.oldState = state + } + + op.commitState = true + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) CommitOperation( + operation api.BlockImportOperation[N, H, Hasher, Header, E], +) error { + op := operation.(*BlockImportOperation[H, Hasher, N, Header, E]) + + err := b.tryCommitOperation(op) + if err != nil { + stateMetaDB := stateMetaDB{b.storage.db} + resetErr := b.storage.StateDB.Reset(stateMetaDB) + if resetErr != nil { + return fmt.Errorf("%w: %v", blockchain.ErrStateDatabase, resetErr) + } + b.blockchain.clearPinningCache() + return err + } + b.storage.StateDB.Sync() + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) FinalizeBlock(hash H, justification *runtime.Justification) error { + var transaction database.Transaction[dbHash] + header, err := 
b.blockchain.header(hash) + if err != nil { + return err + } + currentTransactionJustifications := make(map[H]runtime.Justification) + m, err := b.finalizeBlockWithTransaction( + &transaction, hash, header, nil, justification, currentTransactionJustifications, + ) + if err != nil { + return err + } + + err = b.storage.db.Commit(transaction) + if err != nil { + return err + } + b.blockchain.updateMeta(m) + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) AppendJustification(hash H, justification runtime.Justification) error { + var transaction database.Transaction[dbHash] + header, err := b.blockchain.header(hash) + if err != nil { + return err + } + number := (*header).Number() + + // Check if the block is finalized first. + isDescendantOf := utils.IsDescendantOf(b.blockchain, nil) + lastFinalized, err := b.blockchain.LastFinalized() + if err != nil { + return err + } + + // We can do a quick check first, before doing a proper but more expensive check. + if number > b.blockchain.Info().FinalizedNumber { + return blockchain.ErrNotInFinalizedChain + } + if hash != lastFinalized { + ido, err := isDescendantOf(hash, lastFinalized) + if err != nil { + return err + } + if !ido { + return blockchain.ErrNotInFinalizedChain + } + } + + var justifications runtime.Justifications + storedJustifications, err := b.blockchain.Justifications(hash) + if err != nil { + return err + } + if storedJustifications != nil { + if !storedJustifications.Append(justification) { + return fmt.Errorf("%w: Duplicate consensus engine ID", blockchain.ErrBadJustification) + } + justifications = storedJustifications + } else { + justifications = runtime.Justifications{justification} + } + + lookupKey, err := newLookupKey(number, hash) + if err != nil { + return err + } + transaction.Set(columns.Justifications, lookupKey, scale.MustMarshal(justifications)) + + err = b.storage.db.Commit(transaction) + if err != nil { + return err + } + + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) OffchainStorage() p_offchain.OffchainStorage { + return b.offchainStorage +} + +func (b *Backend[H, Hasher, N, E, Header]) Revert(n N, revertFinalized bool) (N, map[H]any, error) { + revertedFinalized := make(map[H]any) + + info := b.blockchain.Info() + + var highestLeaf *numberHash[H, N] + b.blockchain.leavesMtx.RLock() + numberHashes := b.blockchain.leaves.HighestLeaf() + if numberHashes != nil { + highestLeaf = &numberHash[H, N]{ + Number: numberHashes.Number, + Hash: numberHashes.Hashes[len(numberHashes.Hashes)-1], + } + } + b.blockchain.leavesMtx.RUnlock() + bestNumber := info.BestNumber + bestHash := info.BestHash + + finalized := info.FinalizedNumber + + revertible := bestNumber - finalized + + if !revertFinalized && revertible < n { + n = revertible + } + + var ( + numberToRevert N + hashToRevert H + ) + if highestLeaf != nil { + n = n + highestLeaf.Number - bestNumber + numberToRevert = highestLeaf.Number + hashToRevert = highestLeaf.Hash + } else { + numberToRevert = bestNumber + hashToRevert = bestHash + } + + var revertBlocks = func() (N, error) { + for c := uint64(0); c < saturating.Into[N, uint64](n); c++ { + if numberToRevert == 0 { + return saturating.Into[uint64, N](c), nil + } + var transaction database.Transaction[hash.H256] + removed, err := b.blockchain.Header(hashToRevert) + if err != nil { + return 0, err + } + if removed == nil { + return 0, fmt.Errorf("%w: Error reverting to %s, Block header not found", + blockchain.ErrUnknownBlock, hashToRevert) + } + removedHash := (*removed).Hash() + + prevNumber 
:= saturating.Sub(numberToRevert, uint(1)) + var prevHash H + if prevNumber == bestNumber { + prevHash = bestHash + } else { + prevHash = (*removed).ParentHash() + } + + if !b.HaveStateAt(prevHash, prevNumber) { + return saturating.Into[uint64, N](c), nil + } + + commit := b.storage.StateDB.RevertOne() + if commit != nil { + applyStateCommit(&transaction, *commit) + + numberToRevert = prevNumber + hashToRevert = prevHash + + updateFinalized := numberToRevert < finalized + + key, err := newLookupKey(numberToRevert, hashToRevert) + if err != nil { + return 0, err + } + + if updateFinalized { + transaction.Set(columns.Meta, metakeys.FinalizedBlock, key) + + revertedFinalized[removedHash] = nil + if finalizedState := b.blockchain.Info().FinalizedState; finalizedState != nil { + if finalizedState.Hash == hashToRevert { + if !(numberToRevert == 0) && b.HaveStateAt(prevHash, numberToRevert-1) { + lookupKey, err := newLookupKey(numberToRevert-1, prevHash) + if err != nil { + return 0, err + } + transaction.Set(columns.Meta, metakeys.FinalizedState, lookupKey) + } else { + transaction.Remove(columns.Meta, metakeys.FinalizedState) + } + } + } + } + transaction.Set(columns.Meta, metakeys.BestBlock, key) + transaction.Remove(columns.KeyLookup, (*removed).Hash().Bytes()) + removeChildren(&transaction, columns.Meta, metakeys.ChildrenPrefix, hashToRevert) + err = b.storage.db.Commit(transaction) + if err != nil { + return 0, err + } + + isBest := numberToRevert < bestNumber + + b.blockchain.updateMeta(metaUpdate[H, N]{ + Hash: hashToRevert, + Number: numberToRevert, + IsBest: isBest, + IsFinalized: updateFinalized, + WithState: false, + }) + } else { + return saturating.Into[uint64, N](c), nil + } + + } + return n, nil + } + + reverted, err := revertBlocks() + if err != nil { + return 0, nil, err + } + + var revertLeaves = func() error { + var transaction database.Transaction[hash.H256] + b.blockchain.leavesMtx.Lock() + defer b.blockchain.leavesMtx.Unlock() + leaves := &b.blockchain.leaves + + leaves.Revert(hashToRevert, numberToRevert) + leaves.PrepareTransaction(&transaction, uint32(columns.Meta), metakeys.LeafPrefix) + err := b.storage.db.Commit(transaction) + if err != nil { + return err + } + return nil + } + + err = revertLeaves() + if err != nil { + return 0, nil, err + } + + return reverted, revertedFinalized, nil +} + +func (b *Backend[H, Hasher, N, E, Header]) RemoveLeafBlock(hash H) error { + bestHash := b.blockchain.Info().BestHash + + if bestHash == hash { + return fmt.Errorf("%w: Can't remove best block %s", blockchain.ErrBackend, hash) + } + + hdr, err := b.blockchain.HeaderMetadata(hash) + if err != nil { + return err + } + if !b.HaveStateAt(hash, hdr.Number) { + return fmt.Errorf("%w: State already discarded for %s", blockchain.ErrUnknownBlock, hash) + } + + b.blockchain.leavesMtx.Lock() + defer b.blockchain.leavesMtx.Unlock() + if !b.blockchain.leaves.Contains(hdr.Number, hash) { + return fmt.Errorf("%w: Can't remove non-leaf block %s", blockchain.ErrBackend, hash) + } + + var transaction database.Transaction[dbHash] + commit := b.storage.StateDB.Remove(hash) + if commit != nil { + applyStateCommit(&transaction, *commit) + } + transaction.Remove(columns.KeyLookup, hash.Bytes()) + + unfiltered, err := b.blockchain.Children(hdr.Parent) + if err != nil { + return err + } + var children []H + for _, child := range unfiltered { + if child != hash { + children = append(children, child) + } + } + + var parentLeaf *H + if len(children) == 0 { + removeChildren(&transaction, columns.Meta, 
metakeys.ChildrenPrefix, hdr.Parent) + parentLeaf = &hdr.Parent + } else { + writeChildren(&transaction, columns.Meta, metakeys.ChildrenPrefix, hdr.Parent, children) + } + + removeOutcome := b.blockchain.leaves.Remove(hash, hdr.Number, parentLeaf) + b.blockchain.leaves.PrepareTransaction(&transaction, uint32(columns.Meta), metakeys.LeafPrefix) + err = b.storage.db.Commit(transaction) + if err != nil { + if removeOutcome != nil { + b.blockchain.leaves.Undo().UndoRemove(*removeOutcome) + } + return err + } + b.blockchain.RemoveHeaderMetadata(hash) + + return nil +} + +func (b *Backend[H, Hasher, N, E, Header]) HaveStateAt(hash H, number N) bool { + if b.isArchive { + header, err := b.blockchain.HeaderMetadata(hash) + if err != nil { + return false + } + val, err := b.storage.Get(header.StateRoot, hashdb.EmptyPrefix) + if err != nil { + return false + } + return val != nil + } else { + isPruned := b.storage.StateDB.IsPruned(hash, saturating.Into[N, uint64](number)) + switch isPruned { + case statedb.IsPrunedPruned: + return false + case statedb.IsPrunedNotPruned: + return true + case statedb.IsPrunedMaybePruned: + header, err := b.blockchain.HeaderMetadata(hash) + if err != nil { + return false + } + val, err := b.storage.Get(header.StateRoot, hashdb.EmptyPrefix) + if err != nil { + return false + } + return val != nil + default: + panic("unreachable") + } + } +} + +func (b *Backend[H, Hasher, N, E, Header]) stateAt(hash H) (refTrackingState[H, Hasher], error) { + b.blockchain.metaMtx.RLock() + if hash == b.blockchain.meta.GenesisHash { + b.genesisStateMtx.RLock() + if genesisState := b.genesisState; genesisState != nil { + root := genesisState.root + var localCache *cache.LocalTrieCache[H] + if b.sharedTrieCache != nil { + lcc := b.sharedTrieCache.LocalTrieCache() + localCache = &lcc + } + dbState := statemachine.NewTrieBackend[H, Hasher](&b.storage, root, localCache, nil) + state := refTrackingState[H, Hasher]{ + state: DBState[H, Hasher]{dbState}, + storage: b.storage, + parentHash: nil, + } + b.genesisStateMtx.RUnlock() + b.blockchain.metaMtx.RUnlock() + return state, nil + } + b.genesisStateMtx.RUnlock() + } + b.blockchain.metaMtx.RUnlock() + + hdr, err := b.blockchain.HeaderMetadata(hash) + if err != nil { + return refTrackingState[H, Hasher]{}, err + } + + var hint = func() bool { + val := b.storage.db.Get(columns.State, hdr.StateRoot.Bytes()) + return val != nil + } + + err = b.storage.StateDB.Pin(hash, saturating.Into[N, uint64](hdr.Number), hint) + if err != nil { + return refTrackingState[H, Hasher]{}, + fmt.Errorf("%w: State already discarded for %s", blockchain.ErrUnknownBlock, hash) + } + root := hdr.StateRoot + var localCache *cache.LocalTrieCache[H] + if b.sharedTrieCache != nil { + lcc := b.sharedTrieCache.LocalTrieCache() + localCache = &lcc + } + dbState := statemachine.NewTrieBackend[H, Hasher](&b.storage, root, localCache, nil) + state := refTrackingState[H, Hasher]{ + state: DBState[H, Hasher]{dbState}, + storage: b.storage, + parentHash: &hash, + } + return state, nil +} + +func (b *Backend[H, Hasher, N, E, Header]) StateAt(hash H) (statemachine.Backend[H, Hasher], error) { + state, err := b.stateAt(hash) + if err != nil { + return nil, err + } + backend := state.state + return &backend, nil +} + +func (b *Backend[H, Hasher, N, E, Header]) Blockchain() blockchain.Backend[H, N, Header, E] { + return b.blockchain +} + +func (b *Backend[H, Hasher, N, E, Header]) GetImportLock() *sync.RWMutex { + return &b.importLock +} + +func (b *Backend[H, Hasher, N, E, Header]) 
RequiresFullSync() bool {
+ pruningMode := b.storage.StateDB.PruningMode()
+ switch pruningMode.(type) {
+ case statedb.PruningModeArchiveAll:
+ return true
+ case statedb.PruningModeArchiveCanonical:
+ return true
+ case statedb.PruningModeConstrained:
+ return false
+ default:
+ panic("unreachable")
+ }
+}
+
+func (b *Backend[H, Hasher, N, E, Header]) PinBlock(hash H) error {
+ var hint = func() bool {
+ hdr, err := b.blockchain.HeaderMetadata(hash)
+ if err != nil {
+ return false
+ }
+ val := b.storage.db.Get(columns.State, hdr.StateRoot.Bytes())
+ return val != nil
+ }
+
+ number, err := b.blockchain.Number(hash)
+ if err != nil {
+ return err
+ }
+ if number != nil {
+ err := b.storage.StateDB.Pin(hash, saturating.Into[N, uint64](*number), hint)
+ if err != nil {
+ return fmt.Errorf("%w: State already discarded for %s", blockchain.ErrUnknownBlock, hash)
+ }
+ } else {
+ return fmt.Errorf("%w: Cannot pin block with hash %s. Block not found", blockchain.ErrUnknownBlock, hash)
+ }
+
+ if _, ok := b.blocksPruning.(BlocksPruningKeepAll); !ok {
+ // Only increase the reference count for this hash. The value is loaded once we prune.
+ b.blockchain.bumpRef(hash)
+ }
+ return nil
+}
+
+func (b *Backend[H, Hasher, N, E, Header]) UnpinBlock(hash H) {
+ b.storage.StateDB.Unpin(hash)
+
+ if _, ok := b.blocksPruning.(BlocksPruningKeepAll); !ok {
+ // Only decrease the reference count for this hash.
+ b.blockchain.unpin(hash)
+ }
+}
diff --git a/internal/client/db/backend_integration_test.go b/internal/client/db/backend_integration_test.go
new file mode 100644
index 0000000000..4cb8ab4c11
--- /dev/null
+++ b/internal/client/db/backend_integration_test.go
@@ -0,0 +1,54 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+//go:build integration
+
+package db
+
+import (
+ "testing"
+
+ p_blockchain "github.com/ChainSafe/gossamer/internal/primitives/blockchain"
+ "github.com/ChainSafe/gossamer/internal/primitives/core/hash"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBackend_Integration(t *testing.T) {
+ t.Run("tree_route_regression", func(t *testing.T) {
+ // NOTE: this is a test for a regression introduced in #3665: the result
+ // of tree_route would be erroneously computed, since it took the
+ // ancestor in CachedHeaderMetadata into account for the comparison.
+ // In this test we simulate the same behavior, with the side-effect
+ // triggering the issue being the eviction of a previously fetched record
+ // from the cache; therefore this test depends on the LRU cache
+ // size for header metadata, which is currently set to 5000 elements.
+ backend := NewTestBackend(t, BlocksPruningSome(10000), 10000)
+ blockchain := backend.blockchain
+
+ genesis := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256(""))
+
+ parent := genesis
+ for i := uint64(1); i <= 100; i++ {
+ parent = insertHeader(t, backend, i, parent, nil, hash.H256(""))
+ }
+ block100 := parent
+
+ for i := uint64(101); i <= 7000; i++ {
+ parent = insertHeader(t, backend, i, parent, nil, hash.H256(""))
+ }
+ block7000 := parent
+
+ // This will cause the ancestor of block100 to be set to genesis as a side-effect.
+ _, err := p_blockchain.LowestCommonAncestor(blockchain, genesis, block100)
+ require.NoError(t, err)
+
+ // While traversing the tree we will have to make 6900 calls to
+ // HeaderMetadata, which is enough to exhaust our cache,
+ // which only takes 5000 elements.
In particular, the CachedHeaderMetadata struct for + // block #100 will be evicted and will get a new value (with ancestor set to its parent). + treeRoute, err := p_blockchain.NewTreeRoute(blockchain, block100, block7000) + require.NoError(t, err) + + require.Empty(t, treeRoute.Retracted()) + }) +} diff --git a/internal/client/db/backend_test.go b/internal/client/db/backend_test.go new file mode 100644 index 0000000000..0589a89f0c --- /dev/null +++ b/internal/client/db/backend_test.go @@ -0,0 +1,2357 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package db + +import ( + "bytes" + "fmt" + "testing" + + "github.com/ChainSafe/gossamer/internal/client/api" + "github.com/ChainSafe/gossamer/internal/client/db/columns" + statedb "github.com/ChainSafe/gossamer/internal/client/state-db" + hashdb "github.com/ChainSafe/gossamer/internal/hash-db" + memorykvdb "github.com/ChainSafe/gossamer/internal/kvdb/memory-kvdb" + memorydb "github.com/ChainSafe/gossamer/internal/memory-db" + p_blockchain "github.com/ChainSafe/gossamer/internal/primitives/blockchain" + "github.com/ChainSafe/gossamer/internal/primitives/core/hash" + "github.com/ChainSafe/gossamer/internal/primitives/database" + "github.com/ChainSafe/gossamer/internal/primitives/runtime" + "github.com/ChainSafe/gossamer/internal/primitives/runtime/generic" + rt_testing "github.com/ChainSafe/gossamer/internal/primitives/runtime/testing" + statemachine "github.com/ChainSafe/gossamer/internal/primitives/state-machine" + "github.com/ChainSafe/gossamer/internal/primitives/storage" + "github.com/ChainSafe/gossamer/internal/primitives/trie" + "github.com/ChainSafe/gossamer/pkg/scale" + "github.com/ChainSafe/gossamer/pkg/trie/triedb" + "github.com/stretchr/testify/require" + "github.com/tidwall/btree" +) + +var ( + _ api.BlockImportOperation[ + uint32, hash.H256, runtime.BlakeTwo256, *generic.Header[uint32, hash.H256, runtime.BlakeTwo256], noopExtrinsic, + ] = &BlockImportOperation[ + hash.H256, runtime.BlakeTwo256, uint32, *generic.Header[uint32, hash.H256, runtime.BlakeTwo256], noopExtrinsic, + ]{} + _ api.Backend[ + hash.H256, uint32, runtime.BlakeTwo256, *generic.Header[uint32, hash.H256, runtime.BlakeTwo256], noopExtrinsic, + ] = &Backend[ + hash.H256, runtime.BlakeTwo256, uint32, noopExtrinsic, *generic.Header[uint32, hash.H256, runtime.BlakeTwo256], + ]{} +) + +func NewTestBackend(t *testing.T, + blocksPruning BlocksPruning, canonicalizationDelay uint64, +) *Backend[ + hash.H256, + runtime.BlakeTwo256, + uint64, + rt_testing.ExtrinsicsWrapper[uint64], + *generic.Header[uint64, hash.H256, runtime.BlakeTwo256], +] { + t.Helper() + + kvdb := memorykvdb.New(13) + var statePruning statedb.PruningMode + switch blocksPruning := blocksPruning.(type) { + case BlocksPruningKeepAll: + statePruning = statedb.PruningModeArchiveAll{} + case BlocksPruningKeepFinalized: + statePruning = statedb.PruningModeArchiveCanonical{} + case BlocksPruningSome: + statePruning = statedb.NewPruningModeConstrained(uint32(blocksPruning)) + default: + t.Fatalf("unreachable") + } + trieCacheMaxSize := uint(16 * 1024 * 1024) + dbSetting := DatabaseConfig{ + TrieCacheMaximumSize: &trieCacheMaxSize, + StatePruning: statePruning, + Source: DatabaseSource{DB: database.NewDBAdapter[hash.H256](kvdb), RequireCreateFlag: true}, + BlocksPruning: blocksPruning, + } + + backend, err := NewBackend[ + hash.H256, + uint64, + rt_testing.ExtrinsicsWrapper[uint64], + runtime.BlakeTwo256, + *generic.Header[uint64, hash.H256, runtime.BlakeTwo256], + 
](dbSetting, canonicalizationDelay) + if err != nil { + panic(err) + } + return backend +} + +func insertHeader(t *testing.T, + backend *Backend[ + hash.H256, + runtime.BlakeTwo256, + uint64, + rt_testing.ExtrinsicsWrapper[uint64], + *generic.Header[uint64, hash.H256, runtime.BlakeTwo256], + ], + number uint64, + parentHash hash.H256, + changes []trie.KeyValue, //nolint:unparam + extrinisicsRoot hash.H256, +) hash.H256 { + t.Helper() + hash, err := insertBlock( + t, + backend, + number, + parentHash, + changes, + extrinisicsRoot, + make([]rt_testing.ExtrinsicsWrapper[uint64], 0), + nil, + ) + require.NoError(t, err) + return hash +} + +func insertBlock(t *testing.T, + backend *Backend[ + hash.H256, + runtime.BlakeTwo256, + uint64, + rt_testing.ExtrinsicsWrapper[uint64], + *generic.Header[uint64, hash.H256, runtime.BlakeTwo256], + ], + number uint64, + parentHash hash.H256, + _changes []trie.KeyValue, + extrinisicsRoot hash.H256, + body []rt_testing.ExtrinsicsWrapper[uint64], + transactionIndex []statemachine.IndexOperation, +) (hash.H256, error) { + t.Helper() + var digest runtime.Digest + header := generic.NewHeader[uint64, hash.H256, runtime.BlakeTwo256]( + number, extrinisicsRoot, hash.H256(""), parentHash, digest, + ) + + var blockHash hash.H256 + if number != 0 { + blockHash = parentHash + } + + op := backend.beginOperation() + err := backend.BeginStateOperation(op, blockHash) + require.NoError(t, err) + if transactionIndex != nil { + err = op.UpdateTransactionIndex(transactionIndex) + require.NoError(t, err) + } + + // Insert some fake data to ensure that the block can be found in the state column. + root, overlay := op.oldState.state.StorageRoot( + []statemachine.Delta{{Key: blockHash.Bytes(), Value: blockHash.Bytes()}}, + storage.StateVersionV1, + ) + err = op.UpdateDBStorage(overlay) + require.NoError(t, err) + header.SetStateRoot(root) + + err = op.SetBlockData(header, body, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = backend.CommitOperation(op) + return header.Hash(), err +} + +func insertHeaderNoHead(t *testing.T, + backend *Backend[ + hash.H256, + runtime.BlakeTwo256, + uint64, + rt_testing.ExtrinsicsWrapper[uint64], + *generic.Header[uint64, hash.H256, runtime.BlakeTwo256], + ], + number uint64, + parentHash hash.H256, + extrinisicsRoot hash.H256, +) hash.H256 { + var digest runtime.Digest + header := generic.NewHeader[uint64, hash.H256, runtime.BlakeTwo256]( + number, extrinisicsRoot, hash.H256(""), parentHash, digest, + ) + op := backend.beginOperation() + + state, err := backend.StateAt(parentHash) + if err != nil { + if parentHash == hash.H256("") { + tb := backend.emptyState().state.TrieBackend + state = tb + } else { + t.Fail() + } + } + root, _ := state.StorageRoot([]statemachine.Delta{ + {Key: parentHash.Bytes(), Value: parentHash.Bytes()}, + }, storage.StateVersionV1) + header.SetStateRoot(root) + + err = op.SetBlockData(header, nil, nil, nil, api.NewBlockStateNormal) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + return header.Hash() + +} + +func TestBackend(t *testing.T) { + t.Run("block_hash_inserted_correctly", func(t *testing.T) { + var backing database.Database[hash.H256] + { + db := NewTestBackend(t, BlocksPruningSome(1), 0) + for i := uint64(0); i < 10; i++ { + h, err := db.Blockchain().Hash(i) + require.NoError(t, err) + require.Nil(t, h) + + { + var hash hash.H256 + if i != 0 { + h, err := db.blockchain.Hash(i - 1) + require.NoError(t, err) + require.NotNil(t, h) + hash = *h + } + + op, 
err := db.BeginOperation() + require.NoError(t, err) + err = db.BeginStateOperation(op, hash) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + i, dbHash(""), dbHash(""), hash, runtime.Digest{}, + ) + + err = op.SetBlockData(header, nil, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + err = db.CommitOperation(op) + require.NoError(t, err) + } + + h, err = db.Blockchain().Hash(i) + require.NoError(t, err) + require.NotNil(t, h) + } + backing = db.storage.db + } + + trieCacheMaxSize := uint(16 * 1024 * 1024) + backend, err := NewBackend[ + dbHash, + uint64, + noopExtrinsic, + runtime.BlakeTwo256, + *generic.Header[uint64, hash.H256, runtime.BlakeTwo256], + ]( + DatabaseConfig{ + TrieCacheMaximumSize: &trieCacheMaxSize, + StatePruning: statedb.NewPruningModeConstrained(1), + Source: DatabaseSource{DB: backing, RequireCreateFlag: false}, + BlocksPruning: BlocksPruningKeepFinalized{}, + }, + 0, + ) + require.NoError(t, err) + require.Equal(t, uint64(9), backend.Blockchain().Info().BestNumber) + for i := uint64(0); i < 10; i++ { + hash, err := backend.Blockchain().Hash(i) + require.NoError(t, err) + require.NotNil(t, hash) + } + }) + + t.Run("set_state_data", func(t *testing.T) { + for i, stateVersion := range []storage.StateVersion{storage.StateVersionV0, storage.StateVersionV1} { + t.Run(fmt.Sprintf("StateVersion%d", i), func(t *testing.T) { + db := NewTestBackend(t, BlocksPruningSome(2), 0) + var hash dbHash + { + op := db.beginOperation() + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 0, dbHash(""), dbHash(""), dbHash(""), runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte{1, 3, 5}, Value: []byte{2, 4, 6}}, + {Key: []byte{1, 2, 3}, Value: []byte{9, 9, 9}}, + } + + root, _ := op.oldState.state.StorageRoot(deltas, stateVersion) + header.SetStateRoot(root) + h := header.Hash() + + top := btree.NewMap[string, []byte](0) + for _, delta := range deltas { + top.Set(string(delta.Key), delta.Value) + } + _, err := op.ResetStorage(storage.Storage{ + Top: *top, + }, stateVersion) + require.NoError(t, err) + err = op.SetBlockData(header, nil, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = db.CommitOperation(op) + require.NoError(t, err) + + state, err := db.StateAt(h) + require.NoError(t, err) + + val, err := state.Storage([]byte{1, 3, 5}) + require.NoError(t, err) + require.Equal(t, []byte{2, 4, 6}, []byte(val)) + val, err = state.Storage([]byte{1, 2, 3}) + require.NoError(t, err) + require.Equal(t, []byte{9, 9, 9}, []byte(val)) + val, err = state.Storage([]byte{5, 5, 5}) + require.NoError(t, err) + require.Nil(t, val) + + hash = h + } + + { + op := db.beginOperation() + err := db.BeginStateOperation(op, hash) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 1, + dbHash(""), + dbHash(""), + hash, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte{1, 3, 5}, Value: nil}, + {Key: []byte{5, 5, 5}, Value: []byte{4, 5, 6}}, + } + + root, overlay := op.oldState.state.StorageRoot(deltas, stateVersion) + err = op.UpdateDBStorage(overlay) + require.NoError(t, err) + header.SetStateRoot(root) + + copiedDeltas := make(statemachine.StorageCollection, 0) + for _, delta := range deltas { + copiedDeltas = append(copiedDeltas, statemachine.StorageKeyValue{ + StorageKey: delta.Key, + StorageValue: delta.Value, + }) + } + err = op.UpdateStorage(copiedDeltas, nil) + require.NoError(t, err) + err = op.SetBlockData(header, nil, 
nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = db.CommitOperation(op) + require.NoError(t, err) + + state, err := db.StateAt(header.Hash()) + require.NoError(t, err) + + val, err := state.Storage([]byte{1, 3, 5}) + require.NoError(t, err) + require.Nil(t, val) + val, err = state.Storage([]byte{1, 2, 3}) + require.NoError(t, err) + require.Equal(t, []byte{9, 9, 9}, []byte(val)) + val, err = state.Storage([]byte{5, 5, 5}) + require.NoError(t, err) + require.Equal(t, []byte{4, 5, 6}, []byte(val)) + } + }) + } + }) + + t.Run("delete_only_when_negative_rc", func(t *testing.T) { + stateVersion := storage.StateVersionV1 + var key dbHash + backend := NewTestBackend(t, BlocksPruningSome(1), 0) + + var hash dbHash + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, dbHash("")) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 0, + dbHash(""), + dbHash(""), + dbHash(""), + runtime.Digest{}, + ) + + root, _ := op.oldState.state.StorageRoot(nil, stateVersion) + header.SetStateRoot(root) + h := header.Hash() + + _, err = op.ResetStorage(storage.Storage{Top: *btree.NewMap[string, []byte](0)}, stateVersion) + require.NoError(t, err) + + key = op.dbUpdates.Insert(hashdb.EmptyPrefix, []byte("hello")) + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + val := backend.storage.db.Get(columns.State, memorydb.NewPrefixedKey(key, hashdb.EmptyPrefix)) + require.Equal(t, []byte("hello"), val) + + hash = h + } + + var hash1 dbHash + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, hash) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 1, + dbHash(""), + dbHash(""), + hash, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{} + + root, _ := op.oldState.state.StorageRoot(deltas, stateVersion) + header.SetStateRoot(root) + h := header.Hash() + + op.dbUpdates.Insert(hashdb.EmptyPrefix, []byte("hello")) + op.dbUpdates.Remove(key, hashdb.EmptyPrefix) + + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + val := backend.storage.db.Get(columns.State, memorydb.NewPrefixedKey(key, hashdb.EmptyPrefix)) + require.Equal(t, []byte("hello"), val) + + hash1 = h + } + + var hash2 dbHash + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, hash1) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 2, + dbHash(""), + dbHash(""), + hash1, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{} + + root, _ := op.oldState.state.StorageRoot(deltas, stateVersion) + header.SetStateRoot(root) + h := header.Hash() + + op.dbUpdates.Remove(key, hashdb.EmptyPrefix) + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + val := backend.storage.db.Get(columns.State, memorydb.NewPrefixedKey(key, hashdb.EmptyPrefix)) + require.Equal(t, []byte("hello"), val) + + hash2 = h + } + + var hash3 dbHash + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, hash2) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( 
+ 3, + dbHash(""), + dbHash(""), + hash2, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{} + + root, _ := op.oldState.state.StorageRoot(deltas, stateVersion) + header.SetStateRoot(root) + h := header.Hash() + + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + val := backend.storage.db.Get(columns.State, memorydb.NewPrefixedKey(key, hashdb.EmptyPrefix)) + require.Equal(t, []byte("hello"), val) + + hash3 = h + } + + var hash4 dbHash + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, hash3) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 4, + dbHash(""), + dbHash(""), + hash3, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{} + + root, _ := op.oldState.state.StorageRoot(deltas, stateVersion) + header.SetStateRoot(root) + h := header.Hash() + + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + val := backend.storage.db.Get(columns.State, memorydb.NewPrefixedKey(key, hashdb.EmptyPrefix)) + require.Nil(t, val) + + hash4 = h + } + + err := backend.FinalizeBlock(hash1, nil) + require.NoError(t, err) + err = backend.FinalizeBlock(hash2, nil) + require.NoError(t, err) + err = backend.FinalizeBlock(hash3, nil) + require.NoError(t, err) + err = backend.FinalizeBlock(hash4, nil) + require.NoError(t, err) + + val := backend.storage.db.Get(columns.State, memorydb.NewPrefixedKey(key, hashdb.EmptyPrefix)) + require.Nil(t, val) + }) + + t.Run("tree_route_works", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(1000), 100) + blockchain := backend.blockchain + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + + // fork from genesis: 3 prong. + a1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + a2 := insertHeader(t, backend, 2, a1, nil, hash.H256("")) + a3 := insertHeader(t, backend, 3, a2, nil, hash.H256("")) + + // fork from genesis: 2 prong. 
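+ // A different extrinsics root gives b1 a hash distinct from a1 at the same height.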
+ b1 := insertHeader(t, backend, 1, block0, nil, hash.H256(bytes.Repeat([]byte{1}, 32))) + b2 := insertHeader(t, backend, 2, b1, nil, hash.H256("")) + + { + treeRoute, err := p_blockchain.NewTreeRoute(blockchain, a1, a1) + require.NoError(t, err) + + require.Equal(t, a1, treeRoute.CommonBlock().Hash) + require.Empty(t, treeRoute.Retracted()) + require.Empty(t, treeRoute.Enacted()) + } + + { + treeRoute, err := p_blockchain.NewTreeRoute(blockchain, a3, b2) + require.NoError(t, err) + + require.Equal(t, block0, treeRoute.CommonBlock().Hash) + var retractedHashes []hash.H256 + for _, hn := range treeRoute.Retracted() { + retractedHashes = append(retractedHashes, hn.Hash) + } + require.Equal(t, []hash.H256{a3, a2, a1}, retractedHashes) + } + + { + treeRoute, err := p_blockchain.NewTreeRoute(blockchain, a3, a1) + require.NoError(t, err) + + require.Equal(t, a1, treeRoute.CommonBlock().Hash) + var retractedHashes []hash.H256 + for _, hn := range treeRoute.Retracted() { + retractedHashes = append(retractedHashes, hn.Hash) + } + require.Equal(t, []hash.H256{a3, a2}, retractedHashes) + require.Empty(t, treeRoute.Enacted()) + } + + { + treeRoute, err := p_blockchain.NewTreeRoute(blockchain, a2, a2) + require.NoError(t, err) + + require.Equal(t, a2, treeRoute.CommonBlock().Hash) + require.Empty(t, treeRoute.Retracted()) + require.Empty(t, treeRoute.Enacted()) + } + }) + + t.Run("tree_route_child", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(1000), 100) + blockchain := backend.blockchain + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + block1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + + { + treeRoute, err := p_blockchain.NewTreeRoute(blockchain, block0, block1) + require.NoError(t, err) + + require.Equal(t, block0, treeRoute.CommonBlock().Hash) + require.Empty(t, treeRoute.Retracted()) + var enactedHashes []hash.H256 + for _, hn := range treeRoute.Enacted() { + enactedHashes = append(enactedHashes, hn.Hash) + } + require.Equal(t, []hash.H256{block1}, enactedHashes) + } + }) + + t.Run("lowest_common_ancestor", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(1000), 100) + blockchain := backend.blockchain + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + + // fork from genesis: 3 prong. + a1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + a2 := insertHeader(t, backend, 2, a1, nil, hash.H256("")) + a3 := insertHeader(t, backend, 3, a2, nil, hash.H256("")) + + // fork from genesis: 2 prong. 
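+ // The two prongs share only block0, which should be reported as the
+ // lowest common ancestor below.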
+ b1 := insertHeader(t, backend, 1, block0, nil, hash.H256(bytes.Repeat([]byte{1}, 32))) + b2 := insertHeader(t, backend, 2, b1, nil, hash.H256("")) + + { + lca, err := p_blockchain.LowestCommonAncestor(blockchain, a3, b2) + require.NoError(t, err) + + require.Equal(t, block0, lca.Hash) + require.Equal(t, uint64(0), lca.Number) + } + + { + lca, err := p_blockchain.LowestCommonAncestor(blockchain, a1, a3) + require.NoError(t, err) + + require.Equal(t, a1, lca.Hash) + require.Equal(t, uint64(1), lca.Number) + } + + { + lca, err := p_blockchain.LowestCommonAncestor(blockchain, a3, a1) + require.NoError(t, err) + + require.Equal(t, a1, lca.Hash) + require.Equal(t, uint64(1), lca.Number) + } + + { + lca, err := p_blockchain.LowestCommonAncestor(blockchain, a2, a3) + require.NoError(t, err) + + require.Equal(t, a2, lca.Hash) + require.Equal(t, uint64(2), lca.Number) + } + + { + lca, err := p_blockchain.LowestCommonAncestor(blockchain, a2, a1) + require.NoError(t, err) + + require.Equal(t, a1, lca.Hash) + require.Equal(t, uint64(1), lca.Number) + } + + { + lca, err := p_blockchain.LowestCommonAncestor(blockchain, a2, a2) + require.NoError(t, err) + + require.Equal(t, a2, lca.Hash) + require.Equal(t, uint64(2), lca.Number) + } + }) + + t.Run("leaves_pruned_on_finality", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + + block1a := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + block1b := insertHeader(t, backend, 1, block0, nil, hash.H256(bytes.Repeat([]byte{1}, 32))) + block1c := insertHeader(t, backend, 1, block0, nil, hash.H256(bytes.Repeat([]byte{2}, 32))) + + leaves, err := backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block1a, block1b, block1c}, leaves) + + block2a := insertHeader(t, backend, 2, block1a, nil, hash.H256("")) + block2b := insertHeader(t, backend, 2, block1b, nil, hash.H256("")) + block2c := insertHeader(t, backend, 2, block1b, nil, hash.H256(bytes.Repeat([]byte{1}, 32))) + + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block2a, block2b, block2c, block1c}, leaves) + + err = backend.FinalizeBlock(block1a, nil) + require.NoError(t, err) + + // leaves at same height stay. Leaves at lower heights pruned. + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block2a, block2b, block2c, block1c}, leaves) + + err = backend.FinalizeBlock(block2a, nil) + require.NoError(t, err) + + // leaves at same height stay. Leaves at lower heights pruned. 
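+ // block1c is a leaf below the newly finalized height, so it is displaced
+ // from the leaf set.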
+ leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block2a, block2b, block2c}, leaves) + }) + + t.Run("test_aux", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(0), 0) + val, err := backend.GetAux([]byte("test")) + require.NoError(t, err) + require.Nil(t, val) + + err = backend.InsertAux([]api.KeyValue{{Key: []byte("test"), Value: []byte("hello")}}, nil) + require.NoError(t, err) + val, err = backend.GetAux([]byte("test")) + require.NoError(t, err) + require.Equal(t, []byte("hello"), val) + + err = backend.InsertAux(nil, [][]byte{[]byte("test")}) + require.NoError(t, err) + val, err = backend.GetAux([]byte("test")) + require.NoError(t, err) + require.Nil(t, val) + }) + + var CON0EngineID [4]byte + copy(CON0EngineID[:], "CON0") + + var CON1EngineID [4]byte + copy(CON1EngineID[:], "CON1") + + t.Run("finalize_block_with_justification", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + block1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + + justification := runtime.Justification{ + ConsensusEngineID: CON0EngineID, + EncodedJustification: runtime.EncodedJustification{1, 2, 3}, + } + err := backend.FinalizeBlock(block1, &justification) + require.NoError(t, err) + + justifications, err := backend.Blockchain().Justifications(block1) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{justification}, justifications) + }) + + t.Run("append_justification_to_finalized_block", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + block1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + + just0 := runtime.Justification{ + ConsensusEngineID: CON0EngineID, + EncodedJustification: runtime.EncodedJustification{1, 2, 3}, + } + err := backend.FinalizeBlock(block1, &just0) + require.NoError(t, err) + + just1 := runtime.Justification{ + ConsensusEngineID: CON1EngineID, + EncodedJustification: runtime.EncodedJustification{4, 5}, + } + err = backend.AppendJustification(block1, just1) + require.NoError(t, err) + + just2 := runtime.Justification{ + ConsensusEngineID: CON1EngineID, + EncodedJustification: runtime.EncodedJustification{6, 7}, + } + err = backend.AppendJustification(block1, just2) + require.ErrorIs(t, err, p_blockchain.ErrBadJustification) + + expected := runtime.Justifications{just0, just1} + justifications, err := backend.Blockchain().Justifications(block1) + require.NoError(t, err) + require.Equal(t, expected, justifications) + }) + + t.Run("finalize_multiple_blocks_in_single_op", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + block1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + block2 := insertHeader(t, backend, 2, block1, nil, hash.H256("")) + block3 := insertHeader(t, backend, 3, block2, nil, hash.H256("")) + block4 := insertHeader(t, backend, 4, block3, nil, hash.H256("")) + + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, block0) + require.NoError(t, err) + err = op.MarkFinalized(block1, nil) + require.NoError(t, err) + err = op.MarkFinalized(block2, nil) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + } + + { + op := backend.beginOperation() + err := 
backend.BeginStateOperation(op, block2) + require.NoError(t, err) + err = op.MarkFinalized(block3, nil) + require.NoError(t, err) + err = op.MarkFinalized(block4, nil) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + } + }) + + t.Run("storage_hash_is_cached_correctly", func(t *testing.T) { + stateVersion := storage.StateVersionV1 + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + var hash0 dbHash + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, dbHash("")) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 0, + dbHash(""), + dbHash(""), + dbHash(""), + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte("test"), Value: []byte("test")}, + } + + root, _ := op.oldState.state.StorageRoot(deltas, stateVersion) + header.SetStateRoot(root) + h := header.Hash() + + top := btree.NewMap[string, []byte](0) + for _, delta := range deltas { + top.Set(string(delta.Key), delta.Value) + } + _, err = op.ResetStorage(storage.Storage{ + Top: *top, + }, stateVersion) + require.NoError(t, err) + + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + hash0 = h + } + + state, err := backend.StateAt(hash0) + require.NoError(t, err) + block0Hash, err := state.StorageHash([]byte("test")) + require.NoError(t, err) + require.NotNil(t, block0Hash) + + var hash1 dbHash + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, hash0) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 1, + dbHash(""), + dbHash(""), + hash0, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte("test"), Value: []byte("test2")}, + } + + root, overlay := op.oldState.state.StorageRoot(deltas, stateVersion) + err = op.UpdateDBStorage(overlay) + require.NoError(t, err) + header.SetStateRoot(root) + h := header.Hash() + + copiedDeltas := make(statemachine.StorageCollection, 0) + for _, delta := range deltas { + copiedDeltas = append(copiedDeltas, statemachine.StorageKeyValue{ + StorageKey: delta.Key, + StorageValue: delta.Value, + }) + } + err = op.UpdateStorage(copiedDeltas, nil) + require.NoError(t, err) + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateNormal) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + hash1 = h + } + + { + header, err := backend.Blockchain().Header(hash1) + require.NoError(t, err) + require.NotNil(t, header) + op, err := backend.BeginOperation() + require.NoError(t, err) + err = op.SetBlockData(*header, nil, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + } + + state, err = backend.StateAt(hash1) + require.NoError(t, err) + block1Hash, err := state.StorageHash([]byte("test")) + require.NoError(t, err) + require.NotNil(t, block1Hash) + + require.NotEqual(t, *block0Hash, *block1Hash) + }) + + t.Run("finalize_non_sequential", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + block1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + block2 := insertHeader(t, backend, 2, block1, nil, hash.H256("")) + + { + op := backend.beginOperation() + err := 
backend.BeginStateOperation(op, block0) + require.NoError(t, err) + err = op.MarkFinalized(block2, nil) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.Error(t, err) + } + }) + + t.Run("prune_blocks_on_finalize", func(t *testing.T) { + pruningModes := []BlocksPruning{BlocksPruningSome(2), BlocksPruningKeepFinalized{}, BlocksPruningKeepAll{}} + + for _, pruningMode := range pruningModes { + backend := NewTestBackend(t, pruningMode, 0) + var blocks []hash.H256 + var prevHash hash.H256 + for i := 0; i < 5; i++ { + hash, err := insertBlock(t, + backend, + uint64(i), + prevHash, + nil, + hash.H256(""), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(i)}}, + nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + prevHash = hash + } + + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, blocks[4]) + require.NoError(t, err) + for i := 1; i < 5; i++ { + err = op.MarkFinalized(blocks[i], nil) + require.NoError(t, err) + } + err = backend.CommitOperation(op) + require.NoError(t, err) + } + + bc := backend.Blockchain() + switch pruningMode.(type) { + case BlocksPruningSome: + body, err := bc.Body(blocks[0]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[2]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[3]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(3)}}, body) + body, err = bc.Body(blocks[4]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(4)}}, body) + default: + for i := 0; i < 5; i++ { + body, err := bc.Body(blocks[i]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(i)}}, body) + } + } + } + }) + + t.Run("prune_blocks_on_finalize_with_fork", func(t *testing.T) { + pruningModes := []BlocksPruning{BlocksPruningSome(2), BlocksPruningKeepFinalized{}, BlocksPruningKeepAll{}} + + for _, pruningMode := range pruningModes { + backend := NewTestBackend(t, pruningMode, 10) + var blocks []hash.H256 + var prevHash hash.H256 + for i := 0; i < 5; i++ { + hash, err := insertBlock(t, + backend, + uint64(i), + prevHash, + nil, + hash.H256(""), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(i)}}, + nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + prevHash = hash + } + + // insert a fork at block 2 + forkHashRoot, err := insertBlock(t, + backend, + 2, + blocks[1], + nil, + hash.NewRandomH256(), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: 2}}, + nil, + ) + require.NoError(t, err) + + _, err = insertBlock(t, + backend, + 3, + forkHashRoot, + nil, + hash.NewRandomH256(), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: 2}, {T: 11}}, + nil, + ) + require.NoError(t, err) + op, err := backend.BeginOperation() + require.NoError(t, err) + err = backend.BeginStateOperation(op, blocks[4]) + require.NoError(t, err) + err = op.MarkHead(blocks[4]) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + + bc := backend.Blockchain() + body, err := bc.Body(forkHashRoot) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(2)}}, body) + + for i := 1; i < 5; i++ { + op, err := backend.BeginOperation() + require.NoError(t, err) + err = backend.BeginStateOperation(op, blocks[4]) + require.NoError(t, err) + err = op.MarkFinalized(blocks[i], nil) + require.NoError(t, err) + err = 
backend.CommitOperation(op) + require.NoError(t, err) + } + + switch pruningMode.(type) { + case BlocksPruningSome: + body, err := bc.Body(blocks[0]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[2]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[3]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(3)}}, body) + body, err = bc.Body(blocks[4]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(4)}}, body) + default: + for i := 0; i < 5; i++ { + body, err := bc.Body(blocks[i]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(i)}}, body) + } + } + + switch pruningMode.(type) { + case BlocksPruningKeepAll: + body, err := bc.Body(forkHashRoot) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(2)}}, body) + default: + body, err := bc.Body(forkHashRoot) + require.NoError(t, err) + require.Nil(t, body) + } + + require.Equal(t, uint64(4), bc.Info().BestNumber) + for i := 0; i < 5; i++ { + hash, err := bc.Hash(uint64(i)) + require.NoError(t, err) + require.NotNil(t, hash) + } + } + }) + + t.Run("prune_blocks_on_finalize_and_reorg", func(t *testing.T) { + // 0 - 1b + // \ - 1a - 2a - 3a + // \ - 2b + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + var makeBlock = func(index uint64, parent hash.H256, val uint64) hash.H256 { + hash, err := insertBlock(t, + backend, + index, + parent, + nil, + hash.NewRandomH256(), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: val}}, + nil, + ) + require.NoError(t, err) + return hash + } + + block0 := makeBlock(0, "", 0) + block1a := makeBlock(1, block0, 0x1a) + block1b := makeBlock(1, block0, 0x1b) + block2a := makeBlock(2, block1a, 0x2a) + block2b := makeBlock(2, block1a, 0x2b) + block3a := makeBlock(3, block2a, 0x3a) + + // Make sure 1b is head + op, err := backend.BeginOperation() + require.NoError(t, err) + err = backend.BeginStateOperation(op, block0) + require.NoError(t, err) + err = op.MarkHead(block1b) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + + // Finalize 3a + op, err = backend.BeginOperation() + require.NoError(t, err) + err = backend.BeginStateOperation(op, block0) + require.NoError(t, err) + err = op.MarkHead(block3a) + require.NoError(t, err) + err = op.MarkFinalized(block1a, nil) + require.NoError(t, err) + err = op.MarkFinalized(block2a, nil) + require.NoError(t, err) + err = op.MarkFinalized(block3a, nil) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + bc := backend.Blockchain() + body, err := bc.Body(block1b) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(block2b) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(block0) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(0x00)}}, body) + body, err = bc.Body(block1a) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(0x1a)}}, body) + body, err = bc.Body(block2a) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(0x2a)}}, body) + body, err = bc.Body(block3a) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(0x3a)}}, body) + }) + + t.Run("indexed_data_block_body", 
func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(1), 10) + + x0 := scale.MustMarshal(rt_testing.ExtrinsicsWrapper[uint64]{T: uint64(0)}) + x1 := scale.MustMarshal(rt_testing.ExtrinsicsWrapper[uint64]{T: uint64(1)}) + x0Hash := runtime.BlakeTwo256{}.Hash(x0[1:]) + x1Hash := runtime.BlakeTwo256{}.Hash(x1[1:]) + index := []statemachine.IndexOperation{ + statemachine.IndexOperationInsert{ + Extrinsic: 0, + Hash: x0Hash.Bytes(), + Size: uint32(len(x0)) - 1, + }, + statemachine.IndexOperationInsert{ + Extrinsic: 1, + Hash: x1Hash.Bytes(), + Size: uint32(len(x1)) - 1, + }, + } + hash, err := insertBlock(t, backend, + 0, + "", + nil, + "", + []rt_testing.ExtrinsicsWrapper[uint64]{{T: 0}, {T: 1}}, + index, + ) + require.NoError(t, err) + bc := backend.Blockchain() + tx, err := bc.IndexedTransaction(x0Hash) + require.NoError(t, err) + require.NotNil(t, tx) + require.Equal(t, x0[1:], tx) + tx, err = bc.IndexedTransaction(x1Hash) + require.NoError(t, err) + require.NotNil(t, tx) + require.Equal(t, x1[1:], tx) + + hash0 := bc.Info().GenesisHash + // Push one more block and make sure block 0 is pruned and its transaction index is cleared. + block1, err := insertBlock(t, backend, + 1, + hash, + nil, + "", + []rt_testing.ExtrinsicsWrapper[uint64]{}, + nil, + ) + require.NoError(t, err) + err = backend.FinalizeBlock(block1, nil) + require.NoError(t, err) + body, err := bc.Body(hash0) + require.NoError(t, err) + require.Nil(t, body) + tx, err = bc.IndexedTransaction(x0Hash) + require.NoError(t, err) + require.Nil(t, tx) + tx, err = bc.IndexedTransaction(x1Hash) + require.NoError(t, err) + require.Nil(t, tx) + }) + + t.Run("index_invalid_size", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(1), 10) + + x0 := scale.MustMarshal(rt_testing.ExtrinsicsWrapper[uint64]{T: uint64(0)}) + x1 := scale.MustMarshal(rt_testing.ExtrinsicsWrapper[uint64]{T: uint64(1)}) + x0Hash := runtime.BlakeTwo256{}.Hash(x0) + x1Hash := runtime.BlakeTwo256{}.Hash(x1) + index := []statemachine.IndexOperation{ + statemachine.IndexOperationInsert{ + Extrinsic: 0, + Hash: x0Hash.Bytes(), + Size: uint32(len(x0)), + }, + statemachine.IndexOperationInsert{ + Extrinsic: 1, + Hash: x1Hash.Bytes(), + Size: uint32(len(x1)) + 1, + }, + } + _, err := insertBlock(t, backend, + 0, + "", + nil, + "", + []rt_testing.ExtrinsicsWrapper[uint64]{{T: 0}, {T: 1}}, + index, + ) + require.NoError(t, err) + bc := backend.Blockchain() + tx, err := bc.IndexedTransaction(x0Hash) + require.NoError(t, err) + require.NotNil(t, tx) + require.Equal(t, x0, tx) + tx, err = bc.IndexedTransaction(x1Hash) + require.NoError(t, err) + require.Nil(t, tx) + }) + + t.Run("renew_transaction_storage", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(2), 10) + var blocks []hash.H256 + var prevHash hash.H256 + x1 := scale.MustMarshal(rt_testing.ExtrinsicsWrapper[uint64]{T: uint64(0)}) + x1Hash := runtime.BlakeTwo256{}.Hash(x1[1:]) + for i := 0; i < 10; i++ { + index := []statemachine.IndexOperation{} + if i == 0 { + index = append(index, statemachine.IndexOperationInsert{ + Extrinsic: 0, + Hash: x1Hash.Bytes(), + Size: uint32(len(x1) - 1), + }) + } else if i < 5 { + // keep renewing 1st + index = append(index, statemachine.IndexOperationRenew{ + Extrinsic: 0, + Hash: x1Hash.Bytes(), + }) + } // else stop renewing + + hash, err := insertBlock(t, backend, + uint64(i), + prevHash, + nil, + "", + []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(i)}}, + index, + ) + require.NoError(t, err) + blocks = append(blocks, hash) +
prevHash = hash + } + + for i := 0; i < 10; i++ { + op, err := backend.BeginOperation() + require.NoError(t, err) + err = backend.BeginStateOperation(op, blocks[4]) + require.NoError(t, err) + err = op.MarkFinalized(blocks[i], nil) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + bc := backend.Blockchain() + if i < 6 { + tx, err := bc.IndexedTransaction(x1Hash) + require.NoError(t, err) + require.NotNil(t, tx) + } else { + tx, err := bc.IndexedTransaction(x1Hash) + require.NoError(t, err) + require.Nil(t, tx) + } + } + }) + + t.Run("remove_leaf_block", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(2), 10) + var blocks []hash.H256 + var prevHash hash.H256 + for i := uint64(0); i < 2; i++ { + hash, err := insertBlock(t, backend, + i, + prevHash, + nil, + "", + []rt_testing.ExtrinsicsWrapper[uint64]{{T: i}}, + nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + prevHash = hash + } + + for i := uint64(0); i < 2; i++ { + hash, err := insertBlock(t, backend, + 2, + blocks[1], + nil, + hash.NewRandomH256(), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: i}}, + nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + } + + // insert a fork at block 1, which becomes best block + bestHash, err := insertBlock(t, backend, + uint64(1), + blocks[0], + nil, + hash.NewRandomH256(), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(42)}}, + nil, + ) + require.NoError(t, err) + + require.Equal(t, bestHash, backend.Blockchain().Info().BestHash) + err = backend.RemoveLeafBlock(bestHash) + require.Error(t, err) + + leaves, err := backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{blocks[2], blocks[3], bestHash}, leaves) + children, err := backend.Blockchain().Children(blocks[1]) + require.NoError(t, err) + require.Equal(t, []hash.H256{blocks[2], blocks[3]}, children) + + require.True(t, backend.HaveStateAt(blocks[3], 2)) + header, err := backend.Blockchain().Header(blocks[3]) + require.NoError(t, err) + require.NotNil(t, header) + err = backend.RemoveLeafBlock(blocks[3]) + require.NoError(t, err) + require.False(t, backend.HaveStateAt(blocks[3], 2)) + header, err = backend.Blockchain().Header(blocks[3]) + require.NoError(t, err) + require.Nil(t, header) + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{blocks[2], bestHash}, leaves) + children, err = backend.Blockchain().Children(blocks[1]) + require.NoError(t, err) + require.Equal(t, []hash.H256{blocks[2]}, children) + + require.True(t, backend.HaveStateAt(blocks[2], 2)) + header, err = backend.Blockchain().Header(blocks[2]) + require.NoError(t, err) + require.NotNil(t, header) + err = backend.RemoveLeafBlock(blocks[2]) + require.NoError(t, err) + require.False(t, backend.HaveStateAt(blocks[2], 2)) + header, err = backend.Blockchain().Header(blocks[2]) + require.NoError(t, err) + require.Nil(t, header) + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{bestHash, blocks[1]}, leaves) + children, err = backend.Blockchain().Children(blocks[1]) + require.NoError(t, err) + require.Nil(t, children) + + require.True(t, backend.HaveStateAt(blocks[1], 1)) + header, err = backend.Blockchain().Header(blocks[1]) + require.NoError(t, err) + require.NotNil(t, header) + err = backend.RemoveLeafBlock(blocks[1]) + require.NoError(t, err) + require.False(t, backend.HaveStateAt(blocks[1], 1)) + header, err = backend.Blockchain().Header(blocks[1]) + 
require.NoError(t, err) + require.Nil(t, header) + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{bestHash}, leaves) + children, err = backend.Blockchain().Children(blocks[0]) + require.NoError(t, err) + require.Equal(t, []hash.H256{bestHash}, children) + }) + + t.Run("import_existing_block_as_new_head", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 3) + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + block1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + block2 := insertHeader(t, backend, 2, block1, nil, hash.H256("")) + block3 := insertHeader(t, backend, 3, block2, nil, hash.H256("")) + block4 := insertHeader(t, backend, 4, block3, nil, hash.H256("")) + block5 := insertHeader(t, backend, 5, block4, nil, hash.H256("")) + require.Equal(t, block5, backend.Blockchain().Info().BestHash) + + // Insert 1 as best again. This should fail because canonicalization_delay == 3 + // and best == 5 + trie := triedb.NewEmptyTrieDB[hash.H256, runtime.BlakeTwo256]( + trie.NewPrefixedMemoryDB[hash.H256, runtime.BlakeTwo256](), + ) + trie.SetVersion(triedb.V1) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 1, + dbHash(""), + trie.MustHash(), + block0, + runtime.Digest{}, + ) + op, err := backend.BeginOperation() + require.NoError(t, err) + err = op.SetBlockData(header, nil, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.ErrorIs(t, err, p_blockchain.ErrSetHeadTooOld) + + // Insert 2 as best again + header2, err := backend.Blockchain().Header(block2) + require.NoError(t, err) + require.NotNil(t, header2) + op, err = backend.BeginOperation() + require.NoError(t, err) + err = op.SetBlockData(*header2, nil, nil, nil, api.NewBlockStateBest) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + require.Equal(t, block2, backend.Blockchain().Info().BestHash) + }) + + t.Run("import_existing_state_fails", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + genesis, err := insertBlock(t, backend, 0, "", nil, "", nil, nil) + require.NoError(t, err) + + _, err = insertBlock(t, backend, 1, genesis, nil, "", nil, nil) + require.NoError(t, err) + + _, err = insertBlock(t, backend, 1, genesis, nil, "", nil, nil) + require.ErrorIs(t, err, p_blockchain.ErrStateDatabase) + }) + + t.Run("leaves_not_created_for_ancient_blocks", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + + block1a := insertHeader(t, backend, 1, block0, nil, "") + block2a := insertHeader(t, backend, 2, block1a, nil, "") + err := backend.FinalizeBlock(block1a, nil) + require.NoError(t, err) + leaves, err := backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block2a}, leaves) + + // Insert a fork prior to the finalization point. A leaf should not be created. + insertHeaderNoHead(t, backend, 1, block0, hash.H256(bytes.Repeat([]byte{1}, 32))) + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block2a}, leaves) + }) + + t.Run("revert_finalized_blocks", func(t *testing.T) { + pruningModes := []BlocksPruning{ + BlocksPruningSome(10), + BlocksPruningKeepAll{}, + } + + // we will create a chain with 11 blocks, finalize block #8 and then + // attempt to revert 5 blocks.
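+ // With BlocksPruningSome, states older than the last finalized block are pruned, + // so the revert below can only rewind to block #8; with BlocksPruningKeepAll every + // state is retained and the revert can go past the finalized block down to block #5.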
+ for _, pruningMode := range pruningModes { + t.Run(fmt.Sprintf("%T", pruningMode), func(t *testing.T) { + backend := NewTestBackend(t, pruningMode, 1) + + var parent hash.H256 + for i := uint64(0); i <= 10; i++ { + var err error + parent, err = insertBlock(t, backend, i, parent, nil, "", nil, nil) + require.NoError(t, err) + } + + require.Equal(t, uint64(10), backend.Blockchain().Info().BestNumber) + + block8, err := backend.Blockchain().Hash(8) + require.NoError(t, err) + require.NotNil(t, block8) + err = backend.FinalizeBlock(*block8, nil) + require.NoError(t, err) + _, _, err = backend.Revert(5, true) + require.NoError(t, err) + + _, ok := pruningMode.(BlocksPruningSome) + if ok { + // we can only revert to blocks for which we have state, if pruning is enabled + // then the last state available will be that of the latest finalized block + require.Equal(t, uint64(8), backend.Blockchain().Info().BestNumber) + } else { + // otherwise if we're not doing state pruning we can revert past finalized blocks + require.Equal(t, uint64(5), backend.Blockchain().Info().BestNumber) + } + + }) + } + }) + + t.Run("revert_non_best_blocks", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + genesis, err := insertBlock(t, backend, 0, "", nil, "", nil, nil) + require.NoError(t, err) + + block1, err := insertBlock(t, backend, 1, genesis, nil, "", nil, nil) + require.NoError(t, err) + + block2, err := insertBlock(t, backend, 2, block1, nil, "", nil, nil) + require.NoError(t, err) + + var block3 hash.H256 + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, block1) + require.NoError(t, err) + trie := triedb.NewEmptyTrieDB[hash.H256, runtime.BlakeTwo256]( + trie.NewPrefixedMemoryDB[hash.H256, runtime.BlakeTwo256](), + ) + trie.SetVersion(triedb.V1) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 3, + dbHash(""), + trie.MustHash(), + block2, + runtime.Digest{}, + ) + + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateNormal) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + block3 = header.Hash() + } + + var block4 hash.H256 + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, block2) + require.NoError(t, err) + trie := triedb.NewEmptyTrieDB[hash.H256, runtime.BlakeTwo256]( + trie.NewPrefixedMemoryDB[hash.H256, runtime.BlakeTwo256](), + ) + trie.SetVersion(triedb.V1) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 4, + "", + trie.MustHash(), + block3, + runtime.Digest{}, + ) + + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateNormal) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + block4 = header.Hash() + } + + var block3Fork hash.H256 + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, block2) + require.NoError(t, err) + trie := triedb.NewEmptyTrieDB[hash.H256, runtime.BlakeTwo256]( + trie.NewPrefixedMemoryDB[hash.H256, runtime.BlakeTwo256](), + ) + trie.SetVersion(triedb.V1) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 3, + hash.NewH256FromLowUint64BigEndian(42), + trie.MustHash(), + block2, + runtime.Digest{}, + ) + + err = op.SetBlockData(header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateNormal) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + block3Fork = header.Hash() + } 
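+ // The tree is now: genesis -> 1 -> 2 -> 3 -> 4, plus a fork at height 3 built on + // block 2. The assertions below verify that the revert drops the fork and every + // block above block 1, leaving block 1 as the only leaf.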
+ + require.True(t, backend.HaveStateAt(block1, 1)) + require.True(t, backend.HaveStateAt(block2, 2)) + require.True(t, backend.HaveStateAt(block3, 3)) + require.True(t, backend.HaveStateAt(block4, 4)) + require.True(t, backend.HaveStateAt(block3Fork, 3)) + + leaves, err := backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block4, block3Fork}, leaves) + + leaf := backend.blockchain.leaves.HighestLeaf() + require.NotNil(t, leaf) + require.Equal(t, uint64(4), leaf.Number) + + number, _, err := backend.Revert(1, false) + require.NoError(t, err) + require.Equal(t, uint64(3), number) + + require.True(t, backend.HaveStateAt(block1, 1)) + require.False(t, backend.HaveStateAt(block2, 2)) + require.False(t, backend.HaveStateAt(block3, 3)) + require.False(t, backend.HaveStateAt(block4, 4)) + require.False(t, backend.HaveStateAt(block3Fork, 3)) + + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block1}, leaves) + + leaf = backend.blockchain.leaves.HighestLeaf() + require.NotNil(t, leaf) + require.Equal(t, uint64(1), leaf.Number) + }) + + t.Run("no_duplicated_leaves_allowed", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(10), 10) + + block0 := insertHeader(t, backend, 0, hash.H256(""), nil, hash.H256("")) + block1 := insertHeader(t, backend, 1, block0, nil, hash.H256("")) + // Add block 2 not as the best block + block2 := insertHeaderNoHead(t, backend, 2, block1, "") + leaves, err := backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block2}, leaves) + require.Equal(t, block1, backend.Blockchain().Info().BestHash) + + // Add block 2 as the best block + block2 = insertHeader(t, backend, 2, block1, nil, "") + leaves, err = backend.Blockchain().Leaves() + require.NoError(t, err) + require.Equal(t, []hash.H256{block2}, leaves) + require.Equal(t, block2, backend.Blockchain().Info().BestHash) + }) + + t.Run("force_delayed_canonicalize_waiting_for_blocks_to_be_finalized", func(t *testing.T) { + pruningModes := []BlocksPruning{ + BlocksPruningSome(10), + BlocksPruningKeepAll{}, + BlocksPruningKeepFinalized{}, + } + + for _, pruningMode := range pruningModes { + t.Run(fmt.Sprintf("%T", pruningMode), func(t *testing.T) { + backend := NewTestBackend(t, pruningMode, 1) + + genesis, err := insertBlock(t, backend, 0, "", nil, "", nil, nil) + require.NoError(t, err) + + var block1 hash.H256 + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, genesis) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 1, + "", + "", + genesis, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte{1, 3, 5}, Value: nil}, + {Key: []byte{5, 5, 5}, Value: []byte{4, 5, 6}}, + } + + root, overlay := op.oldState.state.StorageRoot(deltas, storage.StateVersionV1) + err = op.UpdateDBStorage(overlay) + require.NoError(t, err) + header.SetStateRoot(root) + h := header.Hash() + + copiedDeltas := make(statemachine.StorageCollection, 0) + for _, delta := range deltas { + copiedDeltas = append(copiedDeltas, statemachine.StorageKeyValue{ + StorageKey: delta.Key, + StorageValue: delta.Value, + }) + } + err = op.UpdateStorage(copiedDeltas, nil) + require.NoError(t, err) + err = op.SetBlockData( + header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateNormal, + ) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + block1 = h + } + + _, ok := 
pruningMode.(BlocksPruningSome) + if ok { + require.Equal(t, statedb.LastCanonicalizedBlock(0), backend.storage.StateDB.LastCanonicalized()) + } + + // This should not trigger any forced canonicalization as we haven't imported any + // best block yet. + var block2 hash.H256 + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, block1) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 2, + "", + "", + block1, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte{5, 5, 5}, Value: []byte{4, 5, 6, 2}}, + } + + root, overlay := op.oldState.state.StorageRoot(deltas, storage.StateVersionV1) + err = op.UpdateDBStorage(overlay) + require.NoError(t, err) + header.SetStateRoot(root) + h := header.Hash() + + copiedDeltas := make(statemachine.StorageCollection, 0) + for _, delta := range deltas { + copiedDeltas = append(copiedDeltas, statemachine.StorageKeyValue{ + StorageKey: delta.Key, + StorageValue: delta.Value, + }) + } + err = op.UpdateStorage(copiedDeltas, nil) + require.NoError(t, err) + err = op.SetBlockData( + header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateNormal, + ) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + block2 = h + } + + _, ok = pruningMode.(BlocksPruningSome) + if ok { + require.Equal(t, statedb.LastCanonicalizedBlock(0), backend.storage.StateDB.LastCanonicalized()) + } + + // This should also not trigger it yet, because we import a best block, but the best + // block from the POV of the db is still at 0. + var block3 hash.H256 + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, block2) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 3, + "", + "", + block2, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte{5, 5, 5}, Value: []byte{4, 5, 6, 3}}, + } + + root, overlay := op.oldState.state.StorageRoot(deltas, storage.StateVersionV1) + err = op.UpdateDBStorage(overlay) + require.NoError(t, err) + header.SetStateRoot(root) + h := header.Hash() + + copiedDeltas := make(statemachine.StorageCollection, 0) + for _, delta := range deltas { + copiedDeltas = append(copiedDeltas, statemachine.StorageKeyValue{ + StorageKey: delta.Key, + StorageValue: delta.Value, + }) + } + err = op.UpdateStorage(copiedDeltas, nil) + require.NoError(t, err) + err = op.SetBlockData( + header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest, + ) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + block3 = h + } + + _, ok = pruningMode.(BlocksPruningSome) + if ok { + require.Equal(t, statedb.LastCanonicalizedBlock(0), backend.storage.StateDB.LastCanonicalized()) + } + + // Now it should kick in.
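+ // Block 4 is imported as best with a canonicalization delay of 1, so the best + // block from the db's POV finally advances far enough for the forced + // canonicalization to move LastCanonicalized from 0 to 2, as asserted below.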
+ var block4 hash.H256 + { + op := backend.beginOperation() + err := backend.BeginStateOperation(op, block3) + require.NoError(t, err) + header := generic.NewHeader[uint64, dbHash, runtime.BlakeTwo256]( + 4, + "", + "", + block3, + runtime.Digest{}, + ) + + deltas := []statemachine.Delta{ + {Key: []byte{5, 5, 5}, Value: []byte{4, 5, 6, 4}}, + } + + root, overlay := op.oldState.state.StorageRoot(deltas, storage.StateVersionV1) + err = op.UpdateDBStorage(overlay) + require.NoError(t, err) + header.SetStateRoot(root) + h := header.Hash() + + copiedDeltas := make(statemachine.StorageCollection, 0) + for _, delta := range deltas { + copiedDeltas = append(copiedDeltas, statemachine.StorageKeyValue{ + StorageKey: delta.Key, + StorageValue: delta.Value, + }) + } + err = op.UpdateStorage(copiedDeltas, nil) + require.NoError(t, err) + err = op.SetBlockData( + header, []rt_testing.ExtrinsicsWrapper[uint64]{}, nil, nil, api.NewBlockStateBest, + ) + require.NoError(t, err) + + err = backend.CommitOperation(op) + require.NoError(t, err) + + block4 = h + } + + _, ok = pruningMode.(BlocksPruningSome) + if ok { + require.Equal(t, statedb.LastCanonicalizedBlock(2), backend.storage.StateDB.LastCanonicalized()) + } + + hash, err := backend.Blockchain().Hash(1) + require.NoError(t, err) + require.NotNil(t, hash) + require.Equal(t, block1, *hash) + hash, err = backend.Blockchain().Hash(2) + require.NoError(t, err) + require.NotNil(t, hash) + require.Equal(t, block2, *hash) + hash, err = backend.Blockchain().Hash(3) + require.NoError(t, err) + require.NotNil(t, hash) + require.Equal(t, block3, *hash) + hash, err = backend.Blockchain().Hash(4) + require.NoError(t, err) + require.NotNil(t, hash) + require.Equal(t, block4, *hash) + }) + } + }) + + t.Run("pinned_blocks_on_finalize", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(1), 10) + var blocks []hash.H256 + var prevHash hash.H256 + + var buildJustification = func(i uint64) *runtime.Justification { + return &runtime.Justification{ + ConsensusEngineID: [4]byte{}, + EncodedJustification: []byte{uint8(i)}, + } + } + + // Block tree: + // 0 -> 1 -> 2 -> 3 -> 4 + for i := 0; i < 5; i++ { + hash, err := insertBlock(t, + backend, + uint64(i), + prevHash, + nil, + hash.H256(""), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(i)}}, + nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + // Avoid block pruning. + err = backend.PinBlock(blocks[i]) + require.NoError(t, err) + + prevHash = hash + } + + bc := backend.Blockchain() + + // Check that we can properly access values when there is reference count + // but no value. + body, err := bc.Body(blocks[1]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(1)}}, body) + + // Block 1 gets pinned three times + err = backend.PinBlock(blocks[1]) + require.NoError(t, err) + err = backend.PinBlock(blocks[1]) + require.NoError(t, err) + + // Finalize all blocks. This will trigger pruning. + op := backend.beginOperation() + err = backend.BeginStateOperation(op, blocks[4]) + require.NoError(t, err) + for i := 1; i < 5; i++ { + err := op.MarkFinalized(blocks[i], buildJustification(uint64(i))) + require.NoError(t, err) + } + err = backend.CommitOperation(op) + require.NoError(t, err) + + // Block 0, 1, 2, 3 are pinned, so all values should be cached. + // Block 4 is inside the pruning window, its value is in db. 
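+ // The pinned values survive the prune because the backend copies a block's body + // and justifications into the pinned blocks cache (see insertPersistedBodyIfPinned + // and insertPersistedJustificationsIfPinned in db.go) before its database entries are deleted.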
+ body, err = bc.Body(blocks[0]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(0)}}, body) + + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(1)}}, body) + justifications, err := bc.Justifications(blocks[1]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(1)}, justifications) + + body, err = bc.Body(blocks[2]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(2)}}, body) + justifications, err = bc.Justifications(blocks[2]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(2)}, justifications) + + body, err = bc.Body(blocks[3]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(3)}}, body) + justifications, err = bc.Justifications(blocks[3]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(3)}, justifications) + + body, err = bc.Body(blocks[4]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(4)}}, body) + justifications, err = bc.Justifications(blocks[4]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(4)}, justifications) + + // Unpin all blocks. Values should be removed from cache. + for _, block := range blocks { + backend.UnpinBlock(block) + } + + body, err = bc.Body(blocks[0]) + require.NoError(t, err) + require.Nil(t, body) + // Block 1 was pinned twice, we expect it to be still cached + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(1)}}, body) + justifications, err = bc.Justifications(blocks[1]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(1)}, justifications) + // Headers should also be available while pinned + header, err := bc.Header(blocks[1]) + require.NoError(t, err) + require.NotNil(t, header) + body, err = bc.Body(blocks[2]) + require.NoError(t, err) + require.Nil(t, body) + justifications, err = bc.Justifications(blocks[2]) + require.NoError(t, err) + require.Nil(t, justifications) + body, err = bc.Body(blocks[3]) + require.NoError(t, err) + require.Nil(t, body) + justifications, err = bc.Justifications(blocks[3]) + require.NoError(t, err) + require.Nil(t, justifications) + + // After these unpins, block 1 should also be removed + backend.UnpinBlock(blocks[1]) + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(1)}}, body) + justifications, err = bc.Justifications(blocks[1]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(1)}, justifications) + backend.UnpinBlock(blocks[1]) + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Nil(t, body) + justifications, err = bc.Justifications(blocks[1]) + require.NoError(t, err) + require.Nil(t, justifications) + + // Block 4 is inside the pruning window and still kept + body, err = bc.Body(blocks[4]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(4)}}, body) + justifications, err = bc.Justifications(blocks[4]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(4)}, justifications) + + // Block tree: + // 0 -> 1 -> 2 -> 3 -> 4 -> 5 + hash, err := insertBlock( + t, backend, 5, prevHash, nil, 
"", []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(5)}}, nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + + err = backend.PinBlock(blocks[4]) + require.NoError(t, err) + // Mark block 5 as finalized. + op = backend.beginOperation() + err = backend.BeginStateOperation(op, blocks[5]) + require.NoError(t, err) + err = op.MarkFinalized(blocks[5], buildJustification(uint64(5))) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + + body, err = bc.Body(blocks[0]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[2]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[3]) + require.NoError(t, err) + require.Nil(t, body) + + body, err = bc.Body(blocks[4]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(4)}}, body) + justifications, err = bc.Justifications(blocks[4]) + require.NoError(t, err) + require.Equal(t, runtime.Justifications{*buildJustification(4)}, justifications) + body, err = bc.Body(blocks[5]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(5)}}, body) + header, err = bc.Header(blocks[5]) + require.NoError(t, err) + require.NotNil(t, header) + + backend.UnpinBlock(blocks[4]) + body, err = bc.Body(blocks[4]) + require.NoError(t, err) + require.Nil(t, body) + justifications, err = bc.Justifications(blocks[4]) + require.NoError(t, err) + require.Nil(t, justifications) + + // Append a justification to block 5. + backend.AppendJustification(blocks[5], runtime.Justification{ + ConsensusEngineID: [4]byte{0, 0, 0, 1}, + EncodedJustification: []byte{42}, + }) + + hash, err = insertBlock( + t, backend, 6, blocks[5], nil, "", []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(6)}}, nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + + // Pin block 5 so it gets loaded into the cache on prune + err = backend.PinBlock(blocks[5]) + require.NoError(t, err) + + // Finalize block 6 so block 5 gets pruned. Since it is pinned both justifications should be + // in memory. + op = backend.beginOperation() + err = backend.BeginStateOperation(op, blocks[6]) + require.NoError(t, err) + err = op.MarkFinalized(blocks[6], nil) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + + body, err = bc.Body(blocks[5]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(5)}}, body) + expected := runtime.Justifications{ + *buildJustification(5), + runtime.Justification{ + ConsensusEngineID: [4]byte{0, 0, 0, 1}, + EncodedJustification: []byte{42}, + }, + } + justifications, err = bc.Justifications(blocks[5]) + require.NoError(t, err) + require.Equal(t, expected, justifications) + }) + + t.Run("pinned_blocks_on_finalize_with_fork", func(t *testing.T) { + backend := NewTestBackend(t, BlocksPruningSome(1), 10) + var blocks []hash.H256 + var prevHash hash.H256 + + // Block tree: + // 0 -> 1 -> 2 -> 3 -> 4 + for i := 0; i < 5; i++ { + hash, err := insertBlock(t, + backend, + uint64(i), + prevHash, + nil, + hash.H256(""), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(i)}}, + nil, + ) + require.NoError(t, err) + blocks = append(blocks, hash) + + // Avoid block pruning. + err = backend.PinBlock(blocks[i]) + require.NoError(t, err) + + prevHash = hash + } + + // Insert a fork at the second block. 
+ // Block tree: + // 0 -> 1 -> 2 -> 3 -> 4 + // \ -> 2 -> 3 + forkHashRoot, err := insertBlock(t, + backend, + 2, + blocks[1], + nil, + hash.NewRandomH256(), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: 2}}, + nil, + ) + require.NoError(t, err) + forkHash3, err := insertBlock(t, + backend, + 3, + forkHashRoot, + nil, + hash.NewRandomH256(), + []rt_testing.ExtrinsicsWrapper[uint64]{{T: 3}, {T: 11}}, + nil, + ) + require.NoError(t, err) + + // Do not prune the fork hash. + err = backend.PinBlock(forkHash3) + require.NoError(t, err) + + op, err := backend.BeginOperation() + require.NoError(t, err) + err = backend.BeginStateOperation(op, blocks[4]) + require.NoError(t, err) + err = op.MarkHead(blocks[4]) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + + for i := 1; i < 5; i++ { + op, err := backend.BeginOperation() + require.NoError(t, err) + err = backend.BeginStateOperation(op, blocks[4]) + require.NoError(t, err) + err = op.MarkFinalized(blocks[i], nil) + require.NoError(t, err) + err = backend.CommitOperation(op) + require.NoError(t, err) + } + + bc := backend.Blockchain() + body, err := bc.Body(blocks[0]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(0)}}, body) + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(1)}}, body) + body, err = bc.Body(blocks[2]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(2)}}, body) + body, err = bc.Body(blocks[3]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(3)}}, body) + body, err = bc.Body(blocks[4]) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{{T: uint64(4)}}, body) + // Check the fork hashes. + body, err = bc.Body(forkHashRoot) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(forkHash3) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{ + {T: 3}, + {T: 11}, + }, body) + + // Unpin all blocks, except the forked one + for _, block := range blocks { + backend.UnpinBlock(block) + } + + body, err = bc.Body(blocks[0]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[1]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[2]) + require.NoError(t, err) + require.Nil(t, body) + body, err = bc.Body(blocks[3]) + require.NoError(t, err) + require.Nil(t, body) + + body, err = bc.Body(forkHash3) + require.NoError(t, err) + require.Equal(t, []rt_testing.ExtrinsicsWrapper[uint64]{ + {T: 3}, + {T: 11}, + }, body) + backend.UnpinBlock(forkHash3) + body, err = bc.Body(forkHash3) + require.NoError(t, err) + require.Nil(t, body) + }) +} diff --git a/internal/client/db/children.go b/internal/client/db/children.go index 04c1434904..d3c27a78b1 100644 --- a/internal/client/db/children.go +++ b/internal/client/db/children.go @@ -13,7 +13,7 @@ import ( // Functionality for reading and storing children hashes from db. -// Returns the hashes of the children blocks of the block with `parentHash`. +// Returns the hashes of the children blocks of the block with parentHash.
func readChildren[H comparable]( db database.Database[hash.H256], column database.ColumnID, prefix []byte, parentHash H, ) ([]H, error) { @@ -28,13 +28,13 @@ func readChildren[H comparable]( var children []H err := scale.Unmarshal(rawVal, &children) if err != nil { - return nil, fmt.Errorf("Error decoding children: %w", err) + return nil, fmt.Errorf("error decoding children: %w", err) } return children, nil } -// Insert the key-value pair (`parentHash`, `childrenHashes`) in the transaction. +// Inserts parentHash and childrenHashes in the transaction. // Any existing value is overwritten upon write. func writeChildren[H comparable]( tx *database.Transaction[hash.H256], column database.ColumnID, prefix []byte, parentHash H, childrenHashes []H, @@ -44,7 +44,7 @@ func writeChildren[H comparable]( tx.Set(column, key, scale.MustMarshal(childrenHashes)) } -// Prepare transaction to remove the children of `parent_hash`. +// Prepare transaction to remove the children of parentHash. func removeChildren[H comparable]( tx *database.Transaction[hash.H256], column database.ColumnID, prefix []byte, parentHash H, ) { diff --git a/internal/client/db/columns/columns.go b/internal/client/db/columns/columns.go index 1fe38db590..b0bc24e605 100644 --- a/internal/client/db/columns/columns.go +++ b/internal/client/db/columns/columns.go @@ -6,18 +6,15 @@ package columns import "github.com/ChainSafe/gossamer/internal/primitives/database" const ( - Meta database.ColumnID = 0 - State database.ColumnID = 1 - StateMeta database.ColumnID = 2 - // maps hashes to lookup keys and numbers to canon hashes. - KeyLookup database.ColumnID = 3 + Meta database.ColumnID = 0 + State database.ColumnID = 1 + StateMeta database.ColumnID = 2 + KeyLookup database.ColumnID = 3 // maps hashes to lookup keys and numbers to canon hashes. Header database.ColumnID = 4 Body database.ColumnID = 5 Justifications database.ColumnID = 6 Aux database.ColumnID = 8 - // Offchain workers local storage - Offchain database.ColumnID = 9 - // Transactions - Transaction database.ColumnID = 11 - BodyIndex database.ColumnID = 12 + Offchain database.ColumnID = 9 // offchain workers local storage + Transaction database.ColumnID = 11 + BodyIndex database.ColumnID = 12 ) diff --git a/internal/client/db/db.go b/internal/client/db/db.go index 816164df85..40e5e2f2ae 100644 --- a/internal/client/db/db.go +++ b/internal/client/db/db.go @@ -17,9 +17,12 @@ import ( "github.com/ChainSafe/gossamer/internal/primitives/runtime" "github.com/ChainSafe/gossamer/internal/primitives/runtime/generic" "github.com/ChainSafe/gossamer/pkg/scale" - "github.com/li1234yun/gods-generic/maps/linkedhashmap" + "github.com/ugurcsen/gods-generic/maps/linkedhashmap" ) +// Hash type that this backend uses for the database. +type dbHash = hash.H256 + const numCachedHeaders = 8 // An extrinsic entry in the database. 
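A minimal sketch of how the newDbExtrinsic constructor added in the next hunk might be used to build a full-body entry. This usage is illustrative only and not part of the change; ExtrinsicsWrapper is the test extrinsic type used elsewhere in this patch:

    // Wrap a concrete extrinsic in the dbExtrinsicFull variant; indexed
    // transactions would use dbExtrinsicIndexed with the transaction hash instead.
    ext := rt_testing.ExtrinsicsWrapper[uint64]{T: 42}
    entry := newDbExtrinsic[rt_testing.ExtrinsicsWrapper[uint64]](
        dbExtrinsicFull[rt_testing.ExtrinsicsWrapper[uint64]]{Extrinsic: ext},
    )
    _ = entry // the backend SCALE-encodes entries like this into the body column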
@@ -35,6 +38,12 @@ func setDbExtrinsic[E runtime.Extrinsic, Value dbExtrinsicValues[E]](mvdt *dbExt mvdt.inner = value } +func newDbExtrinsic[E runtime.Extrinsic, Value dbExtrinsicValues[E]](val Value) dbExtrinsic[E] { + dbe := dbExtrinsic[E]{} + setDbExtrinsic(&dbe, val) + return dbe +} + func (mvdt *dbExtrinsic[E]) SetValue(value any) (err error) { switch value := value.(type) { case dbExtrinsicIndexed: @@ -100,9 +109,9 @@ type blockchainDB[H runtime.Hash, N runtime.Number, E runtime.Extrinsic, Header leaves api.LeafSet[H, N] leavesMtx sync.RWMutex headerMetadataCache blockchain.HeaderMetadataCache[H, N] - headerCache linkedhashmap.Map[H, *runtime.Header[N, H]] + headerCache linkedhashmap.Map[H, *Header] headerCacheMtx sync.Mutex - pinnedBlocksCache pinnedBlocksCache[H] + pinnedBlocksCache pinnedBlocksCache[H, E] pinnedBlocksCacheMtx sync.RWMutex } @@ -122,8 +131,8 @@ func newBlockchainDB[ leaves: leaves, meta: meta, headerMetadataCache: blockchain.NewHeaderMetadataCache[H, N](), - headerCache: *linkedhashmap.New[H, *runtime.Header[N, H]](), - pinnedBlocksCache: newPinnedBlocksCache[H](), + headerCache: *linkedhashmap.New[H, *Header](), + pinnedBlocksCache: newPinnedBlocksCache[H, E](), }, nil } @@ -226,7 +235,7 @@ func (bdb *blockchainDB[H, N, E, Header]) unpin(hash H) { } func (bdb *blockchainDB[H, N, E, Header]) justificationsUncached(hash H) (runtime.Justifications, error) { - blockID := generic.NewBlockID[H, N](generic.BlockIDHash[H]{Inner: hash}) + blockID := generic.NewBlockID[H, N](generic.BlockIDHash[H]{Hash: hash}) justificationsBytes, err := readDB[H, N](bdb.db, columns.KeyLookup, columns.Justifications, blockID) if err != nil { return nil, err @@ -242,22 +251,18 @@ func (bdb *blockchainDB[H, N, E, Header]) justificationsUncached(hash H) (runtim return nil, nil } -func (bdb *blockchainDB[H, N, E, Header]) bodyUncached(hash H) ([]runtime.Extrinsic, error) { - blockID := generic.NewBlockID[H, N](generic.BlockIDHash[H]{Inner: hash}) +func (bdb *blockchainDB[H, N, E, Header]) bodyUncached(hash H) ([]E, error) { + blockID := generic.NewBlockID[H, N](generic.BlockIDHash[H]{Hash: hash}) bodyBytes, err := readDB[H, N](bdb.db, columns.KeyLookup, columns.Body, blockID) if err != nil { return nil, err } if bodyBytes != nil { - var extrinsics []E - err := scale.Unmarshal(bodyBytes, &extrinsics) + var body []E + err := scale.Unmarshal(bodyBytes, &body) if err != nil { return nil, err } - var body []runtime.Extrinsic - for _, e := range extrinsics { - body = append(body, e) - } return body, nil } @@ -269,11 +274,11 @@ func (bdb *blockchainDB[H, N, E, Header]) bodyUncached(hash H) ([]runtime.Extrin return nil, nil } var index []dbExtrinsic[E] - err = scale.Unmarshal(indexBytes, index) + err = scale.Unmarshal(indexBytes, &index) if err != nil { return nil, err } - var body []runtime.Extrinsic + var body []E for _, ex := range index { dbex, err := ex.Value() if err != nil { @@ -287,11 +292,11 @@ func (bdb *blockchainDB[H, N, E, Header]) bodyUncached(hash H) ([]runtime.Extrin var ex E err := scale.Unmarshal(input, &ex) if err != nil { - return nil, fmt.Errorf("Error decoding indexed extrinsic: %w", err) + return nil, fmt.Errorf("error decoding indexed extrinsic: %w", err) } body = append(body, ex) } else { - return nil, fmt.Errorf("Missing indexed transaction %v", hash) + return nil, fmt.Errorf("missing indexed transaction %v", hash) } case dbExtrinsicFull[E]: body = append(body, dbex.Extrinsic) @@ -300,7 +305,7 @@ func (bdb *blockchainDB[H, N, E, Header]) bodyUncached(hash H) ([]runtime.Extrin 
return body, nil } -func (bdb *blockchainDB[H, N, E, Header]) cacheHeader(hash H, header *runtime.Header[N, H]) { +func (bdb *blockchainDB[H, N, E, Header]) cacheHeader(hash H, header *Header) { bdb.headerCache.Put(hash, header) for bdb.headerCache.Size() > numCachedHeaders { iterator := bdb.headerCache.Iterator() @@ -311,7 +316,7 @@ func (bdb *blockchainDB[H, N, E, Header]) cacheHeader(hash H, header *runtime.He } } -func (bdb *blockchainDB[H, N, E, Header]) Header(hash H) (runtime.Header[N, H], error) { +func (bdb *blockchainDB[H, N, E, Header]) header(hash H) (*Header, error) { bdb.headerCacheMtx.Lock() defer bdb.headerCacheMtx.Unlock() val, ok := bdb.headerCache.Get(hash) @@ -319,19 +324,27 @@ func (bdb *blockchainDB[H, N, E, Header]) Header(hash H) (runtime.Header[N, H], // TODO: create issue to fork linkedhashmap, and add cache.get_refresh(&hash) bdb.headerCache.Remove(hash) bdb.headerCache.Put(hash, val) - return *val, nil + return val, nil } header, err := readHeader[H, N, Header]( bdb.db, columns.KeyLookup, columns.Header, - generic.BlockIDHash[H]{Inner: hash}, + generic.BlockIDHash[H]{Hash: hash}, ) if err != nil { - return nil, err + return header, err } bdb.cacheHeader(hash, header) - return *header, nil + return header, nil +} + +func (bdb *blockchainDB[H, N, E, Header]) Header(hash H) (*Header, error) { + header, err := bdb.header(hash) + if err != nil { + return nil, err + } + return header, nil } func (bdb *blockchainDB[H, N, E, Header]) Info() blockchain.Info[H, N] { @@ -343,12 +356,14 @@ func (bdb *blockchainDB[H, N, E, Header]) Info() blockchain.Info[H, N] { GenesisHash: bdb.meta.GenesisHash, FinalizedHash: bdb.meta.FinalizedHash, FinalizedNumber: bdb.meta.FinalizedNumber, - FinalizedState: &struct { + NumberLeaves: bdb.leaves.Count(), + BlockGap: bdb.meta.BlockGap, + } + if bdb.meta.FinalizedState != nil { + info.FinalizedState = &struct { Hash H Number N - }{bdb.meta.FinalizedState.Hash, bdb.meta.FinalizedState.Number}, - NumberLeaves: bdb.leaves.Count(), - BlockGap: bdb.meta.BlockGap, + }{bdb.meta.FinalizedState.Hash, bdb.meta.FinalizedState.Number} } return info } @@ -377,24 +392,24 @@ func (bdb *blockchainDB[H, N, E, Header]) Hash(number N) (*H, error) { bdb.db, columns.KeyLookup, columns.Header, - generic.BlockIDNumber[N]{Inner: number}, + generic.BlockIDNumber[N]{Number: number}, ) if err != nil { return nil, err } - if header == nil { - return nil, nil + if header != nil { + h := (*header).Hash() + return &h, nil } - h := (*header).Hash() - return &h, nil + return nil, nil } func (bdb *blockchainDB[H, N, E, Header]) BlockHashFromID(id generic.BlockID) (*H, error) { switch id := id.(type) { case generic.BlockIDHash[H]: - return &id.Inner, nil + return &id.Hash, nil case generic.BlockIDNumber[N]: - return bdb.Hash(id.Inner) + return bdb.Hash(id.Number) default: panic("unsupported block id type") } @@ -403,15 +418,15 @@ func (bdb *blockchainDB[H, N, E, Header]) BlockHashFromID(id generic.BlockID) (* func (bdb *blockchainDB[H, N, E, Header]) BlockNumberFromID(id generic.BlockID) (*N, error) { switch id := id.(type) { case generic.BlockIDHash[H]: - return bdb.Number(id.Inner) + return bdb.Number(id.Hash) case generic.BlockIDNumber[N]: - return &id.Inner, nil + return &id.Number, nil default: panic("unsupported block id type") } } -func (bdb *blockchainDB[H, N, E, Header]) Body(hash H) ([]runtime.Extrinsic, error) { +func (bdb *blockchainDB[H, N, E, Header]) Body(hash H) ([]E, error) { bdb.pinnedBlocksCacheMtx.RLock() defer bdb.pinnedBlocksCacheMtx.RUnlock() body := 
bdb.pinnedBlocksCache.Body(hash) @@ -427,7 +442,7 @@ func (bdb *blockchainDB[H, N, E, Header]) Justifications(hash H) (runtime.Justif defer bdb.pinnedBlocksCacheMtx.RUnlock() justifications := bdb.pinnedBlocksCache.Justifications(hash) if justifications != nil { - return justifications, nil + return *justifications, nil } return bdb.justificationsUncached(hash) @@ -471,8 +486,8 @@ func (bdb *blockchainDB[H, N, E, Header]) LongestContaining(baseHash H, importLo importLock.RLock() defer importLock.RUnlock() info := bdb.Info() - if info.FinalizedNumber > baseHeader.Number() { - // `baseHeader` is on a dead fork. + if info.FinalizedNumber > (*baseHeader).Number() { + // baseHeader is on a dead fork. return nil, nil } return bdb.Leaves() @@ -497,14 +512,14 @@ func (bdb *blockchainDB[H, N, E, Header]) LongestContaining(baseHash H, importLo return nil, err } if currentHeader == nil { - return nil, fmt.Errorf("Failed to get header for hash %v", currentHash) + return nil, fmt.Errorf("failed to get header for hash %v", currentHash) } - if currentHeader.Number() < baseHeader.Number() { + if (*currentHeader).Number() < (*baseHeader).Number() { break } - currentHash = currentHeader.ParentHash() + currentHash = (*currentHeader).ParentHash() } } @@ -512,7 +527,7 @@ func (bdb *blockchainDB[H, N, E, Header]) LongestContaining(baseHash H, importLo // those which can still be finalized. // // FIXME: substrate issue #1558 only issue this warning when not on a dead fork - log.Printf("WARN: Block %v exists in chain but not found when following all leaves backwards\n", baseHash) + log.Printf("WARN: Block %v exists in chain but not found when following all leaves backwards", baseHash) return nil, nil } @@ -525,7 +540,7 @@ func (bdb *blockchainDB[H, N, E, Header]) HasIndexedTransaction(hash H) (bool, e } func (bdb *blockchainDB[H, N, E, Header]) BlockIndexedBody(hash H) ([][]byte, error) { - bodyBytes, err := readDB[H, N](bdb.db, columns.KeyLookup, columns.BodyIndex, generic.BlockIDHash[H]{Inner: hash}) + bodyBytes, err := readDB[H, N](bdb.db, columns.KeyLookup, columns.BodyIndex, generic.BlockIDHash[H]{Hash: hash}) if err != nil { return nil, err } @@ -535,13 +550,13 @@ func (bdb *blockchainDB[H, N, E, Header]) BlockIndexedBody(hash H) ([][]byte, er index := make([]dbExtrinsic[E], 0) err = scale.Unmarshal(bodyBytes, &index) if err != nil { - return nil, fmt.Errorf("Error decoding body list %w", err) + return nil, fmt.Errorf("error decoding body list %w", err) } var transactions [][]byte for _, ex := range index { hash, err := ex.Value() if err != nil { - return nil, fmt.Errorf("Error decoding body list %w", err) + return nil, fmt.Errorf("error decoding body list %w", err) } indexed, ok := hash.(dbExtrinsicIndexed) if !ok { @@ -549,7 +564,7 @@ func (bdb *blockchainDB[H, N, E, Header]) BlockIndexedBody(hash H) ([][]byte, er } t := bdb.db.Get(columns.Transaction, indexed.Hash.Bytes()) if t == nil { - return nil, fmt.Errorf("Missing indexed transaction %v", hash) + return nil, fmt.Errorf("missing indexed transaction %v", hash) } transactions = append(transactions, t) } @@ -566,9 +581,9 @@ func (bdb *blockchainDB[H, N, E, Header]) HeaderMetadata(hash H) (blockchain.Cac return blockchain.CachedHeaderMetadata[H, N]{}, err } if header == nil { - return blockchain.CachedHeaderMetadata[H, N]{}, fmt.Errorf("Header was not found in the database: %v\n", hash) + return blockchain.CachedHeaderMetadata[H, N]{}, fmt.Errorf("header was not found in the database: %v", hash) } - headerMetadata := 
blockchain.NewCachedHeaderMetadata(header) + headerMetadata := blockchain.NewCachedHeaderMetadata(*header) bdb.headerMetadataCache.InsertHeaderMetadata(headerMetadata.Hash, headerMetadata) return headerMetadata, nil } diff --git a/internal/client/db/db_test.go b/internal/client/db/db_test.go index 51a738a088..fa242eb061 100644 --- a/internal/client/db/db_test.go +++ b/internal/client/db/db_test.go @@ -22,12 +22,13 @@ func (noopExtrinsic) IsSigned() *bool { // Check for interface fulfilment var ( - _ blockchain.HeaderBackend[hash.H256, uint] = &blockchainDB[ + _ blockchain.HeaderBackend[hash.H256, uint, *generic.Header[uint, hash.H256, runtime.BlakeTwo256]] = &blockchainDB[ hash.H256, uint, noopExtrinsic, *generic.Header[uint, hash.H256, runtime.BlakeTwo256]]{} _ blockchain.HeaderMetadata[hash.H256, uint] = &blockchainDB[ hash.H256, uint, noopExtrinsic, *generic.Header[uint, hash.H256, runtime.BlakeTwo256]]{} - _ blockchain.Backend[hash.H256, uint] = &blockchainDB[ - hash.H256, uint, noopExtrinsic, *generic.Header[uint, hash.H256, runtime.BlakeTwo256]]{} + _ blockchain.Backend[ + hash.H256, uint, *generic.Header[uint, hash.H256, runtime.BlakeTwo256], noopExtrinsic, + ] = &blockchainDB[hash.H256, uint, noopExtrinsic, *generic.Header[uint, hash.H256, runtime.BlakeTwo256]]{} ) func TestNewBlockchainDB(t *testing.T) { @@ -140,7 +141,7 @@ func TestBlockchainDB_insertPersistedJustificationsIfPinned(t *testing.T) { db.insertPersistedJustificationsIfPinned(someHash) assert.False(t, db.pinnedBlocksCache.Contains(someHash)) - // nothing in the db, but will pin `runtime.Justifications(nil)` + // nothing in the db, but will pin runtime.Justifications(nil) db.pinnedBlocksCache.Pin(someHash) err = db.insertPersistedJustificationsIfPinned(someHash) assert.NoError(t, err) @@ -163,7 +164,7 @@ func TestBlockchainDB_insertPersistedBodyIfPinned(t *testing.T) { db.insertPersistedBodyIfPinned(someHash) assert.False(t, db.pinnedBlocksCache.Contains(someHash)) - // nothing in the db, but will pin `[]runtime.Extrinsic(nil)` + // nothing in the db, but will pin []runtime.Extrinsic(nil) db.pinnedBlocksCache.Pin(someHash) err = db.insertPersistedBodyIfPinned(someHash) assert.NoError(t, err) diff --git a/internal/client/db/metakeys/metakeys.go b/internal/client/db/metakeys/metakeys.go index 8e6b9ce7e8..a5959599d3 100644 --- a/internal/client/db/metakeys/metakeys.go +++ b/internal/client/db/metakeys/metakeys.go @@ -1,10 +1,9 @@ // Copyright 2024 ChainSafe Systems (ON) // SPDX-License-Identifier: LGPL-3.0-only +// Package metakeys contains the keys of entries in the meta column. package metakeys -// Keys of entries in COLUMN_META. - // Type of storage (full or light). var Type = []byte("type") @@ -14,7 +13,7 @@ var BestBlock = []byte("best") // FinalizedBlock is last finalized block key. var FinalizedBlock = []byte("final") -// FinalizedStgate is last finalized state key. +// FinalizedState is last finalized state key. var FinalizedState = []byte("fstate") // BlockGap key.
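A minimal sketch of how these meta keys are read back, assuming a database.Database[hash.H256] handle named db (a hypothetical variable) and the columns package from this patch; decoding the returned bytes into block lookup entries is omitted:

    // Fetch the recorded best and last finalized block entries from the meta column.
    bestRaw := db.Get(columns.Meta, metakeys.BestBlock)
    finalizedRaw := db.Get(columns.Meta, metakeys.FinalizedBlock)
    if bestRaw == nil || finalizedRaw == nil {
        // Fresh database: no best or finalized block has been recorded yet.
    }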
diff --git a/internal/client/db/offchain/offchain.go b/internal/client/db/offchain/offchain.go new file mode 100644 index 0000000000..f91f6a7c7d --- /dev/null +++ b/internal/client/db/offchain/offchain.go @@ -0,0 +1,94 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package offchain + +import ( + "bytes" + "log" + "sync" + + "github.com/ChainSafe/gossamer/internal/client/db/columns" + "github.com/ChainSafe/gossamer/internal/primitives/core/hash" + "github.com/ChainSafe/gossamer/internal/primitives/database" +) + +// LocalStorage is local offchain storage. Implements offchain.OffchainStorage +type LocalStorage struct { + db database.Database[hash.H256] + locks map[string]*sync.Mutex + locksMtx sync.Mutex +} + +// Create offchain local storage with given backend. +func NewLocalStorage(db database.Database[hash.H256]) *LocalStorage { + return &LocalStorage{ + db: db, + locks: make(map[string]*sync.Mutex), + } +} + +func (ls *LocalStorage) Set(prefix, key, value []byte) { + var tx database.Transaction[hash.H256] + tx.Set(columns.Offchain, ConcatenatePrefixAndKey(prefix, key), value) + + err := ls.db.Commit(tx) + if err != nil { + log.Printf("ERROR: error setting on local storage: %v", err) + } +} + +func (ls *LocalStorage) Remove(prefix, key []byte) { + var tx database.Transaction[hash.H256] + tx.Remove(columns.Offchain, ConcatenatePrefixAndKey(prefix, key)) + + err := ls.db.Commit(tx) + if err != nil { + log.Printf("ERROR: error removing on local storage: %v", err) + } +} + +func (ls *LocalStorage) Get(prefix, key []byte) []byte { + return ls.db.Get(columns.Offchain, ConcatenatePrefixAndKey(prefix, key)) +} + +func (ls *LocalStorage) CompareAndSet(prefix, itemKey, oldValue, newValue []byte) bool { + key := ConcatenatePrefixAndKey(prefix, itemKey) + + ls.locksMtx.Lock() + _, ok := ls.locks[string(key)] + if !ok { + ls.locks[string(key)] = &sync.Mutex{} + } + keyLock := ls.locks[string(key)] + ls.locksMtx.Unlock() + + var isSet bool + { + keyLock.Lock() + val := ls.db.Get(columns.Offchain, key) + isSet = bytes.Equal(val, oldValue) + + if isSet { + ls.Set(prefix, itemKey, newValue) + } + } + + // clean the lock map if we're the only entry + ls.locksMtx.Lock() + { + keyLock.Unlock() + _, ok := ls.locks[string(key)] + if ok { + delete(ls.locks, string(key)) + } + } + ls.locksMtx.Unlock() + + return isSet +} + +// ConcatenatePrefixAndKey will concatenate the prefix and key to create an offchain key in the db. +func ConcatenatePrefixAndKey(prefix, key []byte) []byte { + return append(prefix, key...) 
+} diff --git a/internal/client/db/offchain/offchain_test.go b/internal/client/db/offchain/offchain_test.go new file mode 100644 index 0000000000..8836da3e26 --- /dev/null +++ b/internal/client/db/offchain/offchain_test.go @@ -0,0 +1,61 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package offchain + +import ( + "testing" + + memorykvdb "github.com/ChainSafe/gossamer/internal/kvdb/memory-kvdb" + "github.com/ChainSafe/gossamer/internal/primitives/core/hash" + "github.com/ChainSafe/gossamer/internal/primitives/database" + "github.com/stretchr/testify/require" +) + +// Create new offchain storage for tests (backed by memorydb) +func NewTestLocalStorage(t *testing.T) *LocalStorage { + t.Helper() + kvdb := memorykvdb.New(13) + db := database.NewDBAdapter[hash.H256](kvdb) + return NewLocalStorage(db) +} + +func TestLocalStorage(t *testing.T) { + t.Run("compare_and_set_and_clear_the_locks_map", func(t *testing.T) { + storage := NewTestLocalStorage(t) + prefix := []byte("prefix") + key := []byte("key") + value := []byte("value") + + storage.Set(prefix, key, value) + require.Equal(t, value, storage.Get(prefix, key)) + + require.True(t, storage.CompareAndSet(prefix, key, value, []byte("asd"))) + require.Equal(t, []byte("asd"), storage.Get(prefix, key)) + require.Empty(t, storage.locks) + }) + + t.Run("compare_and_set_on_empty_field", func(t *testing.T) { + storage := NewTestLocalStorage(t) + prefix := []byte("prefix") + key := []byte("key") + + require.True(t, storage.CompareAndSet(prefix, key, nil, []byte("asd"))) + require.Equal(t, []byte("asd"), storage.Get(prefix, key)) + require.Empty(t, storage.locks) + }) + + t.Run("remove", func(t *testing.T) { + storage := NewTestLocalStorage(t) + prefix := []byte("prefix") + key := []byte("key") + value := []byte("value") + + storage.Set(prefix, key, value) + require.Equal(t, value, storage.Get(prefix, key)) + + storage.Remove(prefix, key) + require.Nil(t, storage.Get(prefix, key)) + }) + +} diff --git a/internal/client/db/pinned_blocks_cache.go b/internal/client/db/pinned_blocks_cache.go index 5909a2e436..a529e9b464 100644 --- a/internal/client/db/pinned_blocks_cache.go +++ b/internal/client/db/pinned_blocks_cache.go @@ -4,7 +4,6 @@ package db import ( - "log" "math" "github.com/ChainSafe/gossamer/internal/primitives/runtime" @@ -12,18 +11,18 @@ import ( ) // Entry for pinned blocks cache. -type pinnedBlocksCacheEntry struct { +type pinnedBlocksCacheEntry[E runtime.Extrinsic] struct { // How many times this item has been pinned refCount uint32 // Cached justifications for this block - Justifications runtime.Justifications + Justifications *runtime.Justifications // Cached body for this block - Body *[]runtime.Extrinsic + Body *[]E } -func (pbce *pinnedBlocksCacheEntry) DecreaseRef() { +func (pbce *pinnedBlocksCacheEntry[E]) DecreaseRef() { if pbce.refCount > 0 { pbce.refCount-- } else { @@ -31,7 +30,7 @@ func (pbce *pinnedBlocksCacheEntry) DecreaseRef() { } } -func (pbce *pinnedBlocksCacheEntry) IncreaseRef() { +func (pbce *pinnedBlocksCacheEntry[E]) IncreaseRef() { if pbce.refCount < math.MaxUint32 { pbce.refCount++ } else { @@ -39,81 +38,81 @@ func (pbce *pinnedBlocksCacheEntry) IncreaseRef() { } } -func (pbce *pinnedBlocksCacheEntry) HasNoReferences() bool { +func (pbce *pinnedBlocksCacheEntry[E]) HasNoReferences() bool { return pbce.refCount == 0 } // Reference counted cache for pinned block bodies and justifications. 
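// An illustrative usage sketch (hypothetical caller; blockHash and the test
// type noopExtrinsic stand in for real values, and this example is not part
// of the change itself): a block is pinned before data is attached, and
// unpinned once the reference is released.
//
//	cache := newPinnedBlocksCache[hash.H256, noopExtrinsic]()
//	cache.Pin(blockHash)                           // refCount = 1
//	cache.InsertBody(blockHash, []noopExtrinsic{}) // attach cached body
//	body := cache.Body(blockHash)                  // *[]noopExtrinsic
//	cache.Unpin(blockHash)                         // refCount hits 0; entry removed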
-type pinnedBlocksCache[H comparable] struct { - cache *lru.Cache[H, *pinnedBlocksCacheEntry] +type pinnedBlocksCache[H comparable, E runtime.Extrinsic] struct { + cache *lru.Cache[H, *pinnedBlocksCacheEntry[E]] } -func newPinnedBlocksCache[H comparable]() pinnedBlocksCache[H] { - cache, err := lru.NewWithEvict[H, *pinnedBlocksCacheEntry](1024, func(key H, value *pinnedBlocksCacheEntry) { +func newPinnedBlocksCache[H comparable, E runtime.Extrinsic]() pinnedBlocksCache[H, E] { + cache, err := lru.NewWithEvict[H, *pinnedBlocksCacheEntry[E]](1024, func(key H, value *pinnedBlocksCacheEntry[E]) { // If reference count was larger than 0 on removal, // the item was removed due to capacity limitations. // Since the cache should be large enough for pinned items, // we want to know about these evictions. if value.refCount > 0 { - log.Printf("TRACE: Pinned block cache limit reached. Evicting value. hash = %v\n", key) + logger.Tracef("Pinned block cache limit reached. Evicting value. hash = %v", key) } else { - log.Printf("TRACE: Evicting value from pinned block cache. hash = %v\n", key) + logger.Tracef("Evicting value from pinned block cache. hash = %v", key) } }) if err != nil { panic(err) } - return pinnedBlocksCache[H]{cache} + return pinnedBlocksCache[H, E]{cache} } // Increase reference count of an item. // Create an entry with empty value in the cache if necessary. -func (pbc *pinnedBlocksCache[H]) Pin(hash H) { - prev, ok, _ := pbc.cache.PeekOrAdd(hash, &pinnedBlocksCacheEntry{refCount: 1}) +func (pbc *pinnedBlocksCache[H, E]) Pin(hash H) { + prev, ok, _ := pbc.cache.PeekOrAdd(hash, &pinnedBlocksCacheEntry[E]{refCount: 1}) if ok { prev.IncreaseRef() - log.Printf("TRACE: Bumped cache refcount. hash = %v, num_entries = %v\n", hash, pbc.cache.Len()) + logger.Tracef("Bumped cache refcount. hash = %v, num_entries = %v", hash, pbc.cache.Len()) pbc.cache.Add(hash, prev) } else { - log.Printf("TRACE: Unable to bump reference count. hash = %v\n", hash) + logger.Tracef("Unable to bump reference count. hash = %v", hash) } } // Clear the cache -func (pbc *pinnedBlocksCache[H]) Clear() { +func (pbc *pinnedBlocksCache[H, E]) Clear() { pbc.cache.Purge() } // Check if item is contained in the cache -func (pbc *pinnedBlocksCache[H]) Contains(hash H) bool { +func (pbc *pinnedBlocksCache[H, E]) Contains(hash H) bool { return pbc.cache.Contains(hash) } // Attach body to an existing cache item -func (pbc *pinnedBlocksCache[H]) InsertBody(hash H, extrinsics []runtime.Extrinsic) { +func (pbc *pinnedBlocksCache[H, E]) InsertBody(hash H, extrinsics []E) { val, ok := pbc.cache.Peek(hash) if ok { val.Body = &extrinsics - log.Printf("TRACE: Cached body. hash = %v, num_entries = %v\n", hash, pbc.cache.Len()) + logger.Tracef("Cached body. hash = %v, num_entries = %v", hash, pbc.cache.Len()) } else { - log.Printf("TRACE: Unable to insert body for uncached item. hash = %v\n", hash) + logger.Tracef("Unable to insert body for uncached item. hash = %v", hash) } } // Attach justification to an existing cache item -func (pbc *pinnedBlocksCache[H]) InsertJustifications(hash H, justifications runtime.Justifications) { +func (pbc *pinnedBlocksCache[H, E]) InsertJustifications(hash H, justifications runtime.Justifications) { val, ok := pbc.cache.Peek(hash) if ok { - val.Justifications = justifications - log.Printf("TRACE: Cached justification. hash = %v, num_entries = %v\n", hash, pbc.cache.Len()) + val.Justifications = &justifications + logger.Tracef("Cached justification. 
hash = %v, num_entries = %v", hash, pbc.cache.Len()) } else { - log.Printf("TRACE: Unable to insert justifications for uncached item. hash = %v\n", hash) + logger.Tracef("Unable to insert justifications for uncached item. hash = %v", hash) } } // Decreases reference count of an item. // If the count hits 0, the item is removed. -func (pbc *pinnedBlocksCache[H]) Unpin(hash H) { +func (pbc *pinnedBlocksCache[H, E]) Unpin(hash H) { val, ok := pbc.cache.Peek(hash) if ok { val.DecreaseRef() @@ -124,7 +123,7 @@ func (pbc *pinnedBlocksCache[H]) Unpin(hash H) { } // Get justifications for cached block -func (pbc *pinnedBlocksCache[H]) Justifications(hash H) runtime.Justifications { +func (pbc *pinnedBlocksCache[H, E]) Justifications(hash H) *runtime.Justifications { val, ok := pbc.cache.Peek(hash) if ok { return val.Justifications @@ -133,7 +132,7 @@ func (pbc *pinnedBlocksCache[H]) Justifications(hash H) runtime.Justifications { } // Get body for cached block -func (pbc *pinnedBlocksCache[H]) Body(hash H) *[]runtime.Extrinsic { +func (pbc *pinnedBlocksCache[H, E]) Body(hash H) *[]E { val, ok := pbc.cache.Peek(hash) if ok { return val.Body diff --git a/internal/client/db/pinned_blocks_cache_test.go b/internal/client/db/pinned_blocks_cache_test.go index 41a2e30a02..5f8bb7c036 100644 --- a/internal/client/db/pinned_blocks_cache_test.go +++ b/internal/client/db/pinned_blocks_cache_test.go @@ -11,37 +11,39 @@ import ( ) func TestPinnedBlocksCache(t *testing.T) { - cache := newPinnedBlocksCache[uint]() + cache := newPinnedBlocksCache[uint, noopExtrinsic]() cache.Pin(1) value, ok := cache.cache.Peek(1) assert.True(t, ok) - assert.Equal(t, pinnedBlocksCacheEntry{refCount: 1}, *value) + assert.Equal(t, pinnedBlocksCacheEntry[noopExtrinsic]{refCount: 1}, *value) + + assert.True(t, cache.Contains(1)) cache.Pin(1) value, ok = cache.cache.Peek(1) assert.True(t, ok) - assert.Equal(t, pinnedBlocksCacheEntry{refCount: 2}, *value) + assert.Equal(t, pinnedBlocksCacheEntry[noopExtrinsic]{refCount: 2}, *value) cache.Pin(1) value, ok = cache.cache.Peek(1) assert.True(t, ok) - assert.Equal(t, pinnedBlocksCacheEntry{refCount: 3}, *value) + assert.Equal(t, pinnedBlocksCacheEntry[noopExtrinsic]{refCount: 3}, *value) - cache.InsertBody(1, []runtime.Extrinsic{}) + cache.InsertBody(1, []noopExtrinsic{}) value, ok = cache.cache.Peek(1) assert.True(t, ok) - body := []runtime.Extrinsic{} - assert.Equal(t, pinnedBlocksCacheEntry{refCount: 3, Body: &body}, *value) + body := []noopExtrinsic{} + assert.Equal(t, pinnedBlocksCacheEntry[noopExtrinsic]{refCount: 3, Body: &body}, *value) cache.InsertJustifications(1, runtime.Justifications{{ ConsensusEngineID: runtime.ConsensusEngineID{1, 1, 1, 1}, }}) value, ok = cache.cache.Peek(1) assert.True(t, ok) - assert.Equal(t, pinnedBlocksCacheEntry{ + assert.Equal(t, pinnedBlocksCacheEntry[noopExtrinsic]{ refCount: 3, Body: &body, - Justifications: runtime.Justifications{{ + Justifications: &runtime.Justifications{{ ConsensusEngineID: runtime.ConsensusEngineID{1, 1, 1, 1}, }}, }, *value) @@ -49,10 +51,10 @@ func TestPinnedBlocksCache(t *testing.T) { cache.Unpin(1) value, ok = cache.cache.Peek(1) assert.True(t, ok) - assert.Equal(t, pinnedBlocksCacheEntry{ + assert.Equal(t, pinnedBlocksCacheEntry[noopExtrinsic]{ refCount: 2, Body: &body, - Justifications: runtime.Justifications{{ + Justifications: &runtime.Justifications{{ ConsensusEngineID: runtime.ConsensusEngineID{1, 1, 1, 1}, }}, }, *value) @@ -60,10 +62,10 @@ func TestPinnedBlocksCache(t *testing.T) { cache.Unpin(1) value, ok = 
cache.cache.Peek(1) assert.True(t, ok) - assert.Equal(t, pinnedBlocksCacheEntry{ + assert.Equal(t, pinnedBlocksCacheEntry[noopExtrinsic]{ refCount: 1, Body: &body, - Justifications: runtime.Justifications{{ + Justifications: &runtime.Justifications{{ ConsensusEngineID: runtime.ConsensusEngineID{1, 1, 1, 1}, }}, }, *value) diff --git a/internal/client/db/utils.go b/internal/client/db/utils.go index a35d18de72..5028a31b01 100644 --- a/internal/client/db/utils.go +++ b/internal/client/db/utils.go @@ -5,9 +5,10 @@ package db import ( "bytes" + "errors" "fmt" - "log" "math" + "reflect" "github.com/ChainSafe/gossamer/internal/client/db/columns" "github.com/ChainSafe/gossamer/internal/client/db/metakeys" @@ -18,6 +19,10 @@ import ( "github.com/ChainSafe/gossamer/pkg/scale" ) +// Number of columns in the db. Must be the same for both full && light dbs. +// There are currently 13 columns in columns package. +const NumColumns uint32 = 13 + // Meta column. The set of keys in the column is shared by full && light storages. const columnMeta = columns.Meta @@ -61,6 +66,58 @@ func newNumberIndexKey[N runtime.Number](num N) (numberIndexKey, error) { return numberIndexKey{byte(n >> 24), byte((n >> 16) & 0xff), byte((n >> 8) & 0xff), byte(n & 0xff)}, nil } +// Convert number and hash into long lookup key for blocks that are +// not in the canonical chain. +func newLookupKey[N runtime.Number, H runtime.Hash](number N, hash H) ([]byte, error) { + lookupKey, err := newNumberIndexKey(number) + if err != nil { + return nil, err + } + key := append(lookupKey[:], hash.Bytes()...) + return key, nil +} + +// Delete number to hash mapping in DB transaction. +func removeNumberToKeyMapping[N runtime.Number]( + transaction *database.Transaction[hash.H256], keyLookupCol uint32, number N, +) error { + lookupKey, err := newNumberIndexKey(number) + if err != nil { + return err + } + transaction.Remove(database.ColumnID(keyLookupCol), lookupKey[:]) + return nil +} + +// Place a number mapping into the database. This maps number to current perceived +// block hash at that position. +func insertNumberToKeyMapping[H runtime.Hash, N runtime.Number]( + transaction *database.Transaction[hash.H256], keyLookupCol uint32, number N, hash H, +) error { + numberIndexKey, err := newNumberIndexKey(number) + if err != nil { + return err + } + lookupKey, err := newLookupKey(number, hash) + if err != nil { + return err + } + transaction.Set(database.ColumnID(keyLookupCol), numberIndexKey[:], lookupKey) + return nil +} + +// Insert a hash to key mapping in the database. +func insertHashToKeyMapping[H runtime.Hash, N runtime.Number]( + transaction *database.Transaction[hash.H256], keyLookupCol uint32, number N, hash H, +) error { + lookupKey, err := newLookupKey(number, hash) + if err != nil { + return err + } + transaction.Set(database.ColumnID(keyLookupCol), hash.Bytes(), lookupKey) + return nil +} + // Convert block id to block lookup key. // block lookup key is the DB-key header, block and justification are stored under. // looks up lookup key by hash from DB as necessary. 
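To make the key layout above concrete: newNumberIndexKey packs the block number into 4 big-endian bytes, and newLookupKey appends the block hash, so all blocks at one height share a common 4-byte prefix. A standalone sketch of the same layout (illustrative only; lookupKey here is a hypothetical stand-in, not the patch's generic function):

package main

import (
	"encoding/binary"
	"fmt"
)

// lookupKey mirrors the layout produced by newLookupKey: a 4-byte big-endian
// block number followed by the block hash bytes.
func lookupKey(number uint32, hashBytes []byte) []byte {
	key := make([]byte, 4, 4+len(hashBytes))
	binary.BigEndian.PutUint32(key, number)
	return append(key, hashBytes...)
}

func main() {
	// Block number 0x01020304 with a truncated example hash 0xaabb:
	fmt.Printf("% x\n", lookupKey(0x01020304, []byte{0xaa, 0xbb})) // 01 02 03 04 aa bb
}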
@@ -69,13 +126,13 @@ func blockIDToLookupKey[H runtime.Hash, N runtime.Number]( ) ([]byte, error) { switch id := id.(type) { case generic.BlockIDNumber[N]: - key, err := newNumberIndexKey(id.Inner) + key, err := newNumberIndexKey(id.Number) if err != nil { return nil, err } return db.Get(keyLookupCol, key[:]), nil case generic.BlockIDHash[H]: - return db.Get(keyLookupCol, id.Inner.Bytes()), nil + return db.Get(keyLookupCol, id.Hash.Bytes()), nil default: panic("unsupported generic.BlockID") } @@ -95,10 +152,28 @@ func readDB[H runtime.Hash, N runtime.Number]( return nil, nil } +// Remove database column entry for the given block. +func removeFromDB[H runtime.Hash, N runtime.Number]( + transaction *database.Transaction[hash.H256], + db database.Database[hash.H256], + colIndex uint32, + col uint32, + id generic.BlockID, +) error { + key, err := blockIDToLookupKey[H, N](db, database.ColumnID(colIndex), id) + if err != nil { + return err + } + if key != nil { + transaction.Remove(database.ColumnID(col), key) + } + return nil +} + // Read a header from the database. func readHeader[H runtime.Hash, N runtime.Number, Header runtime.Header[N, H]]( db database.Database[hash.H256], colIndex, col database.ColumnID, id generic.BlockID, -) (*runtime.Header[N, H], error) { +) (*Header, error) { headerBytes, err := readDB[H, N](db, colIndex, col, id) if err != nil { return nil, err @@ -106,15 +181,17 @@ func readHeader[H runtime.Hash, N runtime.Number, Header runtime.Header[N, H]]( if headerBytes == nil { return nil, nil } - var header Header - err = scale.Unmarshal(headerBytes, &header) + t := reflect.TypeOf((*new(Header))).Elem() + header := reflect.New(t).Interface() + err = scale.Unmarshal(headerBytes, header) if err != nil { return nil, err } - ret := runtime.Header[N, H](header) - return &ret, nil + h := header.(Header) + return &h, nil } +// Read meta from the database. 
func readMeta[H runtime.Hash, N runtime.Number, Header runtime.Header[N, H]]( db database.Database[hash.H256], colHeader database.ColumnID, ) (meta[H, N], error) { @@ -135,14 +212,16 @@ func readMeta[H runtime.Hash, N runtime.Number, Header runtime.Header[N, H]]( if headerBytes == nil { return } - var header = new(Header) + t := reflect.TypeOf((*new(Header))).Elem() + header := reflect.New(t).Interface() err = scale.Unmarshal(headerBytes, header) if err != nil { return } - hash = (*header).Hash() - log.Printf("DEBUG: Opened blockchain db, fetched %v = %v (%v)\n", desc, hash, (*header).Number()) - return hash, (*header).Number(), nil + h := header.(Header) + hash = h.Hash() + logger.Debugf("Opened blockchain db, fetched %v = %v (%v)", desc, hash, h.Number()) + return hash, h.Number(), nil } bestHash, bestNumber, err := loadMetaBlock("best", metakeys.BestBlock) @@ -200,3 +279,15 @@ func readGenesisHash[H any](db database.Database[hash.H256]) (*H, error) { func joinInput(i1 []byte, i2 []byte) []byte { return bytes.Join([][]byte{i1, i2}, nil) } + +var ( + errDoesNotExist = errors.New("database does not exist at given location") +) + +func openDatabase(dbSource DatabaseSource, create bool) (database.Database[hash.H256], error) { + if dbSource.RequireCreateFlag && !create { + return nil, errDoesNotExist + } + + return dbSource.DB, nil +} diff --git a/internal/client/state-db/noncanonical.go b/internal/client/state-db/noncanonical.go index 9d039d526a..50b8f684bc 100644 --- a/internal/client/state-db/noncanonical.go +++ b/internal/client/state-db/noncanonical.go @@ -5,19 +5,22 @@ package statedb import ( "fmt" - "log" "math/bits" + "github.com/ChainSafe/gossamer/internal/log" + "github.com/ChainSafe/gossamer/pkg/scale" "github.com/gammazero/deque" ) +var logger = log.NewFromGlobal(log.AddContext("pkg", "client/state-db")) + var lastCanonical = []byte("last_canonical") const maxBlocksPerLevel uint64 = 32 // nonCanonicalOverlay maintains trees of block overlays and allows discarding trees/roots. -// The overlays are added in `Insert` and removed in `Canonicalize`. +// The overlays are added in Insert and removed in Canonicalize. type nonCanonicalOverlay[BlockHash Hash, Key Hash] struct { lastCanonicalized *hashBlock[BlockHash] levels deque.Deque[overlayLevel[BlockHash, Key]] @@ -49,7 +52,7 @@ func newNonCanonicalOverlay[BlockHash Hash, Key Hash](db MetaDB) (nonCanonicalOv var lastCanonicalized *hashBlock[BlockHash] if lastCanonicalizedMeta != nil { bhk := hashBlock[BlockHash]{} - err := scale.Unmarshal(*lastCanonicalizedMeta, &bhk) + err := scale.Unmarshal(lastCanonicalizedMeta, &bhk) if err != nil { return nonCanonicalOverlay[BlockHash, Key]{}, err } @@ -64,7 +67,7 @@ func newNonCanonicalOverlay[BlockHash Hash, Key Hash](db MetaDB) (nonCanonicalOv if lastCanonicalized != nil { block := lastCanonicalized.Block hash := lastCanonicalized.Hash - log.Printf("TRACE: Reading uncanonicalized journal. Last canonicalized %v (%v)", block, hash) + logger.Tracef("Reading uncanonicalized journal. 
Last canonicalized %v (%v)", block, hash) var total uint64 block += 1 for { @@ -76,7 +79,7 @@ func newNonCanonicalOverlay[BlockHash Hash, Key Hash](db MetaDB) (nonCanonicalOv return nonCanonicalOverlay[BlockHash, Key]{}, err } if record != nil { - recordBytes := *record + recordBytes := record var record journalRecord[BlockHash, Key] err := scale.Unmarshal(recordBytes, &record) if err != nil { @@ -94,7 +97,7 @@ func newNonCanonicalOverlay[BlockHash Hash, Key Hash](db MetaDB) (nonCanonicalOv deleted: record.Deleted, } insertValues(values, record.Inserted) - log.Printf("TRACE: Uncanonicalized journal entry %v.%v (%v) (%v inserted, %v deleted)\n", + logger.Tracef("Uncanonicalized journal entry %v.%v (%v) (%v inserted, %v deleted)", block, index, record.Hash, @@ -112,7 +115,7 @@ func newNonCanonicalOverlay[BlockHash Hash, Key Hash](db MetaDB) (nonCanonicalOv levels.PushBack(level) block += 1 } - log.Printf("TRACE: Finished reading uncanonicalized journal, %v entries\n", total) + logger.Tracef("Finished reading uncanonicalized journal, %v entries", total) } return nonCanonicalOverlay[BlockHash, Key]{ lastCanonicalized: lastCanonicalized, @@ -146,10 +149,9 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert( }) nco.lastCanonicalized = &lastCanonicalized } else if nco.lastCanonicalized != nil { - if number < frontBlockNumber || number > frontBlockNumber+uint64(nco.levels.Len()) { //nolint:gosec - log.Printf( - "TRACE: Failed to insert block %v, current is %v .. %v)\n", - number, frontBlockNumber, frontBlockNumber+uint64(nco.levels.Len())) //nolint:gosec + if number < frontBlockNumber || number > frontBlockNumber+uint64(nco.levels.Len()) { + logger.Tracef("Failed to insert block %v, current is %v .. %v)", + number, frontBlockNumber, frontBlockNumber+uint64(nco.levels.Len())) return CommitSet[Key]{}, ErrInvalidBlockNumber } // check for valid parent if inserting on second level or higher @@ -163,13 +165,13 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert( } var level overlayLevel[BlockHash, Key] = newOverlayLevel[BlockHash, Key]() var levelIndex int - if nco.levels.Len() == 0 || number == frontBlockNumber+uint64(nco.levels.Len()) { //nolint:gosec + if nco.levels.Len() == 0 || number == frontBlockNumber+uint64(nco.levels.Len()) { nco.levels.PushBack(newOverlayLevel[BlockHash, Key]()) level = nco.levels.Back() levelIndex = nco.levels.Len() - 1 } else { - level = nco.levels.At(int(number - frontBlockNumber)) //nolint:gosec - levelIndex = int(number - frontBlockNumber) //nolint:gosec + level = nco.levels.At(int(number - frontBlockNumber)) + levelIndex = int(number - frontBlockNumber) } if len(level.blocks) >= int(maxBlocksPerLevel) { @@ -177,9 +179,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert( for _, block := range level.blocks { hashes = append(hashes, block.hash) } - log.Printf( - "TRACE: Too many sibling blocks at %v: %v\n", - number, hashes) + logger.Tracef("Too many sibling blocks at %v: %v", number, hashes) return CommitSet[Key]{}, fmt.Errorf("too many sibling blocks at %d inserted", number) } for _, block := range level.blocks { @@ -213,7 +213,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert( Deleted: changeset.Deleted, } commit.Meta.Inserted = append(commit.Meta.Inserted, HashDBValue[[]byte]{journalKey, scale.MustMarshal(journalRecord)}) - log.Printf("TRACE: Inserted uncanonicalized changeset %v.%v %v (%v inserted, %v deleted)\n", + logger.Tracef("Inserted uncanonicalized changeset %v.%v %v (%v inserted, %v deleted)", number, index, hash, 
len(journalRecord.Inserted), len(journalRecord.Deleted)) insertValues(nco.values, journalRecord.Inserted) return commit, nil @@ -221,10 +221,10 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Insert( func (nco *nonCanonicalOverlay[BlockHash, Key]) discardJournals( levelIndex uint, discardedJournals *[][]byte, hash BlockHash) { - if levelIndex >= uint(nco.levels.Len()) { //nolint:gosec + if levelIndex >= uint(nco.levels.Len()) { return } - level := nco.levels.At(int(levelIndex)) //nolint:gosec + level := nco.levels.At(int(levelIndex)) for _, overlay := range level.blocks { parent, ok := nco.parents[overlay.hash] if !ok { @@ -266,13 +266,13 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Sync() { } // Canonicalize will select a top-level root and canonicalized it. Discards all sibling subtrees and the root. -// Add a set of changes of the canonicalized block to a provided `CommitSet` +// Add a set of changes of the canonicalized block to a provided CommitSet // Return the block number of the canonicalized block func (nco *nonCanonicalOverlay[BlockHash, Key]) Canonicalize( hash BlockHash, commit *CommitSet[Key], ) (uint64, error) { - log.Printf("TRACE: Canonicalizing %v\n", hash) + logger.Tracef("Canonicalizing %v", hash) if nco.levels.Len() == 0 { return 0, ErrInvalidBlock } @@ -354,7 +354,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Canonicalize( Hash: toMetaKey(lastCanonical, struct{}{}), DBValue: scale.MustMarshal(canonicalized), }) - log.Printf("TRACE: Discarding %v records\n", len(commit.Meta.Deleted)) + logger.Tracef("Discarding %v records", len(commit.Meta.Deleted)) num := canonicalized.Block nco.lastCanonicalized = &canonicalized @@ -362,12 +362,12 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Canonicalize( } // Get a value from the node overlay. This searches in every existing changeset. -func (nco *nonCanonicalOverlay[BlockHash, Key]) Get(key Key) *DBValue { +func (nco *nonCanonicalOverlay[BlockHash, Key]) Get(key Key) DBValue { cv, ok := nco.values[key] if !ok { return nil } - return &cv.value + return cv.value } // HaveBlock checks if the block is in the canonicalization queue. @@ -376,7 +376,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) HaveBlock(hash BlockHash) bool { return ok } -// RevertOne will revert a single level. Returns commit set that deletes the journal or `nil` if not +// RevertOne will revert a single level. Returns commit set that deletes the journal or nil if not // possible. func (nco *nonCanonicalOverlay[BlockHash, Key]) RevertOne() *CommitSet[Key] { if nco.levels.Len() == 0 { @@ -392,7 +392,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) RevertOne() *CommitSet[Key] { return &commit } -// Remove will revert a single block. Returns commit set that deletes the journal or `nil` if not +// Remove will revert a single block. Returns commit set that deletes the journal or nil if not // possible. 
func (nco *nonCanonicalOverlay[BlockHash, Key]) Remove(hash BlockHash) *CommitSet[Key] { commit := CommitSet[Key]{} @@ -413,12 +413,12 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Remove(hash BlockHash) *CommitSe if levelIndex != levelCount-1 { for _, h := range nco.parents { if h == hash { - log.Printf("DEBUG: Trying to remove block %v with children\n", hash) + logger.Debugf("Trying to remove block %v with children", hash) return nil } } } - overlay := level.remove(uint(index)) //nolint:gosec + overlay := level.remove(uint(index)) nco.levels.Set(levelIndex, level) commit.Meta.Deleted = append(commit.Meta.Deleted, overlay.journalKey) delete(nco.parents, overlay.hash) @@ -438,7 +438,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Pin(hash BlockHash) { refs := nco.pinned[hash] if refs == 0 { - log.Println("TRACE: Pinned non-canon block:", hash) + logger.Tracef("Pinned non-canon block: %s", hash) } refs += 1 nco.pinned[hash] = refs @@ -474,7 +474,7 @@ func (nco *nonCanonicalOverlay[BlockHash, Key]) Unpin(hash BlockHash) { entry.count -= 1 if entry.count == 0 { delete(nco.pinnedInsertions, hash) - log.Println("TRACE: Discarding unpinned non-canon block:", hash) + logger.Tracef("Discarding unpinned non-canon block: %v", hash) discardValues(nco.values, entry.keys) delete(nco.parents, hash) } @@ -496,7 +496,7 @@ func (ol *overlayLevel[BlockHash, Key]) push(overlay blockOverlay[BlockHash, Key } func (ol *overlayLevel[BlockHash, Key]) availableIndex() uint64 { - return uint64(bits.TrailingZeros64(^ol.usedIndices)) //nolint:gosec + return uint64(bits.TrailingZeros64(^ol.usedIndices)) } func (ol *overlayLevel[BlockHash, Key]) remove(index uint) blockOverlay[BlockHash, Key] { @@ -639,7 +639,7 @@ func discardDescendants[BlockHash Hash, Key Hash]( panic("there is a parent entry for each entry in levels; qed") } if h == hash { - index = uint(i) //nolint:gosec + index = uint(i) overlay := level.remove(index) numPinned := discardDescendants(remainder, values, parents, pinned, pinnedInsertions, overlay.hash) if _, ok := pinned[overlay.hash]; ok { diff --git a/internal/client/state-db/noncanonical_test.go b/internal/client/state-db/noncanonical_test.go index 91125dcd85..add52c3815 100644 --- a/internal/client/state-db/noncanonical_test.go +++ b/internal/client/state-db/noncanonical_test.go @@ -193,7 +193,7 @@ func TestRestoreFromJournalAfterCanonicalize(t *testing.T) { func contains(overlay nonCanonicalOverlay[hash.H256, hash.H256], key uint64) bool { val := overlay.Get(hash.NewH256FromLowUint64BigEndian(key)) - return val != nil && string(*val) == string(hash.NewH256FromLowUint64BigEndian(key)) + return val != nil && string(val) == string(hash.NewH256FromLowUint64BigEndian(key)) } func TestInsertCanonicalizeTwo(t *testing.T) { diff --git a/internal/client/state-db/pruning.go b/internal/client/state-db/pruning.go index 96cbaeb5ff..40541eeba9 100644 --- a/internal/client/state-db/pruning.go +++ b/internal/client/state-db/pruning.go @@ -4,8 +4,6 @@ package statedb import ( - "log" - "github.com/ChainSafe/gossamer/internal/primitives/core/hash" "github.com/ChainSafe/gossamer/pkg/scale" "github.com/gammazero/deque" @@ -42,7 +40,7 @@ func newPruningWindow[BlockHash Hash, Key Hash]( return pruningWindow[BlockHash, Key]{}, err } if val != nil { - err = scale.Unmarshal(*val, &base) + err = scale.Unmarshal(val, &base) if err != nil { return pruningWindow[BlockHash, Key]{}, err } @@ -50,8 +48,8 @@ func newPruningWindow[BlockHash
Hash, Key Hash]( } if windowSize > 1000 { - log.Printf( - "TRACE: Large pruning window of %d detected! THIS CAN LEAD TO HIGH MEMORY USAGE AND CRASHES. Reduce the pruning window.", //nolint:lll + logger.Tracef("Large pruning window of %d detected! "+ + "THIS CAN LEAD TO HIGH MEMORY USAGE AND CRASHES. Reduce the pruning window.", windowSize) } @@ -89,14 +87,14 @@ func (rw *pruningWindow[BlockHash, Key]) HaveBlock(hash BlockHash, number uint64 return rw.queue.HaveBlock(hash, uint(number-rw.base)) } -// Prune next block. Expects at least one block in the window. Adds changes to `commit`. +// Prune next block. Expects at least one block in the window. Adds changes to commit. func (rw *pruningWindow[BlockHash, Key]) PruneOne(commit *CommitSet[Key]) error { pruned, err := rw.queue.PopFront(rw.base) if err != nil { return err } if pruned != nil { - log.Printf("TRACE: Pruning %v (%v deleted)", pruned.hash, len(pruned.deleted)) + logger.Tracef("Pruning %v (%v deleted)", pruned.hash, len(pruned.deleted)) index := rw.base commit.Data.Deleted = append(commit.Data.Deleted, maps.Keys(pruned.deleted)...) commit.Meta.Inserted = append(commit.Meta.Inserted, HashDBValue[[]byte]{ @@ -107,12 +105,12 @@ func (rw *pruningWindow[BlockHash, Key]) PruneOne(commit *CommitSet[Key]) error rw.base += 1 return nil } else { - log.Printf("TRACE: Trying to prune when there's nothing to prune") + logger.Tracef("Trying to prune when there's nothing to prune") return ErrBlockUnavailable } } -// Add a change set to the window. Creates a journal record and pushes it to `commit` +// Add a change set to the window. Creates a journal record and pushes it to commit func (rw *pruningWindow[BlockHash, Key]) NoteCanonical(hash BlockHash, number uint64, commit *CommitSet[Key]) error { if rw.base == 0 && rw.isEmpty() && number > 0 { // This branch is taken if the node imports the target block of a warp sync. @@ -126,8 +124,7 @@ func (rw *pruningWindow[BlockHash, Key]) NoteCanonical(hash BlockHash, number ui } else if (rw.base + rw.WindowSize()) != number { return ErrInvalidBlockNumber } - log.Printf( - "TRACE: Adding to pruning window: %v (%v inserted, %v deleted)", + logger.Tracef("Adding to pruning window: %v (%v inserted, %v deleted)", hash, len(commit.Data.Inserted), len(commit.Data.Deleted), ) var inserted []Key @@ -158,7 +155,7 @@ type deathRowQueue[BlockHash Hash, Key Hash] interface { type inMemDeathRowQueue[BlockHash Hash, Key Hash] struct { // A queue of keys that should be deleted for each block in the pruning window. deathRows deque.Deque[deathRow[BlockHash, Key]] - // An index that maps each key from `death_rows` to block number. + // An index that maps each key from deathRows to block number. deathIndex map[Key]uint64 } @@ -167,7 +164,7 @@ func newInMemDeathRowQueue[BlockHash Hash, Key Hash](db MetaDB, base uint64) (de queue := &inMemDeathRowQueue[BlockHash, Key]{ deathIndex: make(map[Key]uint64), } - log.Printf("TRACE: Reading pruning journal for the memory queue. Pending #%v\n", base) + logger.Tracef("Reading pruning journal for the memory queue. 
Pending #%v", base) for { journalKey := toPruningJournalKey(block) val, err := db.GetMeta(journalKey) if err != nil { @@ -176,12 +173,11 @@ func newInMemDeathRowQueue[BlockHash Hash, Key Hash](db MetaDB, base uint64) (de } if val != nil { var record pruningJournalRecord[BlockHash, Key] - err := scale.Unmarshal(*val, &record) + err := scale.Unmarshal(val, &record) if err != nil { return nil, err } - log.Printf( - "TRACE: Pruning journal entry %v (%v inserted, %v deleted)", + logger.Tracef("Pruning journal entry %v (%v inserted, %v deleted)", block, len(record.Inserted), len(record.Deleted)) queue.Import(base, block, record) } else { @@ -201,17 +197,17 @@ func (drqim *inMemDeathRowQueue[BlockHash, Key]) Import( inserted = journalRecord.Inserted deleted = journalRecord.Deleted ) - log.Printf("TRACE: Importing %v, base=%v\n", num, base) + logger.Tracef("Importing %v, base=%v", num, base) // remove all re-inserted keys from death rows for _, k := range inserted { block, ok := drqim.deathIndex[k] if ok { delete(drqim.deathIndex, k) - delete(drqim.deathRows.At(int(block-base)).deleted, k) //nolint:gosec + delete(drqim.deathRows.At(int(block-base)).deleted, k) } } // add new keys - importedBlock := base + uint64(drqim.deathRows.Len()) //nolint:gosec + importedBlock := base + uint64(drqim.deathRows.Len()) deletedMap := make(map[Key]any) for _, k := range deleted { drqim.deathIndex[k] = importedBlock @@ -220,7 +216,7 @@ } drqim.deathRows.PushBack(deathRow[BlockHash, Key]{hash, deletedMap}) } -// Pop out one block from the front of the queue, `base` is the block number +// Pop out one block from the front of the queue, base is the block number // of the first block of the queue func (drqim *inMemDeathRowQueue[BlockHash, Key]) PopFront(base uint64) (*deathRow[BlockHash, Key], error) { if drqim.deathRows.Len() == 0 { @@ -233,10 +229,10 @@ func (drqim *inMemDeathRowQueue[BlockHash, Key]) PopFront(base uint64) (*deathRo return &row, nil } -// Check if the block at the given `index` of the queue exist -// it is the caller's responsibility to ensure `index` won't be out of bounds +// Check if the block at the given index of the queue exists +// it is the caller's responsibility to ensure index won't be out of bounds func (drqim *inMemDeathRowQueue[BlockHash, Key]) HaveBlock(hash BlockHash, index uint) haveBlock { - if drqim.deathRows.At(int(index)).hash == hash { //nolint:gosec + if drqim.deathRows.At(int(index)).hash == hash { return haveBlockYes } return haveBlockNo @@ -244,7 +240,7 @@ func (drqim *inMemDeathRowQueue[BlockHash, Key]) HaveBlock(hash BlockHash, index // Return the number of block in the pruning window func (drqim *inMemDeathRowQueue[BlockHash, Key]) Len(base uint64) uint64 { - return uint64(drqim.deathRows.Len()) //nolint:gosec + return uint64(drqim.deathRows.Len()) } // Get the hash of the next pruning block diff --git a/internal/client/state-db/pruning_test.go b/internal/client/state-db/pruning_test.go index 70887c7e17..7c76dd981c 100644 --- a/internal/client/state-db/pruning_test.go +++ b/internal/client/state-db/pruning_test.go @@ -204,7 +204,7 @@ func TestRefWindow_ReinsertedSurvivesPending(t *testing.T) { // Ensure that after warp syncing the state is stored correctly in the db. The warp sync target // block is imported with all its state at once. This test ensures that after a restart -// `pruning` still knows that this block was imported. +// pruning still knows that this block was imported.
func TestRefWindow_StoreCorrectStateAfterWarpSyncing(t *testing.T) { db := NewTestDB([]uint64{}) pruning, err := newPruningWindow[hash.H256, hash.H256](db, defaultMaxBlockConstraint) assert.NoError(t, err) @@ -221,7 +221,7 @@ func TestRefWindow_StoreCorrectStateAfterWarpSyncing(t *testing.T) { assert.Equal(t, haveBlockYes, pruning.HaveBlock(h, block)) // load a new queue from db - // `cache` should be the same + // cache should be the same pruning, err = newPruningWindow[hash.H256, hash.H256](db, defaultMaxBlockConstraint) assert.NoError(t, err) assert.Equal(t, haveBlockYes, pruning.HaveBlock(h, block)) diff --git a/internal/client/state-db/statedb.go b/internal/client/state-db/statedb.go index ad601c7f97..e9170705d2 100644 --- a/internal/client/state-db/statedb.go +++ b/internal/client/state-db/statedb.go @@ -6,7 +6,6 @@ package statedb import ( "errors" "fmt" - "log" "sync" "github.com/ChainSafe/gossamer/pkg/scale" @@ -36,13 +35,13 @@ type HashDBValue[H any] struct { // MetaDB is the backend database interface for metadata. Read-only. type MetaDB interface { // Get meta value, such as the journal. - GetMeta(key []byte) (*DBValue, error) + GetMeta(key []byte) (DBValue, error) } // NodeDB is the backend database interface. Read-only. type NodeDB[Key comparable] interface { // Get state trie node. - Get(key Key) (*DBValue, error) + Get(key Key) (DBValue, error) } var ( @@ -115,6 +114,10 @@ func NewPruningModeFromID(id []byte) PruningMode { // PruningModeConstrained will maintain a constrained pruning window. type PruningModeConstrained Constraints +func NewPruningModeConstrained(numBlocks uint32) PruningModeConstrained { + return PruningModeConstrained{MaxBlocks: &numBlocks} +} + // IsArchive returns whether or not this mode will archive entire history. func (pmc PruningModeConstrained) IsArchive() bool { return false @@ -223,9 +226,9 @@ func (sdbs *stateDBSync[BlockHash, Key]) insertBlock( } func (sdbs *stateDBSync[BlockHash, Key]) canonicalizeBlock(hash BlockHash) (CommitSet[Key], error) { - // NOTE: it is important that the change to `lastCanonical` (emit from - // `nonCanonicalOverlay.Canonicalize`) and the insert of the new pruning journal (emit from - // `pruningWindow.NoteCanonical`) are collected into the same `CommitSet` and are committed to + // NOTE: it is important that the change to lastCanonical (emitted from + // nonCanonicalOverlay.Canonicalize) and the insert of the new pruning journal (emitted from + // pruningWindow.NoteCanonical) are collected into the same CommitSet and are committed to // the database atomically to keep their consistency when restarting the node commit := CommitSet[Key]{} if _, ok := sdbs.mode.(PruningModeArchiveAll); ok { @@ -295,7 +298,7 @@ func (sdbs *stateDBSync[BlockHash, Key]) isPruned(hash BlockHash, number uint64) // We don't know for sure. return IsPrunedMaybePruned default: - panic("wtf?") + panic("unreachable") } } @@ -325,7 +328,7 @@ func (sdbs *stateDBSync[BlockHash, Key]) prune(commit *CommitSet[Key]) error { } } err = sdbs.pruning.PruneOne(commit) - // this branch should not reach as previous `next_hash` don't return error + // this branch should not be reached as the previous next_hash doesn't return an error // keeping it for robustness if err != nil { if errors.Is(err, ErrBlockUnavailable) { @@ -339,7 +342,7 @@ } // Revert all non-canonical blocks with the best block number. -// Returns a database commit or `None` if not possible. +// Returns a database commit or nil if not possible.
// For archive an empty commit set is returned. func (sdbs *stateDBSync[BlockHash, Key]) revertOne() *CommitSet[Key] { switch sdbs.mode.(type) { @@ -348,7 +351,7 @@ func (sdbs *stateDBSync[BlockHash, Key]) revertOne() *CommitSet[Key] { case PruningModeArchiveCanonical, PruningModeConstrained: return sdbs.nonCanonical.RevertOne() default: - panic("wtf?") + panic("unreachable") } } @@ -359,7 +362,7 @@ func (sdbs *stateDBSync[BlockHash, Key]) remove(hash BlockHash) *CommitSet[Key] case PruningModeArchiveCanonical, PruningModeConstrained: return sdbs.nonCanonical.Remove(hash) default: - panic("wtf?") + panic("unreachable") } } @@ -386,7 +389,7 @@ func (sdbs *stateDBSync[BlockHash, Key]) pin(hash BlockHash, number uint64, hint if haveBlock { refs := sdbs.pinned[hash] if refs == 0 { - log.Println("TRACE: Pinned block:", hash) + logger.Tracef("Pinned block: %s", hash) sdbs.nonCanonical.Pin(hash) } sdbs.pinned[hash] += 1 @@ -394,20 +397,20 @@ func (sdbs *stateDBSync[BlockHash, Key]) pin(hash BlockHash, number uint64, hint } return ErrInvalidBlock default: - panic("wtf?") + panic("unreachable") } } func (sdbs *stateDBSync[BlockHash, Key]) unpin(hash BlockHash) { - entry, ok := sdbs.pinned[hash] + _, ok := sdbs.pinned[hash] if ok { sdbs.pinned[hash] -= 1 - if entry == 0 { - log.Println("TRACE: Unpinned block:", hash) + if sdbs.pinned[hash] == 0 { + logger.Tracef("Unpinned block: %s", hash) delete(sdbs.pinned, hash) sdbs.nonCanonical.Unpin(hash) } else { - log.Println("TRACE: Releasing reference for ", hash) + logger.Tracef("Releasing reference for %s", hash) } } } @@ -416,7 +419,7 @@ func (sdbs *stateDBSync[BlockHash, Key]) sync() { sdbs.nonCanonical.Sync() } -func (sdbs *stateDBSync[BlockHash, Key]) get(key Key, db NodeDB[Key]) (*DBValue, error) { +func (sdbs *stateDBSync[BlockHash, Key]) get(key Key, db NodeDB[Key]) (DBValue, error) { val := sdbs.nonCanonical.Get(key) if val != nil { return val, nil @@ -447,7 +450,7 @@ func (sdbs *stateDBSync[BlockHash, Key]) get(key Key, db NodeDB[Key]) (*DBValue, // unfinalized block may be forced. // // # Pruning. -// See `pruningWindow` for pruning algorithm details. `StateDB` prunes on each canonicalization until +// See pruningWindow for pruning algorithm details. StateDB prunes on each canonicalization until // pruning constraints are satisfied. type StateDB[BlockHash Hash, Key Hash] struct { db stateDBSync[BlockHash, Key] @@ -491,7 +494,7 @@ func NewStateDB[BlockHash Hash, Key Hash]( } selectedMode = mode default: - panic("wtf?") + panic("unreachable") } var dbInitCommitSet CommitSet[Key] @@ -545,7 +548,7 @@ func (sdb *StateDB[BlockHash, Key]) CanonicalizeBlock(hash BlockHash) (CommitSet } // Pin prevents pruning of specified block and its descendants. -// `hint` used for further checking if the given block exists +// hint used for further checking if the given block exists func (sdb *StateDB[BlockHash, Key]) Pin(hash BlockHash, number uint64, hint func() bool) error { sdb.Lock() defer sdb.Unlock() @@ -568,14 +571,14 @@ func (sdb *StateDB[BlockHash, Key]) Sync() { } // Get a value from non-canonical/pruning overlay or the backing DB. -func (sdb *StateDB[BlockHash, Key]) Get(key Key, db NodeDB[Key]) (*DBValue, error) { +func (sdb *StateDB[BlockHash, Key]) Get(key Key, db NodeDB[Key]) (DBValue, error) { sdb.RLock() defer sdb.RUnlock() return sdb.db.get(key, db) } // RevertOne will revert all non-canonical blocks with the best block number. -// Returns a database commit or `nil` if not possible. +// Returns a database commit or nil if not possible. 
// For archive an empty commit set is returned. func (sdb *StateDB[BlockHash, Key]) RevertOne() *CommitSet[Key] { sdb.Lock() @@ -584,7 +587,7 @@ } // Remove specified non-canonical block. -// Returns a database commit or `nil` if not possible. +// Returns a database commit or nil if not possible. func (sdb *StateDB[BlockHash, Key]) Remove(hash BlockHash) *CommitSet[Key] { sdb.Lock() defer sdb.Unlock() @@ -617,7 +620,7 @@ func (sdb *StateDB[BlockHash, Key]) Reset(db MetaDB) error { return nil } -// The result returned by `StateDB.IsPruned()` +// The result returned by StateDB.IsPruned() type IsPruned uint const ( @@ -668,11 +671,11 @@ func fetchStoredPruningMode(db MetaDB) (PruningMode, error) { return nil, err } if val == nil { - return nil, nil //nolint: nilnil + return nil, nil } - mode := NewPruningModeFromID(*val) + mode := NewPruningModeFromID(val) if mode != nil { return mode, nil } - return nil, fmt.Errorf("invalid value stored for PRUNING_MODE: %v", *val) + return nil, fmt.Errorf("invalid value stored for PRUNING_MODE: %v", val) } diff --git a/internal/client/state-db/statedb_test.go b/internal/client/state-db/statedb_test.go index 826c05fdf6..d691b2964a 100644 --- a/internal/client/state-db/statedb_test.go +++ b/internal/client/state-db/statedb_test.go @@ -26,12 +26,12 @@ func NewTestDB(inserted []uint64) TestDB { } } -func (tdb TestDB) GetMeta(key []byte) (*DBValue, error) { +func (tdb TestDB) GetMeta(key []byte) (DBValue, error) { val, ok := tdb.Meta[string(key)] if !ok { return nil, nil } - return &val, nil + return val, nil } func (tdb *TestDB) Commit(commitSet CommitSet[hash.H256]) { @@ -164,7 +164,7 @@ func TestStateDB_BlockRecordUnavailable(t *testing.T) { assert.Equal(t, IsPrunedPruned, stateDB.IsPruned(hash.NewH256FromLowUint64BigEndian(3), 3)) // canonicalize block 5 but not commit it to db, block 4 is not pruned due to it is not - // commit to db yet (unavailable), return `MaybePruned` here because `apply_pending` is not + // commit to db yet (unavailable), return MaybePruned here because apply_pending is not // called and block 3 is still in cache c2, err := stateDB.CanonicalizeBlock(hash.NewH256FromLowUint64BigEndian(5)) assert.NoError(t, err) diff --git a/internal/cost-lru/cost_lru.go b/internal/cost-lru/cost_lru.go index e7e04631d3..cedc44449c 100644 --- a/internal/cost-lru/cost_lru.go +++ b/internal/cost-lru/cost_lru.go @@ -19,13 +19,13 @@ type LRU[K comparable, V any] struct { *freelru.LRU[K, V] } -// Costructor for [LRU]. +// New is the constructor for [LRU].
func New[K comparable, V any]( maxCost uint, hash freelru.HashKeyCallback[K], costFunc func(K, V) uint32, ) (*LRU[K, V], error) { - var capacity = uint32(math.MaxUint32) - if maxCost < math.MaxUint32 { - capacity = uint32(maxCost) + var capacity = uint32(math.MaxUint32) / 8 + if (maxCost / 8) < uint(capacity) { + capacity = uint32(maxCost / 8) } lru, err := freelru.New[K, V](capacity, hash) if err != nil { @@ -58,7 +58,7 @@ func (l *LRU[K, V]) costRemove(key K, value V) (cost uint32, removed bool, canAd for uint(cost)+l.currentCost > l.maxCost { _, _, removed = l.LRU.RemoveOldest() if !removed { - panic("huh?") + panic("should be removed") } } return cost, removed, true diff --git a/internal/hash-db/hash_db.go b/internal/hash-db/hash_db.go index 4b2d891a6d..aea78cf72c 100644 --- a/internal/hash-db/hash_db.go +++ b/internal/hash-db/hash_db.go @@ -5,8 +5,7 @@ package hashdb import "golang.org/x/exp/constraints" -// A trie node prefix, it is the nibble path from the trie root -// to the trie node. +// Prefix is a trie node prefix: the nibble path from the trie root to the trie node. // For a node containing no partial key value it is the full key. // For a value node or node containing a partial key, it is the full key minus its node partial // nibbles (the node key can be split into prefix and node partial). @@ -18,7 +17,7 @@ type Prefix struct { Padded *byte } -// An empty prefix constant. +// EmptyPrefix is the empty prefix constant. // Can be use when the prefix is not used internally or for root nodes. var EmptyPrefix = Prefix{} diff --git a/internal/kvdb/kvdb.go b/internal/kvdb/kvdb.go new file mode 100644 index 0000000000..0114f3265b --- /dev/null +++ b/internal/kvdb/kvdb.go @@ -0,0 +1,148 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package kvdb + +import "iter" + +// DBValue is the database value. +type DBValue []byte + +// DBKey is the database key. +type DBKey []byte + +type DBKeyValue struct { + Key DBKey + Value DBValue +} + +// DBOp is a database operation. +type DBOp interface { + Key() []byte + Col() uint32 +} + +type InsertDBOp struct { + col uint32 + key DBKey + Value DBValue +} + +func (idbo InsertDBOp) Key() []byte { + return idbo.key +} +func (idbo InsertDBOp) Col() uint32 { + return idbo.col +} + +type DeleteDBOp struct { + col uint32 + key DBKey +} + +func (ddbo DeleteDBOp) Key() []byte { + return ddbo.key +} +func (ddbo DeleteDBOp) Col() uint32 { + return ddbo.col +} + +type DeletePrefixDBOp struct { + col uint32 + Prefix DBKey +} + +func (dpdbo DeletePrefixDBOp) Key() []byte { + return dpdbo.Prefix +} +func (dpdbo DeletePrefixDBOp) Col() uint32 { + return dpdbo.col +} + +// DBTransaction is a write transaction. Batches a sequence of put/delete operations for efficiency. +type DBTransaction struct { + // Database operations. + Ops []DBOp +} + +// NewDBTransaction creates a new transaction. +func NewDBTransaction() DBTransaction { + return DBTransaction{ + Ops: make([]DBOp, 0), + } +} + +// Put inserts a key-value pair in the transaction. Any existing value will be overwritten upon write. +func (dbt *DBTransaction) Put(col uint32, key, value []byte) { + dbt.Ops = append(dbt.Ops, InsertDBOp{ + col: col, + key: key, + Value: value, + }) +} + +// Delete the value at the given key. +func (dbt *DBTransaction) Delete(col uint32, key []byte) { + dbt.Ops = append(dbt.Ops, DeleteDBOp{ + col: col, + key: key, + }) +} + +// DeletePrefix deletes all values with the given key prefix.
+// Using an empty prefix here will remove all keys +// (all keys start with the empty prefix). +func (dbt *DBTransaction) DeletePrefix(col uint32, prefix []byte) { + dbt.Ops = append(dbt.Ops, DeletePrefixDBOp{ + col: col, + Prefix: prefix, + }) +} + +// KeyValueDB is a generic key-value database. +// +// KeyValueDB deals with "column families", which can be thought of as distinct +// stores within a database. Keys written in one column family will not be accessible from +// any other. The number of column families must be specified at initialization, with a +// differing interface for each database. +type KeyValueDB interface { + // Get a value by key. + Get(col uint32, key []byte) (DBValue, error) + + // Get the first value matching the given prefix. + PrefixGet(col uint32, prefix []byte) (DBValue, error) + + // Write a transaction of changes to the backing store. + Write(transaction DBTransaction) error + + // Iterate over the data for a given column. + Iter(col uint32) iter.Seq2[DBKeyValue, error] + + // Iterate over the data for a given column, returning all key/value pairs + // where the key starts with the given prefix. + PrefixIter(col uint32, prefix []byte) iter.Seq2[DBKeyValue, error] + + // Check for the existence of a value by key. + HasKey(col uint32, key []byte) (bool, error) + + // Check for the existence of a value by prefix. + HasPrefix(col uint32, prefix []byte) (bool, error) +} + +// EndPrefix, when called for a given start prefix (inclusive), returns the corresponding end prefix (non-inclusive). +// This assumes the key bytes are ordered in lexicographical order. +// Since key length is not limited, for some cases we return nil because there is +// no bounded limit (every key in the series [], [255], [255, 255] ...). +func EndPrefix(prefix []byte) []byte { + for len(prefix) > 0 && prefix[len(prefix)-1] == 0xff { + prefix = prefix[:len(prefix)-1] + } + if len(prefix) > 0 { + last := prefix[len(prefix)-1] + last += 1 + prefix[len(prefix)-1] = last + return prefix + } else { + return nil + } +} diff --git a/internal/kvdb/kvdb_test.go b/internal/kvdb/kvdb_test.go new file mode 100644 index 0000000000..9a01c8c346 --- /dev/null +++ b/internal/kvdb/kvdb_test.go @@ -0,0 +1,26 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package kvdb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_EndPrefix(t *testing.T) { + require.Equal(t, []byte{5, 6, 8}, EndPrefix([]byte{5, 6, 7})) + require.Equal(t, []byte{5, 7}, EndPrefix([]byte{5, 6, 255})) + // This is not equal as the result is before start. + require.NotEqual(t, []byte{5, 255}, EndPrefix([]byte{5, 255, 255})) + // This is equal ([5, 255] will not be deleted because + // it is before start).
+ require.Equal(t, []byte{6}, EndPrefix([]byte{5, 255, 255})) + require.Nil(t, EndPrefix([]byte{255, 255, 255})) + + require.Equal(t, []byte{0x01}, EndPrefix([]byte{0x00, 0xff})) + require.Nil(t, EndPrefix([]byte{0xff})) + require.Nil(t, EndPrefix([]byte{})) + require.Equal(t, []byte("1"), EndPrefix([]byte("0"))) +} diff --git a/internal/kvdb/memory-kvdb/memory_kvdb.go b/internal/kvdb/memory-kvdb/memory_kvdb.go new file mode 100644 index 0000000000..dd990cf186 --- /dev/null +++ b/internal/kvdb/memory-kvdb/memory_kvdb.go @@ -0,0 +1,173 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package memorykvdb + +import ( + "errors" + "iter" + "slices" + "strings" + "sync" + + "github.com/ChainSafe/gossamer/internal/kvdb" + "github.com/tidwall/btree" +) + +// A key-value database fulfilling the KeyValueDB interface, living in memory. +// This is generally intended for tests and is not particularly optimised. +type MemoryKVDB struct { + columns map[uint32]*btree.Map[string, []byte] + sync.RWMutex +} + +var ErrInvalidColumn = errors.New("no such column family") + +// Create an in-memory database with the given number of columns. +// Columns will be indexable by 0..numCols +func New(numCols uint32) *MemoryKVDB { + cols := make(map[uint32]*btree.Map[string, []byte]) + for i := uint32(0); i < numCols; i++ { + cols[i] = btree.NewMap[string, []byte](0) + } + return &MemoryKVDB{ + columns: cols, + } +} + +func (im *MemoryKVDB) Get(col uint32, key []byte) (kvdb.DBValue, error) { + im.RLock() + defer im.RUnlock() + _, ok := im.columns[col] + if !ok { + return nil, ErrInvalidColumn + } + val, found := im.columns[col].Get(string(key)) + if found { + return val, nil + } + return nil, nil +} + +func (im *MemoryKVDB) PrefixGet(col uint32, prefix []byte) (kvdb.DBValue, error) { + im.RLock() + defer im.RUnlock() + _, ok := im.columns[col] + if !ok { + return nil, ErrInvalidColumn + } + var val kvdb.DBValue + im.columns[col].Scan(func(key string, value []byte) bool { + idx := strings.Index(key, string(prefix)) + if idx == 0 { + val = value + return false + } + return true + }) + return val, nil +} + +func (im *MemoryKVDB) Write(transaction kvdb.DBTransaction) error { + im.Lock() + defer im.Unlock() + for _, op := range transaction.Ops { + switch op := op.(type) { + case kvdb.InsertDBOp: + _, ok := im.columns[op.Col()] + if ok { + im.columns[op.Col()].Set(string(op.Key()), op.Value) + } + case kvdb.DeleteDBOp: + _, ok := im.columns[op.Col()] + if ok { + im.columns[op.Col()].Delete(string(op.Key())) + } + case kvdb.DeletePrefixDBOp: + _, ok := im.columns[op.Col()] + if ok { + if len(op.Prefix) == 0 { + im.columns[op.Col()].Clear() + } else { + var keys []string + startRange := slices.Clone(op.Prefix) + endRange := kvdb.EndPrefix(op.Prefix) + if endRange != nil { + im.columns[op.Col()].Scan(func(key string, value []byte) bool { + if strings.Compare(key, string(startRange)) >= 0 && strings.Compare(key, string(endRange)) < 0 { + keys = append(keys, key) + } + if strings.Compare(key, string(endRange)) >= 0 { + return false + } + return true + }) + } else { + im.columns[op.Col()].Scan(func(key string, value []byte) bool { + if strings.Compare(key, string(startRange)) >= 0 { + keys = append(keys, key) + } + return true + }) + } + for _, key := range keys { + im.columns[op.Col()].Delete(key) + } + } + } + default: + panic("unreachable") + } + } + return nil +} + +func (im *MemoryKVDB) Iter(col uint32) iter.Seq2[kvdb.DBKeyValue, error] { + return func(yield func(kvdb.DBKeyValue, 
error) bool) { + im.RLock() + defer im.RUnlock() + _, ok := im.columns[col] + if !ok { + yield(kvdb.DBKeyValue{}, ErrInvalidColumn) + return + } + im.columns[col].Scan(func(key string, value []byte) bool { + return yield(kvdb.DBKeyValue{Key: []byte(key), Value: value}, nil) + }) + } +} + +func (im *MemoryKVDB) PrefixIter(col uint32, prefix []byte) iter.Seq2[kvdb.DBKeyValue, error] { + return func(yield func(kvdb.DBKeyValue, error) bool) { + im.RLock() + defer im.RUnlock() + _, ok := im.columns[col] + if !ok { + yield(kvdb.DBKeyValue{}, ErrInvalidColumn) + return + } + im.columns[col].Scan(func(key string, value []byte) bool { + idx := strings.Index(key, string(prefix)) + if idx == 0 { + return yield(kvdb.DBKeyValue{Key: []byte(key), Value: value}, nil) + } + return true + }) + } +} + +func (im *MemoryKVDB) HasKey(col uint32, key []byte) (bool, error) { + val, err := im.Get(col, key) + if err != nil { + return false, err + } + return val != nil, nil +} + +func (im *MemoryKVDB) HasPrefix(col uint32, prefix []byte) (bool, error) { + val, err := im.PrefixGet(col, prefix) + if err != nil { + return false, err + } + return val != nil, nil +} diff --git a/internal/kvdb/memory-kvdb/memory_kvdb_test.go b/internal/kvdb/memory-kvdb/memory_kvdb_test.go new file mode 100644 index 0000000000..3e50dce2b1 --- /dev/null +++ b/internal/kvdb/memory-kvdb/memory_kvdb_test.go @@ -0,0 +1,323 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package memorykvdb + +import ( + "bytes" + "testing" + + "github.com/ChainSafe/gossamer/internal/kvdb" + "github.com/stretchr/testify/require" +) + +var _ kvdb.KeyValueDB = &MemoryKVDB{} + +const DeletePrefixNumColumns uint32 = 7 + +func Test_MemoryKVDB(t *testing.T) { + t.Run("get_fails_with_non_existing_column", func(t *testing.T) { + db := New(1) + _, err := db.Get(1, []byte{}) + require.Error(t, err) + }) + + t.Run("put_and_get", func(t *testing.T) { + db := New(1) + key1 := []byte("key1") + + var transaction kvdb.DBTransaction + transaction.Put(0, key1, []byte("horse")) + require.NoError(t, db.Write(transaction)) + + val, err := db.Get(0, key1) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("horse"), val) + }) + + t.Run("delete_and_get", func(t *testing.T) { + db := New(DeletePrefixNumColumns) + key1 := []byte("key1") + + var transaction kvdb.DBTransaction = kvdb.NewDBTransaction() + transaction.Put(0, key1, []byte("horse")) + require.NoError(t, db.Write(transaction)) + + val, err := db.Get(0, key1) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("horse"), val) + + transaction = kvdb.NewDBTransaction() + transaction.Delete(0, key1) + err = db.Write(transaction) + require.NoError(t, err) + val, err = db.Get(0, key1) + require.NoError(t, err) + require.Nil(t, val) + }) + + t.Run("delete_prefix", func(t *testing.T) { + db := New(DeletePrefixNumColumns) + keys := [][]byte{ + {}, + {0}, + {0, 1}, + {1}, + {1, 0}, + {1, 255}, + {1, 255, 255}, + {2}, + {2, 0}, + {2, 255}, + bytes.Repeat([]byte{255}, 16), + } + + var initDB = func(ix uint32) { + var transaction kvdb.DBTransaction + for i, key := range keys { + transaction.Put(ix, key, []byte{uint8(i)}) + } + err := db.Write(transaction) + require.NoError(t, err) + } + + var checkDB = func(ix uint32, content [11]bool) { + var state [11]bool + for i, key := range keys { + val, err := db.Get(ix, key) + require.NoError(t, err) + if val != nil { + state[i] = true + } + } + require.Equal(t, content, state) + } + + tests := []struct { + Prefix []byte + Content [11]bool + }{ 
+ // standard + { + Prefix: []byte{1}, + Content: [11]bool{true, true, true, false, false, false, false, true, true, true, true}, + }, + // edge + { + Prefix: []byte{1, 255, 255}, + Content: [11]bool{true, true, true, true, true, true, false, true, true, true, true}, + }, + // none 1 + { + Prefix: []byte{1, 2}, + Content: [11]bool{true, true, true, true, true, true, true, true, true, true, true}, + }, + // none 2 + { + Prefix: []byte{8}, + Content: [11]bool{true, true, true, true, true, true, true, true, true, true, true}, + }, + // last value + { + Prefix: []byte{255, 255}, + Content: [11]bool{true, true, true, true, true, true, true, true, true, true, false}, + }, + // last value, limit prefix + { + Prefix: []byte{255}, + Content: [11]bool{true, true, true, true, true, true, true, true, true, true, false}, + }, + // all + { + Prefix: []byte{}, + Content: [11]bool{false, false, false, false, false, false, false, false, false, false, false}, + }, + } + + for i, test := range tests { + ix := uint32(i) + initDB(ix) + batch := kvdb.NewDBTransaction() + batch.DeletePrefix(ix, test.Prefix) + err := db.Write(batch) + require.NoError(t, err) + checkDB(ix, test.Content) + } + }) + + t.Run("iter", func(t *testing.T) { + db := New(1) + key1 := []byte("key1") + key2 := []byte("key2") + + transaction := kvdb.NewDBTransaction() + transaction.Put(0, key1, key1) + transaction.Put(0, key2, key2) + require.NoError(t, db.Write(transaction)) + + var contents []kvdb.DBKeyValue + for kv, err := range db.Iter(0) { + require.NoError(t, err) + contents = append(contents, kv) + } + + require.Len(t, contents, 2) + require.Equal(t, kvdb.DBKey(key1), contents[0].Key) + require.Equal(t, kvdb.DBValue(key1), contents[0].Value) + require.Equal(t, kvdb.DBKey(key2), contents[1].Key) + require.Equal(t, kvdb.DBValue(key2), contents[1].Value) + }) + + t.Run("iter_with_prefix", func(t *testing.T) { + db := New(1) + key1 := []byte("0") + key2 := []byte("ab") + key3 := []byte("abc") + key4 := []byte("abcd") + + transaction := kvdb.NewDBTransaction() + transaction.Put(0, key1, key1) + transaction.Put(0, key2, key2) + transaction.Put(0, key3, key3) + transaction.Put(0, key4, key4) + require.NoError(t, db.Write(transaction)) + + // empty prefix + var contents []kvdb.DBKeyValue + for kv, err := range db.PrefixIter(0, []byte("")) { + require.NoError(t, err) + contents = append(contents, kv) + } + require.Len(t, contents, 4) + require.Equal(t, kvdb.DBKey(key1), contents[0].Key) + require.Equal(t, kvdb.DBKey(key2), contents[1].Key) + require.Equal(t, kvdb.DBKey(key3), contents[2].Key) + require.Equal(t, kvdb.DBKey(key4), contents[3].Key) + + // prefix a + contents = nil + for kv, err := range db.PrefixIter(0, []byte("a")) { + require.NoError(t, err) + contents = append(contents, kv) + } + require.Equal(t, kvdb.DBKey(key2), contents[0].Key) + require.Equal(t, kvdb.DBKey(key3), contents[1].Key) + require.Equal(t, kvdb.DBKey(key4), contents[2].Key) + + // prefix abc + contents = nil + for kv, err := range db.PrefixIter(0, []byte("abc")) { + require.NoError(t, err) + contents = append(contents, kv) + } + require.Equal(t, kvdb.DBKey(key3), contents[0].Key) + require.Equal(t, kvdb.DBKey(key4), contents[1].Key) + + // prefix abcde + contents = nil + for kv, err := range db.PrefixIter(0, []byte("abcde")) { + require.NoError(t, err) + contents = append(contents, kv) + } + require.Len(t, contents, 0) + + // prefix 0 + contents = nil + for kv, err := range db.PrefixIter(0, []byte("0")) { + require.NoError(t, err) + contents = append(contents, kv) 
+ } + require.Len(t, contents, 1) + require.Equal(t, kvdb.DBKey(key1), contents[0].Key) + }) + + t.Run("complex", func(t *testing.T) { + db := New(1) + key1 := []byte("02c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc") + key2 := []byte("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc") + key3 := []byte("04c00000000b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc") + key4 := []byte("04c01111110b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc") + key5 := []byte("04c02222220b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc") + + transaction := kvdb.NewDBTransaction() + transaction.Put(0, key1, []byte("cat")) + transaction.Put(0, key2, []byte("dog")) + transaction.Put(0, key3, []byte("caterpillar")) + transaction.Put(0, key4, []byte("beef")) + transaction.Put(0, key5, []byte("fish")) + require.NoError(t, db.Write(transaction)) + + val, err := db.Get(0, key1) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("cat"), val) + + var contents []kvdb.DBKeyValue + for kv, err := range db.Iter(0) { + require.NoError(t, err) + contents = append(contents, kv) + } + require.Len(t, contents, 5) + require.Equal(t, kvdb.DBKey(key1), contents[0].Key) + require.Equal(t, kvdb.DBValue("cat"), contents[0].Value) + require.Equal(t, kvdb.DBKey(key2), contents[1].Key) + require.Equal(t, kvdb.DBValue("dog"), contents[1].Value) + + contents = nil + for kv, err := range db.PrefixIter(0, []byte("04c0")) { + require.NoError(t, err) + contents = append(contents, kv) + } + require.Len(t, contents, 3) + require.Equal(t, kvdb.DBValue("caterpillar"), contents[0].Value) + require.Equal(t, kvdb.DBValue("beef"), contents[1].Value) + require.Equal(t, kvdb.DBValue("fish"), contents[2].Value) + + transaction = kvdb.NewDBTransaction() + transaction.Delete(0, key1) + require.NoError(t, db.Write(transaction)) + + val, err = db.Get(0, key1) + require.NoError(t, err) + require.Nil(t, val) + + transaction = kvdb.NewDBTransaction() + transaction.Put(0, key1, []byte("cat")) + require.NoError(t, db.Write(transaction)) + + transaction = kvdb.NewDBTransaction() + transaction.Put(0, key3, []byte("elephant")) + transaction.Delete(0, key1) + require.NoError(t, db.Write(transaction)) + val, err = db.Get(0, key1) + require.NoError(t, err) + require.Nil(t, val) + val, err = db.Get(0, key3) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("elephant"), val) + + val, err = db.PrefixGet(0, key3) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("elephant"), val) + val, err = db.PrefixGet(0, key2) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("dog"), val) + + transaction = kvdb.NewDBTransaction() + transaction.Put(0, key1, []byte("horse")) + transaction.Delete(0, key3) + require.NoError(t, db.Write(transaction)) + val, err = db.Get(0, key3) + require.NoError(t, err) + require.Nil(t, val) + val, err = db.Get(0, key1) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("horse"), val) + + val, err = db.Get(0, key3) + require.NoError(t, err) + require.Nil(t, val) + val, err = db.Get(0, key1) + require.NoError(t, err) + require.Equal(t, kvdb.DBValue("horse"), val) + }) +} diff --git a/internal/memory-db/memory_db.go b/internal/memory-db/memory_db.go index 803c1563ae..b43a631a4c 100644 --- a/internal/memory-db/memory_db.go +++ b/internal/memory-db/memory_db.go @@ -24,7 +24,7 @@ type Value interface { ~[]byte } -// Reference-counted memory-based [hashdb.HashDB] implementation. +// MemoryDB is a reference-counted memory-based [hashdb.HashDB] implementation. 
 type MemoryDB[H Hash, Hasher hashdb.Hasher[H], Key constraints.Ordered, KF KeyFunction[H, Key]] struct {
 	data           map[Key]dataRC
 	hashedNullNode H
 	nullNodeData   []byte
@@ -65,7 +65,7 @@ func (mdb *MemoryDB[H, Hasher, Key, KF]) Purge() {
 	}
 }
 
-// Return the internal key-value Map, clearing the current state.
+// Drain returns the internal key-value Map, clearing the current state.
 func (mdb *MemoryDB[H, Hasher, Key, KF]) Drain() map[Key]dataRC {
 	data := mdb.data
 	mdb.data = make(map[Key]dataRC)
@@ -217,21 +217,21 @@ type KeyFunction[Hash constraints.Ordered, Key any] interface {
 	Key(hash Hash, prefix hashdb.Prefix) Key
 }
 
-// Key function that only uses the hash
+// HashKey is a KeyFunction that only uses the hash.
 type HashKey[H Hash] struct{}
 
 func (HashKey[Hash]) Key(hash Hash, prefix hashdb.Prefix) Hash {
 	return hash
}
 
-// Key function that concatenates prefix and hash.
+// PrefixedKey is a KeyFunction that concatenates prefix and hash.
 type PrefixedKey[H Hash] struct{}
 
 func (PrefixedKey[H]) Key(key H, prefix hashdb.Prefix) string {
 	return string(NewPrefixedKey(key, prefix))
 }
 
-// Derive a database key from hash value of the node (key) and the node prefix.
+// NewPrefixedKey derives a database key from the hash value of the node (key) and the node prefix.
 func NewPrefixedKey[H Hash](key H, prefix hashdb.Prefix) []byte {
 	prefixedKey := prefix.Key
 	if prefix.Padded != nil {
diff --git a/internal/primitives/blockchain/backend.go b/internal/primitives/blockchain/backend.go
index 672708e6ce..47a1053a98 100644
--- a/internal/primitives/blockchain/backend.go
+++ b/internal/primitives/blockchain/backend.go
@@ -10,103 +10,95 @@ import (
 	"github.com/ChainSafe/gossamer/internal/primitives/runtime/generic"
 )
 
-// Header is the blockchain database header backend. Does not perform any validation.
-type HeaderBackend[Hash runtime.Hash, N runtime.Number] interface {
-	// Get block header. Returns `nil` if block is not found.
-	Header(hash Hash) (runtime.Header[N, Hash], error)
+// HeaderBackend is the blockchain database header backend. Does not perform any validation.
+type HeaderBackend[Hash runtime.Hash, N runtime.Number, Header runtime.Header[N, Hash]] interface {
+	// Header returns the block header. Returns nil if the block is not found.
+	Header(hash Hash) (*Header, error)
 
-	// Get blockchain info.
+	// Info returns blockchain [Info].
 	Info() Info[Hash, N]
 
-	// Get block status.
+	// Status returns [BlockStatus].
 	Status(hash Hash) (BlockStatus, error)
 
-	// Get block number by hash. Returns `nil` if the header is not in the chain.
+	// Number returns the block number by hash. Returns nil if the header is not in the chain.
 	Number(hash Hash) (*N, error)
 
-	// Get block hash by number. Returns `nil` if the header is not in the chain.
+	// Hash returns the block hash by number. Returns nil if the header is not in the chain.
 	Hash(number N) (*Hash, error)
 
-	// Convert an arbitrary block ID into a block hash.
+	// BlockHashFromID converts an arbitrary block ID into a block hash.
 	BlockHashFromID(id generic.BlockID) (*Hash, error)
 
-	// Convert an arbitrary block ID into a block hash.
+	// BlockNumberFromID converts an arbitrary block ID into a block number.
 	BlockNumberFromID(id generic.BlockID) (*N, error)
 }
 
-// Blockchain database backend. Does not perform any validation.
-type Backend[Hash runtime.Hash, N runtime.Number] interface {
-	HeaderBackend[Hash, N]
+// Backend is a blockchain database backend. Does not perform any validation.
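+//
+// An illustrative instantiation of the generic parameters (the concrete type
+// arguments below are assumptions for the sketch, not mandated by this patch):
+//
+//	var _ Backend[hash.H256, uint64, runtime.Header[uint64, hash.H256], runtime.Extrinsic]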
+type Backend[Hash runtime.Hash, N runtime.Number, Header runtime.Header[N, Hash], E runtime.Extrinsic] interface {
+	HeaderBackend[Hash, N, Header]
 	HeaderMetadata[Hash, N]
 
-	// Get block body. Returns `nil` if block is not found.
-	Body(hash Hash) ([]runtime.Extrinsic, error)
-	// Get block justifications. Returns `nil` if no justification exists.
+	// Body returns the block body. Returns nil if the block is not found.
+	Body(hash Hash) ([]E, error)
+	// Justifications returns block justifications. Returns nil if no justification exists.
 	Justifications(hash Hash) (runtime.Justifications, error)
-	// Get last finalized block hash.
+	// LastFinalized returns the last finalized block hash.
 	LastFinalized() (Hash, error)
 
-	// Returns hashes of all blocks that are leaves of the block tree.
+	// Leaves returns hashes of all blocks that are leaves of the block tree;
	// in other words, blocks that have no children and are therefore chain heads.
 	// Results must be ordered best (longest, highest) chain first.
 	Leaves() ([]Hash, error)
 
-	// Returns displaced leaves after the given block would be finalized.
-	//
-	// The returned leaves do not contain the leaves from the same height as `blockNumber`.
+	// DisplacedLeavesAfterFinalizing returns displaced leaves after the given block would be finalized.
+	// The returned leaves do not contain the leaves from the same height as blockNumber.
 	DisplacedLeavesAfterFinalizing(blockNumber N) ([]Hash, error)
 
-	// Return hashes of all blocks that are children of the block with `parentHash`.
+	// Children returns hashes of all blocks that are children of the block with parentHash.
 	Children(parentHash Hash) ([]Hash, error)
 
-	// Get the most recent block hash of the longest chain that contains
-	// a block with the given `baseHash`.
-	//
+	// LongestContaining gets the most recent block hash of the longest chain that contains
+	// a block with the given baseHash.
+	//
 	// The search space is always limited to blocks which are in the finalized
	// chain or descendants of it.
 	//
-	// Returns `nil` if `basehash` is not found in search space.
+	// Returns nil if baseHash is not found in the search space.
 	LongestContaining(baseHash Hash, importLock *sync.RWMutex) (*Hash, error)
 
-	// Get single indexed transaction by content hash. Note that this will only fetch transactions
-	// that are indexed by the runtime with `storage_index_transaction`.
+	// IndexedTransaction returns a single indexed transaction by content hash. Note that this will only fetch
+	// transactions that are indexed by the runtime with storage_index_transaction.
 	IndexedTransaction(hash Hash) ([]byte, error)
 
-	// Check if indexed transaction exists.
+	// HasIndexedTransaction checks if an indexed transaction exists.
 	HasIndexedTransaction(hash Hash) (bool, error)
 
+	// BlockIndexedBody will return the indexed body if it exists.
 	BlockIndexedBody(hash Hash) ([][]byte, error)
 }
 
-// Blockchain info
+// Info is the blockchain info.
 type Info[H, N any] struct {
-	// Best block hash.
-	BestHash H
-	// Best block number.
-	BestNumber N
-	// Genesis block hash.
-	GenesisHash H
-	// The head of the finalized chain.
-	FinalizedHash H
-	// Last finalized block number.
-	FinalizedNumber N
-	// Last finalized state.
-	FinalizedState *struct {
+	BestHash        H // Best block hash.
+	BestNumber      N // Best block number.
+	GenesisHash     H // Genesis block hash.
+	FinalizedHash   H // The head of the finalized chain.
+	FinalizedNumber N // Last finalized block number.
+	FinalizedState  *struct { // Last finalized state.
 		Hash   H
 		Number N
 	}
-	// Number of concurrent leave forks.
-	NumberLeaves uint
-	// Missing blocks after warp sync. (start, end).
-	BlockGap *[2]N
+	NumberLeaves uint  // Number of concurrent leaf forks.
+	BlockGap     *[2]N // Missing blocks after warp sync. (start, end).
 }
 
 // BlockStatus is block status.
 type BlockStatus uint
 
 const (
-	// Already in the blockchain.
+	// BlockStatusInChain represents a block that is already in the blockchain.
 	BlockStatusInChain BlockStatus = iota
-	// Not in the queue or the blockchain.
+	// BlockStatusUnknown represents a block that is not in the queue or the blockchain.
 	BlockStatusUnknown
 )
diff --git a/internal/primitives/blockchain/error.go b/internal/primitives/blockchain/error.go
new file mode 100644
index 0000000000..05af01c0f2
--- /dev/null
+++ b/internal/primitives/blockchain/error.go
@@ -0,0 +1,17 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package blockchain
+
+import "errors"
+
+var (
+	ErrBackend                   = errors.New("backend error")
+	ErrUnknownBlock              = errors.New("unknown block")
+	ErrNonSequentialFinalization = errors.New("did not finalize blocks in sequential order")
+	ErrNotInFinalizedChain       = errors.New("potential long-range attack: block not in finalized chain")
+	ErrInvalidState              = errors.New("provided state is invalid")
+	ErrBadJustification          = errors.New("bad justification for header")
+	ErrStateDatabase             = errors.New("state database error")
+	ErrSetHeadTooOld             = errors.New("failed to set the chain head to a block that's too old")
+)
diff --git a/internal/primitives/blockchain/header_metadata.go b/internal/primitives/blockchain/header_metadata.go
index 5e6aeb72a9..f498916bea 100644
--- a/internal/primitives/blockchain/header_metadata.go
+++ b/internal/primitives/blockchain/header_metadata.go
@@ -4,20 +4,211 @@
 package blockchain
 
 import (
+	"slices"
 	"sync"
 
 	"github.com/ChainSafe/gossamer/internal/primitives/runtime"
 	lru "github.com/hashicorp/golang-lru/v2"
 )
 
-// Handles header metadata: hash, number, parent hash, etc.
+// LowestCommonAncestor returns the lowest common ancestor of two blocks in the tree.
+//
+// This implementation is efficient because our trees have very few and
+// small branches, and because of our current query pattern:
+// lca(best, final), lca(best + 1, final), lca(best + 2, final), etc.
+// The first call is O(h) but the others are O(1).
+func LowestCommonAncestor[H runtime.Hash, N runtime.Number](
+	backend HeaderMetadata[H, N], id1 H, id2 H,
+) (HashNumber[H, N], error) {
+	header1, err := backend.HeaderMetadata(id1)
+	if err != nil {
+		return HashNumber[H, N]{}, err
+	}
+	if header1.Parent == id2 {
+		return HashNumber[H, N]{Hash: id2, Number: header1.Number - 1}, nil
+	}
+
+	header2, err := backend.HeaderMetadata(id2)
+	if err != nil {
+		return HashNumber[H, N]{}, err
+	}
+	if header2.Parent == id1 {
+		return HashNumber[H, N]{Hash: id1, Number: header1.Number}, nil
+	}
+
+	origHeader1 := header1
+	origHeader2 := header2
+
+	// We move through ancestor links as much as possible, since ancestor >= parent.
+	for header1.Number > header2.Number {
+		ancestor1, err := backend.HeaderMetadata(header1.ancestor)
+		if err != nil {
+			return HashNumber[H, N]{}, err
+		}
+
+		if ancestor1.Number >= header2.Number {
+			header1 = ancestor1
+		} else {
+			break
+		}
+	}
+	for header1.Number < header2.Number {
+		ancestor2, err := backend.HeaderMetadata(header2.ancestor)
+		if err != nil {
+			return HashNumber[H, N]{}, err
+		}
+
+		if ancestor2.Number > header1.Number {
+			header2 = ancestor2
+		} else {
+			break
+		}
+	}
+
+	// Then we move the remaining path using parent links.
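+	// An illustrative trace (not from the original source): with header1 at
+	// number 7 and header2 at number 5, header1 follows Parent twice to reach
+	// number 5; the two sides then alternate single Parent steps until their
+	// hashes match at the common ancestor.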
+	for header1.Hash != header2.Hash {
+		if header1.Number > header2.Number {
+			header1, err = backend.HeaderMetadata(header1.Parent)
+			if err != nil {
+				return HashNumber[H, N]{}, err
+			}
+		} else {
+			header2, err = backend.HeaderMetadata(header2.Parent)
+			if err != nil {
+				return HashNumber[H, N]{}, err
+			}
+		}
+	}
+
+	// Update cached ancestor links.
+	if origHeader1.Number > header1.Number {
+		origHeader1.ancestor = header1.Hash
+		backend.InsertHeaderMetadata(origHeader1.Hash, origHeader1)
+	}
+	if origHeader2.Number > header2.Number {
+		origHeader2.ancestor = header1.Hash
+		backend.InsertHeaderMetadata(origHeader2.Hash, origHeader2)
+	}
+
+	return HashNumber[H, N]{Hash: header1.Hash, Number: header1.Number}, nil
+}
+
+// NewTreeRoute computes a [TreeRoute] between two blocks. See [TreeRoute] docs for more details.
+func NewTreeRoute[H runtime.Hash, N runtime.Number](
+	backend HeaderMetadata[H, N], from, to H,
+) (TreeRoute[H, N], error) {
+	fromMeta, err := backend.HeaderMetadata(from)
+	if err != nil {
+		return TreeRoute[H, N]{}, err
+	}
+	toMeta, err := backend.HeaderMetadata(to)
+	if err != nil {
+		return TreeRoute[H, N]{}, err
+	}
+
+	var (
+		fromBranch []HashNumber[H, N]
+		toBranch   []HashNumber[H, N]
+	)
+
+	for toMeta.Number > fromMeta.Number {
+		toBranch = append(toBranch, HashNumber[H, N]{Hash: toMeta.Hash, Number: toMeta.Number})
+
+		toMeta, err = backend.HeaderMetadata(toMeta.Parent)
+		if err != nil {
+			return TreeRoute[H, N]{}, err
+		}
+	}
+
+	for fromMeta.Number > toMeta.Number {
+		fromBranch = append(fromBranch, HashNumber[H, N]{Hash: fromMeta.Hash, Number: fromMeta.Number})
+
+		fromMeta, err = backend.HeaderMetadata(fromMeta.Parent)
+		if err != nil {
+			return TreeRoute[H, N]{}, err
+		}
+	}
+
+	// Numbers are equal now. Walk backwards until the blocks are the same.
+	for toMeta.Hash != fromMeta.Hash {
+		toBranch = append(toBranch, HashNumber[H, N]{Hash: toMeta.Hash, Number: toMeta.Number})
+		toMeta, err = backend.HeaderMetadata(toMeta.Parent)
+		if err != nil {
+			return TreeRoute[H, N]{}, err
+		}
+
+		fromBranch = append(fromBranch, HashNumber[H, N]{Hash: fromMeta.Hash, Number: fromMeta.Number})
+		fromMeta, err = backend.HeaderMetadata(fromMeta.Parent)
+		if err != nil {
+			return TreeRoute[H, N]{}, err
+		}
+	}
+
+	// Add the pivot block and append the reversed to-branch
+	// (toBranch was collected in reverse order).
+	pivot := uint(len(fromBranch))
+	fromBranch = append(fromBranch, HashNumber[H, N]{Hash: toMeta.Hash, Number: toMeta.Number})
+	slices.Reverse(toBranch)
+	fromBranch = append(fromBranch, toBranch...)
+
+	return TreeRoute[H, N]{Route: fromBranch, Pivot: pivot}, nil
+}
+
+// HashNumber is the hash and number of a block.
+type HashNumber[H runtime.Hash, N runtime.Number] struct {
+	// The number of the block.
+	Number N
+	// The hash of the block.
+	Hash H
+}
+
+// A TreeRoute from one block to another in the chain.
+//
+// All blocks prior to the pivot in the route are the reverse-order unique ancestry
+// of the first block, the block at the pivot index is the common ancestor,
+// and all blocks after the pivot are the ancestry of the second block, in
+// order.
+//
+// The ancestry sets will include the given blocks, and thus the tree-route is
+// never empty.
+//
+// Tree route from R1 to E2. Retracted is [R1, R2, R3], Common is C, enacted [E1, E2]
+//
+//	  <- R3 <- R2 <- R1
+//	 /
+//	C
+//	 \-> E1 -> E2
+//
+// Tree route from C to E2. Retracted empty. Common is C, enacted [E1, E2]
+//
+//	C -> E1 -> E2
+type TreeRoute[H runtime.Hash, N runtime.Number] struct {
+	// Route of blocks between the two endpoints, including both of them.
+	Route []HashNumber[H, N]
+	// Pivot is the index of the common ancestor block within Route.
+	Pivot uint
+}
+
+// Retracted returns a slice of all retracted blocks in reverse order (towards common ancestor).
+func (tr TreeRoute[H, N]) Retracted() []HashNumber[H, N] {
+	return tr.Route[:tr.Pivot]
+}
+
+// CommonBlock returns the common ancestor block.
+func (tr TreeRoute[H, N]) CommonBlock() HashNumber[H, N] {
+	return tr.Route[tr.Pivot]
+}
+
+// Enacted returns a slice of enacted blocks (descendants of the common ancestor).
+func (tr TreeRoute[H, N]) Enacted() []HashNumber[H, N] {
+	return tr.Route[tr.Pivot+1:]
+}
+
+// HeaderMetadata handles header metadata: hash, number, parent hash, etc.
 type HeaderMetadata[H, N any] interface {
 	HeaderMetadata(hash H) (CachedHeaderMetadata[H, N], error)
 	InsertHeaderMetadata(hash H, headerMetadata CachedHeaderMetadata[H, N])
 	RemoveHeaderMetadata(hash H)
 }
 
-// Caches header metadata in an in-memory LRU cache.
+// HeaderMetadataCache caches header metadata in an in-memory LRU cache.
 type HeaderMetadataCache[H comparable, N any] struct {
 	cache *lru.Cache[H, CachedHeaderMetadata[H, N]]
 	sync.RWMutex
@@ -38,7 +229,7 @@ func NewHeaderMetadataCache[H comparable, N any](capacity ...uint32) HeaderMetad
 	}
 }
 
-// HeaderMetadata returns the CachedHeaderMetadata for a given hash or `nil` if not found.
+// HeaderMetadata returns the CachedHeaderMetadata for a given hash or nil if not found.
 func (hmc *HeaderMetadataCache[H, N]) HeaderMetadata(hash H) *CachedHeaderMetadata[H, N] {
 	hmc.RLock()
 	defer hmc.RUnlock()
@@ -49,14 +240,14 @@ func (hmc *HeaderMetadataCache[H, N]) HeaderMetadata(hash H) *CachedHeaderMetada
 }
 
-// InsertHeaderMetadata inserts a supplied `metadata` for a `hash`.
+// InsertHeaderMetadata inserts a supplied metadata for a hash.
 func (hmc *HeaderMetadataCache[H, N]) InsertHeaderMetadata(hash H, metadata CachedHeaderMetadata[H, N]) {
 	hmc.Lock()
 	defer hmc.Unlock()
 	hmc.cache.Add(hash, metadata)
 }
 
-// RemoveHeaderMetadata removes the `metadata` for a `hash`.
+// RemoveHeaderMetadata removes the metadata for a hash.
 func (hmc *HeaderMetadataCache[H, N]) RemoveHeaderMetadata(hash H) {
 	hmc.Lock()
 	defer hmc.Unlock()
@@ -77,7 +268,7 @@ type CachedHeaderMetadata[H, N any] struct {
 	ancestor H
 }
 
-// NewCachedHeaderMetadata is constructor for CachedHeaderMetadata
+// NewCachedHeaderMetadata is the constructor for [CachedHeaderMetadata].
 func NewCachedHeaderMetadata[H runtime.Hash, N runtime.Number](header runtime.Header[N, H]) CachedHeaderMetadata[H, N] {
 	return CachedHeaderMetadata[H, N]{
 		Hash: header.Hash(),
diff --git a/internal/primitives/consensus/grandpa/grandpa.go b/internal/primitives/consensus/grandpa/grandpa.go
index 3cf60a82a0..01141a4683 100644
--- a/internal/primitives/consensus/grandpa/grandpa.go
+++ b/internal/primitives/consensus/grandpa/grandpa.go
@@ -12,7 +12,7 @@ import (
 	"golang.org/x/exp/constraints"
 )
 
-var logger = log.NewFromGlobal(log.AddContext("consensus", "grandpa"))
+var logger = log.NewFromGlobal(log.AddContext("pkg", "consensus/grandpa"))
 
 // AuthorityID is the identity of a Grandpa authority.
 type AuthorityID = app.Public
diff --git a/internal/primitives/core/crypto/crypto.go b/internal/primitives/core/crypto/crypto.go
index bcdd7427e8..5eb60e2625 100644
--- a/internal/primitives/core/crypto/crypto.go
+++ b/internal/primitives/core/crypto/crypto.go
@@ -18,7 +18,7 @@ const DevPhrase = "bottom drive obey lake curtain smoke basket hold race lonely
 
 // DeriveJunction is a single derivation junction description. It is the single parameter
 // used when creating a new secret key from an existing secret key and, in the case of
-// `SoftRaw` and `SoftIndex` a new public key from an existing public key.
+// SoftRaw and SoftIndex a new public key from an existing public key.
 type DeriveJunction struct {
 	inner any
 }
@@ -113,7 +113,7 @@ var junctionRegex = regexp.MustCompile(`/(/?[^/]+)`)
 
 // Trait used for types that are really just a fixed-length array.
 type Bytes interface {
-	// Return a `Vec` filled with raw data.
+	// Return a []byte filled with raw data.
 	Bytes() []byte
 }
 
@@ -125,29 +125,29 @@ type Public[Signature any] interface {
 	Verify(sig Signature, message []byte) bool
 }
 
-// SecretURI A secret uri (`SURI`) that can be used to generate a key pair.
+// SecretURI is a secret URI (SURI) that can be used to generate a key pair.
 //
-// The `SURI` can be parsed from a string. The string is interpreted in the following way:
+// The SURI can be parsed from a string. The string is interpreted in the following way:
 //
-// - If `string` is a possibly `0x` prefixed 64-digit hex string, then it will be interpreted
-// directly as a secret key (aka "seed" in `subkey`).
-// - If `string` is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will
+// - If string is a possibly 0x prefixed 64-digit hex string, then it will be interpreted
+// directly as a secret key (aka "seed" in subkey).
+// - If string is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will
 // be derived from it. In this case:
-// - the phrase may be followed by one or more items delimited by `/` characters.
-// - the path may be followed by `///`, in which case everything after the `///` is treated
+// - the phrase may be followed by one or more items delimited by "/" characters.
+// - the path may be followed by "///", in which case everything after the "///" is treated
 //
 // as a password.
-// - If `string` begins with a `/` character it is prefixed with the public `DevPhrase`
+// - If string begins with a "/" character it is prefixed with the public DevPhrase
 // and interpreted as above.
 //
 // In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as
-// integers, non-numeric items as strings. Junctions prefixed with `/` are interpreted as soft
-// junctions, and with `//` as hard junctions.
+// integers, non-numeric items as strings. Junctions prefixed with "/" are interpreted as soft
+// junctions, and with "//" as hard junctions.
 //
-// There is no correspondence mapping between `SURI` strings and the keys they represent.
+// There is no correspondence mapping between SURI strings and the keys they represent.
 // Two different non-identical strings can actually lead to the same secret being derived.
 // Notably, integer junction indices may be legally prefixed with arbitrary number of zeros.
-// Similarly an empty password (ending the `SURI` with `///`) is perfectly valid and will
+// Similarly an empty password (ending the SURI with "///") is perfectly valid and will
 // generally be equivalent to no password at all.
 type SecretURI struct {
 	// The phrase to derive the private key.
diff --git a/internal/primitives/core/ed25519/ed25519.go b/internal/primitives/core/ed25519/ed25519.go
index d7db6c21c7..e3f14d9527 100644
--- a/internal/primitives/core/ed25519/ed25519.go
+++ b/internal/primitives/core/ed25519/ed25519.go
@@ -21,7 +21,7 @@ import (
 // A secret seed.
 type seed [32]byte
 
-// A Public key.
+// Public is a public key.
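+//
+// Illustrative verification (a sketch; the values are placeholders):
+//
+//	var pub Public
+//	var sig Signature
+//	ok := pub.Verify(sig, []byte("message"))
+//	_ = ok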
type Public [32]byte // Bytes returns a byte slice @@ -34,7 +34,7 @@ func (p Public) Verify(sig Signature, message []byte) bool { return ed25519.Verify(p[:], message, sig[:]) } -// NewPublic creates a new instance from the given 32-byte `data`. +// NewPublic creates a new instance from the given 32-byte data. // // NOTE: No checking goes on to ensure this is a real public key. Only use it if // you are certain that the array actually is a pubkey. @@ -88,10 +88,10 @@ func (p Pair) Seed() [32]byte { func (p Pair) Public() crypto.Public[Signature] { pubKey, ok := p.public.(ed25519.PublicKey) if !ok { - panic("huh?") + panic("unexpected type") } if len(pubKey) != 32 { - panic("huh?") + panic("unexpected length") } var pub Public copy(pub[:], pubKey) @@ -102,7 +102,7 @@ func (p Pair) Public() crypto.Public[Signature] { func (p Pair) Sign(message []byte) Signature { signed := ed25519.Sign(p.secret, message) if len(signed) != 64 { - panic("huh?") + panic("unexpected length") } var sig Signature copy(sig[:], signed) @@ -112,7 +112,7 @@ func (p Pair) Sign(message []byte) Signature { // NewGeneratedPair will generate new secure (random) key pair. // // This is only for ephemeral keys really, since you won't have access to the secret key -// for storage. If you want a persistent key pair, use `generate_with_phrase` instead. +// for storage. If you want a persistent key pair, use [NewGeneratedPairWithPhrase] instead. func NewGeneratedPair() (Pair, [32]byte) { seedSlice := make([]byte, 32) _, err := rand.Read(seedSlice) @@ -127,9 +127,9 @@ func NewGeneratedPair() (Pair, [32]byte) { // NewGeneratedPairWithPhrase will generate new secure (random) key pair and provide the recovery phrase. // -// You can recover the same key later with `from_phrase`. +// You can recover the same key later with NewPairFromPhrase. // -// This is generally slower than `generate()`, so prefer that unless you need to persist +// This is generally slower than generate(), so prefer that unless you need to persist // the key from the current session. func NewGeneratedPairWithPhrase(password *string) (Pair, string, [32]byte) { entropy, err := bip39.NewEntropy(128) @@ -147,7 +147,7 @@ func NewGeneratedPairWithPhrase(password *string) (Pair, string, [32]byte) { return pair, phrase, seed } -// NewPairFromPhrase returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. +// NewPairFromPhrase returns the KeyPair from the English BIP39 seed phrase. func NewPairFromPhrase(phrase string, password *string) (pair Pair, seed [32]byte, err error) { pass := "" if password != nil { @@ -159,7 +159,7 @@ func NewPairFromPhrase(phrase string, password *string) (pair Pair, seed [32]byt } if !(32 <= len(bigSeed)) { - panic("huh?") + panic("unexpected length") } seedSlice := bigSeed[:][0:32] @@ -167,18 +167,17 @@ func NewPairFromPhrase(phrase string, password *string) (pair Pair, seed [32]byt return NewPairFromSeedSlice(seedSlice), seed, nil } -// NewPairFromSeed will generate new key pair from the provided `seed`. +// NewPairFromSeed will generate new key pair from the provided seed. // -// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed +// @WARNING: THIS WILL ONLY BE SECURE IF THE seed IS SECURE. If it can be guessed // by an attacker then they can also derive your key. func NewPairFromSeed(seed [32]byte) Pair { return NewPairFromSeedSlice(seed[:]) } -// NewPairFromSeedSlice will make a new key pair from secret seed material. The slice must be the correct size or -// it will return `None`. 
+// NewPairFromSeedSlice will make a new key pair from secret seed material. // -// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed +// @WARNING: THIS WILL ONLY BE SECURE IF THE seed IS SECURE. If it can be guessed // by an attacker then they can also derive your key. func NewPairFromSeedSlice(seedSlice []byte) Pair { secret := ed25519.NewKeyFromSeed(seedSlice) @@ -189,37 +188,37 @@ func NewPairFromSeedSlice(seedSlice []byte) Pair { } } -// NewPairFromStringWithSeed interprets the string `s` in order to generate a key Pair. Returns +// NewPairFromStringWithSeed interprets the string s in order to generate a key Pair. Returns // both the pair and an optional seed, in the case that the pair can be expressed as a direct // derivation from a seed (some cases, such as Sr25519 derivations with path components, cannot). // // This takes a helper function to do the key generation from a phrase, password and // junction iterator. // -// - If `s` is a possibly `0x` prefixed 64-digit hex string, then it will be interpreted -// directly as a secret key (aka "seed" in `subkey`). -// - If `s` is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will +// - If s is a possibly "0x" prefixed 64-digit hex string, then it will be interpreted +// directly as a secret key (aka "seed" in subkey). +// - If s is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will // be derived from it. In this case: -// - the phrase may be followed by one or more items delimited by `/` characters. -// - the path may be followed by `///`, in which case everything after the `///` is treated +// - the phrase may be followed by one or more items delimited by "/" characters. +// - the path may be followed by "///", in which case everything after the "///" is treated // // as a password. -// - If `s` begins with a `/` character it is prefixed with the Substrate public `DevPhrase` +// - If s begins with a "/" character it is prefixed with the Substrate public DevPhrase // and // // interpreted as above. // // In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as -// integers, non-numeric items as strings. Junctions prefixed with `/` are interpreted as soft -// junctions, and with `//` as hard junctions. +// integers, non-numeric items as strings. Junctions prefixed with "/" are interpreted as soft +// junctions, and with "//" as hard junctions. // // There is no correspondence mapping between SURI strings and the keys they represent. // Two different non-identical strings can actually lead to the same secret being derived. // Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. -// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will +// Similarly an empty password (ending the SURI with "///") is perfectly valid and will // generally be equivalent to no password at all. // -// `nil` is returned if no matches are found. +// nil is returned if no matches are found. func NewPairFromStringWithSeed(s string, passwordOverride *string) ( pair crypto.Pair[[32]byte, Signature], seed [32]byte, err error, ) { @@ -255,7 +254,7 @@ func NewPairFromStringWithSeed(s string, passwordOverride *string) ( return root.Derive(sURI.Junctions, &seed) } -// NewPairFromString interprets the string `s` in order to generate a key pair. +// NewPairFromString interprets the string s in order to generate a key pair. 
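+//
+// Illustrative usage (a sketch; the "//Alice" dev URI is an assumed example
+// input, not part of this patch):
+//
+//	pair, err := NewPairFromString("//Alice", nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = pair.Public()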
 func NewPairFromString(s string, passwordOverride *string) (crypto.Pair[[32]byte, Signature], error) {
 	pair, _, err := NewPairFromStringWithSeed(s, passwordOverride)
 	return pair, err
@@ -266,7 +265,7 @@ var _ crypto.Pair[[32]byte, Signature] = Pair{}
 
 // Signature is a signature (a 512-bit value).
 type Signature [64]byte
 
-// NewSignatureFromRaw constructors a new instance from the given 64-byte `data`.
+// NewSignatureFromRaw constructs a new instance from the given 64-byte data.
 //
 // NOTE: No checking goes on to ensure this is a real signature. Only use it if
 // you are certain that the array actually is a signature.
diff --git a/internal/primitives/core/hash/hash.go b/internal/primitives/core/hash/hash.go
index 9458afc8b8..7c44688ec0 100644
--- a/internal/primitives/core/hash/hash.go
+++ b/internal/primitives/core/hash/hash.go
@@ -17,6 +17,10 @@ type H256 string
 
 // Bytes returns a byte slice
 func (h256 H256) Bytes() []byte {
+	if h256 == "" {
+		arr := [32]byte{}
+		return arr[:]
+	}
 	return []byte(h256)
 }
 
diff --git a/internal/primitives/core/offchain/offchain.go b/internal/primitives/core/offchain/offchain.go
new file mode 100644
index 0000000000..0a8c289c12
--- /dev/null
+++ b/internal/primitives/core/offchain/offchain.go
@@ -0,0 +1,35 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package offchain
+
+// OffchainStorage is offchain DB persisted (non-fork-aware) storage.
+type OffchainStorage interface {
+	// Set persists a value in storage under given key and prefix.
+	Set(prefix, key, value []byte)
+
+	// Remove clears a storage entry under given key and prefix.
+	Remove(prefix, key []byte)
+
+	// Get retrieves a value from storage under given key and prefix.
+	Get(prefix, key []byte) []byte
+
+	// CompareAndSet will replace the value in storage if given oldValue matches the current one.
+	//
+	// Returns true if the value has been set and false otherwise.
+	CompareAndSet(prefix, key, oldValue, newValue []byte) bool
+}
+
+// OffchainOverlayedChanges is the interface constraint of [OffchainOverlayedChange].
+type OffchainOverlayedChanges interface {
+	OffchainOverlayedChangeRemove | OffchainOverlayedChangeSetValue
+}
+
+// OffchainOverlayedChange is a change to be applied to the offchain worker db in regards to a key.
+type OffchainOverlayedChange any
+
+// OffchainOverlayedChangeRemove removes the data associated with the key.
+type OffchainOverlayedChangeRemove struct{}
+
+// OffchainOverlayedChangeSetValue overwrites the value of an associated key.
+type OffchainOverlayedChangeSetValue []byte
diff --git a/internal/primitives/database/database.go b/internal/primitives/database/database.go
index 90bf362e7b..ea34ddf6bb 100644
--- a/internal/primitives/database/database.go
+++ b/internal/primitives/database/database.go
@@ -38,8 +38,8 @@ func (Remove) isChange() {}
 // Store will store the preimage of hash
 type Store[H any] struct {
 	ColumnID
-	Hash     H
-	Preimage []byte
+	Hash  H
+	Value []byte
 }
 
 func (Store[H]) isChange() {}
@@ -61,44 +61,44 @@ type Release[H any] struct {
 func (Release[H]) isChange() {}
 
 // Transaction is a series of changes to the database that can be committed atomically. They do not take effect until
-// passed into `Database.Commit`.
+// passed into Database.Commit.
 type Transaction[H any] []Change
 
-// Set the value of `key` in `col` to `value`, replacing anything that is there currently.
+// Set the value of key in col to value, replacing anything that is there currently.
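+//
+// A minimal sketch of composing a transaction (hash.H256 as the hash type is
+// an assumption for illustration):
+//
+//	var tx Transaction[hash.H256]
+//	tx.Set(0, []byte("key"), []byte("value"))
+//	tx.Remove(0, []byte("stale"))
+//	// pass tx to Database.Commit to apply the changes atomically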
func (t *Transaction[H]) Set(col ColumnID, key []byte, value []byte) { *t = append(*t, Set{col, key, value}) } -// Remove the value of `key` in `col`. +// Remove the value of key in col. func (t *Transaction[H]) Remove(col ColumnID, key []byte) { *t = append(*t, Remove{col, key}) } -// Store the `preimage` of `hash` into the database, so that it may be looked up later with -// `Database.Get`. This may be called multiple times, but subsequent -// calls will ignore `preimage` and simply increase the number of references on `hash`. +// Store the preimage of hash into the database, so that it may be looked up later with +// Database.Get. This may be called multiple times, but subsequent +// calls will ignore preimage and simply increase the number of references on hash. func (t *Transaction[H]) Store(col ColumnID, hash H, preimage []byte) { *t = append(*t, Store[H]{col, hash, preimage}) } -// Reference will increase the number of references for `hash` in the database. +// Reference will increase the number of references for hash in the database. func (t *Transaction[H]) Reference(col ColumnID, hash H) { *t = append(*t, Reference[H]{col, hash}) } -// Release the preimage of `hash` from the database. An equal number of these to the number of corresponding `store`s -// must have been given before it is legal for `Database::get` to be unable to provide the preimage. +// Release the preimage of hash from the database. An equal number of these to the number of corresponding stores +// must have been given before Get() will be unable to provide the preimage. func (t *Transaction[H]) Release(col ColumnID, hash H) { *t = append(*t, Release[H]{col, hash}) } // Database is the interface to commit transactions as well as retrieve values type Database[H runtime.Hash] interface { - // Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` + // Commit the transaction to the database atomically. Any further calls to get or lookup // will reflect the new state. Commit(transaction Transaction[H]) error - // Retrieve the value previously stored against `key` or `nil` if `key` is not currently in the database. + // Retrieve the value previously stored against key or nil if key is not currently in the database. Get(col ColumnID, key []byte) []byte // Check if the value exists in the database without retrieving it. 
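+	//
+	// A sketch of the Store/Reference/Release reference-counting contract
+	// (h and preimage are placeholders; hash.H256 is an assumed hash type):
+	//
+	//	var tx Transaction[hash.H256]
+	//	tx.Store(0, h, preimage) // reference count becomes 1
+	//	tx.Reference(0, h)       // reference count becomes 2
+	//	tx.Release(0, h)         // back to 1; the preimage is still retrievable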
diff --git a/internal/primitives/database/kvdb.go b/internal/primitives/database/kvdb.go new file mode 100644 index 0000000000..9e7cce7e53 --- /dev/null +++ b/internal/primitives/database/kvdb.go @@ -0,0 +1,116 @@ +// Copyright 2024 ChainSafe Systems (ON) +// SPDX-License-Identifier: LGPL-3.0-only + +package database + +import ( + "encoding/binary" + "fmt" + + "github.com/ChainSafe/gossamer/internal/kvdb" + "github.com/ChainSafe/gossamer/internal/primitives/runtime" +) + +// DBAdapter is a wrapper around [kvdb.KeyValueDB] that implements [Database] interface +type DBAdapter[H runtime.Hash] struct { + db kvdb.KeyValueDB +} + +func NewDBAdapter[H runtime.Hash](db kvdb.KeyValueDB) *DBAdapter[H] { + return &DBAdapter[H]{ + db: db, + } +} + +func (dba *DBAdapter[H]) readCounter(col ColumnID, key []byte) (counterKey []byte, counter *uint32, err error) { + // Add a key suffix for the counter + counterKey = key + counterKey = append(counterKey, 0) + val, err := dba.db.Get(uint32(col), counterKey) + if err != nil { + return nil, nil, err + } + if val != nil { + if len(val) != 4 { + return nil, nil, fmt.Errorf("unexpected counter len: %d", len(val)) + } + counterData := val + counter := binary.LittleEndian.Uint32(counterData) + return counterKey, &counter, nil + } + return counterKey, nil, nil +} + +func (dba *DBAdapter[H]) Commit(transaction Transaction[H]) error { + tx := kvdb.NewDBTransaction() + for _, change := range transaction { + switch change := change.(type) { + case Set: + tx.Put(uint32(change.ColumnID), change.Key, change.Value) + case Remove: + tx.Delete(uint32(change.ColumnID), change.Key) + case Store[H]: + counterKey, counter, err := dba.readCounter(change.ColumnID, change.Hash.Bytes()) + if err != nil { + return err + } + if counter != nil { + *counter += 1 + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, *counter) + tx.Put(uint32(change.ColumnID), counterKey, buf) + } else { + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, 1) + tx.Put(uint32(change.ColumnID), counterKey, buf) + tx.Put(uint32(change.ColumnID), change.Hash.Bytes(), change.Value) + } + case Reference[H]: + counterKey, counter, err := dba.readCounter(change.ColumnID, change.Hash.Bytes()) + if err != nil { + return err + } + if counter != nil { + *counter += 1 + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, *counter) + tx.Put(uint32(change.ColumnID), counterKey, buf) + } + case Release[H]: + counterKey, counter, err := dba.readCounter(change.ColumnID, change.Hash.Bytes()) + if err != nil { + return err + } + if counter != nil { + *counter -= 1 + if *counter == 0 { + tx.Delete(uint32(change.ColumnID), counterKey) + tx.Delete(uint32(change.ColumnID), change.Hash.Bytes()) + } else { + buf := make([]byte, 4) + binary.LittleEndian.PutUint32(buf, *counter) + tx.Put(uint32(change.ColumnID), counterKey, buf) + } + } + default: + panic("unreachable") + } + } + return dba.db.Write(tx) +} + +func (dba *DBAdapter[H]) Get(col ColumnID, key []byte) []byte { + val, err := dba.db.Get(uint32(col), key) + if err != nil { + panic(err) + } + return val +} + +func (dba *DBAdapter[H]) Contains(col ColumnID, key []byte) bool { + has, err := dba.db.HasKey(uint32(col), key) + if err != nil { + panic(err) + } + return has +} diff --git a/internal/primitives/database/mem.go b/internal/primitives/database/mem.go index 30f6e3acab..a007192a21 100644 --- a/internal/primitives/database/mem.go +++ b/internal/primitives/database/mem.go @@ -15,7 +15,7 @@ type refCountValue struct { value []byte } -// MemDB 
implements `Database` as an in-memory hash map. `Commit` is not atomic. +// MemDB implements Database as an in-memory hash map. Commit is not atomic. type MemDB[H runtime.Hash] struct { inner map[ColumnID]map[string]refCountValue sync.RWMutex @@ -28,7 +28,7 @@ func NewMemDB[H runtime.Hash]() *MemDB[H] { } } -// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` +// Commit the transaction to the database atomically. Any further calls to get or lookup // will reflect the new state. func (mdb *MemDB[H]) Commit(transaction Transaction[H]) error { mdb.Lock() @@ -57,7 +57,7 @@ func (mdb *MemDB[H]) Commit(transaction Transaction[H]) error { cv.refCount += 1 mdb.inner[change.ColumnID][string(change.Hash.Bytes())] = cv } else { - mdb.inner[change.ColumnID][string(change.Hash.Bytes())] = refCountValue{1, change.Preimage} + mdb.inner[change.ColumnID][string(change.Hash.Bytes())] = refCountValue{1, change.Value} } case Reference[H]: _, ok := mdb.inner[change.ColumnID] @@ -88,7 +88,7 @@ func (mdb *MemDB[H]) Commit(transaction Transaction[H]) error { return nil } -// Retrieve the value previously stored against `key` or `nil` if `key` is not currently in the database. +// Get retrieves the value previously stored against key or nil if key is not currently in the database. func (mdb *MemDB[H]) Get(col ColumnID, key []byte) []byte { mdb.RLock() defer mdb.RUnlock() @@ -103,7 +103,7 @@ func (mdb *MemDB[H]) Get(col ColumnID, key []byte) []byte { return nil } -// Check if the value exists in the database without retrieving it. +// Contains checks if the value exists in the database without retrieving it. func (mdb *MemDB[H]) Contains(col ColumnID, key []byte) bool { return mdb.Get(col, key) != nil } diff --git a/internal/primitives/runtime/digest.go b/internal/primitives/runtime/digest.go index 09eb081c6f..6b60bb5670 100644 --- a/internal/primitives/runtime/digest.go +++ b/internal/primitives/runtime/digest.go @@ -9,13 +9,12 @@ import ( "github.com/ChainSafe/gossamer/pkg/scale" ) -// Digest item that is able to encode/decode 'system' digest items and -// provide opaque access to other items. +// DigestItemTypes is interface constraint of [DigestItem] type DigestItemTypes interface { PreRuntime | Consensus | Seal | Other | RuntimeEnvironmentUpdated } -// Digest item that is able to encode/decode 'system' digest items and +// DigestItem is able to encode/decode "system" digest items and // provide opaque access to other items. type DigestItem struct { inner any @@ -95,15 +94,15 @@ func (mvdt DigestItem) String() string { return fmt.Sprintf("%s", mvdt.inner) } -// A pre-runtime digest. +// PreRuntime is a pre-runtime digest. // // These are messages from the consensus engine to the runtime, although // the consensus engine can (and should) read them itself to avoid // code and state duplication. It is erroneous for a runtime to produce // these, but this is not (yet) checked. // -// NOTE: the runtime is not allowed to panic or fail in an `on_initialize` -// call if an expected `PreRuntime` digest is not present. It is the +// NOTE: the runtime is not allowed to panic or fail in an on_initialize +// call if an expected PreRuntime digest is not present. It is the // responsibility of a external block verifier to check this. Runtime API calls // will initialize the block without pre-runtime digests, so initialization // cannot fail when they are missing. @@ -112,7 +111,7 @@ type PreRuntime struct { Bytes []byte } -// A message from the runtime to the consensus engine. 
This should *never*
+// Consensus is a message from the runtime to the consensus engine. This should *never*
 // be generated by the native code of any consensus engine, but this is not
 // checked (yet).
 type Consensus struct {
@@ -127,10 +126,10 @@ type Seal struct {
 	Bytes []byte
 }
 
-// Some other thing. Unsupported and experimental.
+// Other is some other thing. Unsupported and experimental.
 type Other []byte
 
-// An indication for the light clients that the runtime execution
+// RuntimeEnvironmentUpdated is an indication for the light clients that the runtime execution
 // environment is updated.
 type RuntimeEnvironmentUpdated struct{}
 
diff --git a/internal/primitives/runtime/generic/block.go b/internal/primitives/runtime/generic/block.go
index a08acf42b6..a43cbb6c6a 100644
--- a/internal/primitives/runtime/generic/block.go
+++ b/internal/primitives/runtime/generic/block.go
@@ -4,31 +4,45 @@
 package generic
 
 import (
+	"fmt"
+
 	"github.com/ChainSafe/gossamer/internal/primitives/core/hash"
 	"github.com/ChainSafe/gossamer/internal/primitives/runtime"
 )
 
-// Something to identify a block.
-type BlockID any
+// BlockID is used to identify a block.
+type BlockID interface {
+	isBlockID()
+}
 
-// BlockIDTypes is the interface constraint of `BlockID`.
-type BlockIDTypes[H, N any] interface {
+// BlockIDTypes is the interface constraint of BlockID.
+type BlockIDTypes[H runtime.Hash, N runtime.Number] interface {
 	BlockIDHash[H] | BlockIDNumber[N]
 }
 
-// NewBlockID is the constructor for `BlockID`.
-func NewBlockID[H, N any, T BlockIDTypes[H, N]](blockID T) BlockID {
+// NewBlockID is the constructor for BlockID.
+func NewBlockID[H runtime.Hash, N runtime.Number, T BlockIDTypes[H, N]](blockID T) BlockID {
 	return BlockID(blockID)
 }
 
 // BlockIDHash is id by block header hash.
-type BlockIDHash[H any] struct {
-	Inner H
+type BlockIDHash[H runtime.Hash] struct {
+	Hash H
+}
+
+func (BlockIDHash[H]) isBlockID() {}
+func (id BlockIDHash[H]) String() string {
+	return fmt.Sprintf("%s", id.Hash)
 }
 
 // BlockIDNumber is id by block number.
-type BlockIDNumber[N any] struct {
-	Inner N
+type BlockIDNumber[N runtime.Number] struct {
+	Number N
+}
+
+func (BlockIDNumber[N]) isBlockID() {}
+func (id BlockIDNumber[N]) String() string {
+	return fmt.Sprintf("%d", id.Number)
 }
 
 // Block is a block.
@@ -60,7 +74,7 @@ func (b Block[N, H, Hasher]) Hash() H {
 	return hasher.HashEncoded(b.header)
 }
 
-// NewBlock is the constructor for `Block`.
+// NewBlock is the constructor for Block.
 func NewBlock[N runtime.Number, H runtime.Hash, Hasher runtime.Hasher[H]](
 	header runtime.Header[N, H], extrinsics []runtime.Extrinsic) Block[N, H, Hasher] {
 	return Block[N, H, Hasher]{
diff --git a/internal/primitives/runtime/generic/header.go b/internal/primitives/runtime/generic/header.go
index ef10c50ab4..0739f6949f 100644
--- a/internal/primitives/runtime/generic/header.go
+++ b/internal/primitives/runtime/generic/header.go
@@ -11,7 +11,7 @@ import (
 	"github.com/ChainSafe/gossamer/pkg/scale"
 )
 
-// Header is a block header, and implements a compatible encoding to `sp_runtime::generic::Header`
+// Header is a block header, and implements a compatible encoding to sp_runtime::generic::Header
 type Header[N runtime.Number, H runtime.Hash, Hasher runtime.Hasher[H]] struct {
 	// The parent hash.
 	parentHash H
@@ -113,7 +113,7 @@ func (h Header[N, H, Hasher]) Hash() H {
 	return hasher.HashEncoded(h)
 }
 
-// NewHeader is the constructor for `Header`
+// NewHeader is the constructor for Header
 func NewHeader[N runtime.Number, H runtime.Hash, Hasher runtime.Hasher[H]](
 	number N,
 	extrinsicsRoot H,
diff --git a/internal/primitives/runtime/runtime.go b/internal/primitives/runtime/runtime.go
index 75fceaae28..d30fabe6b0 100644
--- a/internal/primitives/runtime/runtime.go
+++ b/internal/primitives/runtime/runtime.go
@@ -3,11 +3,13 @@
 package runtime
 
+import "slices"
+
 // Justification is an abstraction over justification for a block's validity under a consensus algorithm.
 //
 // Essentially a finality proof. The exact formulation will vary between consensus algorithms. In the case where there
 // are multiple valid proofs, inclusion within the block itself would allow swapping justifications to change the
-// block's hash (and thus fork the chain). Sending a `Justification` alongside a block instead bypasses this problem.
+// block's hash (and thus fork the chain). Sending a Justification alongside a block instead bypasses this problem.
 //
 // Each justification is provided as an encoded blob, and is tagged with an ID to identify the consensus engine that
 // generated the proof (we might have multiple justifications from different engines for the same block).
@@ -23,6 +25,24 @@ type EncodedJustification []byte
 // different consensus engines for the same block.
 type Justifications []Justification
 
+// Append adds a justification, returning false if a justification for the same
+// consensus engine already exists.
+func (j *Justifications) Append(justification Justification) bool {
+	if j.Get(justification.ConsensusEngineID) != nil {
+		return false
+	}
+	*j = append(*j, justification)
+	return true
+}
+
+// Get returns the encoded justification for the given consensus engine, if any.
+func (j Justifications) Get(engineID ConsensusEngineID) *EncodedJustification {
+	index := slices.IndexFunc(j, func(j Justification) bool {
+		return j.ConsensusEngineID == engineID
+	})
+	if index >= 0 {
+		return &j[index].EncodedJustification
+	}
+	return nil
+}
+
 // EncodedJustification returns a copy of the encoded justification for the given consensus engine, if it exists
 func (j Justifications) EncodedJustification(engineID ConsensusEngineID) *EncodedJustification {
 	for _, justification := range j {
diff --git a/internal/primitives/runtime/testing/testing.go b/internal/primitives/runtime/testing/testing.go
new file mode 100644
index 0000000000..f83e08f06d
--- /dev/null
+++ b/internal/primitives/runtime/testing/testing.go
@@ -0,0 +1,13 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package testing
+
+// ExtrinsicsWrapper is an opaque extrinsic wrapper type.
+type ExtrinsicsWrapper[T any] struct {
+	T T
+}
+
+func (ExtrinsicsWrapper[T]) IsSigned() *bool {
+	return nil
+}
diff --git a/internal/primitives/state-machine/backend.go b/internal/primitives/state-machine/backend.go
index 67a7b009c2..44b2671c03 100644
--- a/internal/primitives/state-machine/backend.go
+++ b/internal/primitives/state-machine/backend.go
@@ -12,24 +12,24 @@ import (
 	"github.com/ChainSafe/gossamer/pkg/trie/triedb"
 )
 
-// A struct containing arguments for iterating over the storage.
+// IterArgs is a struct containing arguments for iterating over the storage.
 type IterArgs struct {
-	// The prefix of the keys over which to iterate.
+	// Prefix of the keys over which to iterate.
 	Prefix []byte
 
-	// The prefix from which to start the iteration from.
+	// StartAt is the prefix from which to start the iteration.
 	//
 	// This is inclusive and the iteration will include the key which is specified here.
 	StartAt []byte
 
-	// If this is true then the iteration will *not* include
+	// If StartAtExclusive is true then the iteration will *not* include
 	// the key specified in StartAt, if there is such a key.
 	StartAtExclusive bool
 
-	// The info of the child trie over which to iterate over.
+	// ChildInfo is the info of the child trie over which to iterate.
 	ChildInfo storage.ChildInfo
 
-	// Whether to stop iteration when a missing trie node is reached.
+	// StopOnIncompleteDatabase represents whether to stop iteration when a missing trie node is reached.
 	//
 	// When a missing trie node is reached the iterator will:
 	// - return an error if this is set to false (default)
@@ -37,7 +37,7 @@
 	StopOnIncompleteDatabase bool
 }
 
-// An interface for a raw storage iterator.
+// StorageIterator is the interface for a raw storage iterator.
 type StorageIterator[Hash runtime.Hash, Hasher runtime.Hasher[Hash]] interface {
 	// Fetches the next key from the storage.
 	NextKey(backend *TrieBackend[Hash, Hasher]) (StorageKey, error)
@@ -49,7 +49,7 @@ type StorageIterator[Hash runtime.Hash, Hasher runtime.Hasher[Hash]] interface {
 	Complete() bool
 }
 
-// An iterator over storage keys and values.
+// PairsIter is an iterator over storage keys and values.
 type PairsIter[H runtime.Hash, Hasher runtime.Hasher[H]] struct {
 	backend *TrieBackend[H, Hasher]
 	rawIter StorageIterator[H, Hasher]
@@ -76,7 +76,7 @@ func (pi *PairsIter[H, Hasher]) All() iter.Seq2[StorageKeyValue, error] {
 	}
 }
 
-// An iterator over storage keys.
+// KeysIter is an iterator over storage keys.
 type KeysIter[H runtime.Hash, Hasher runtime.Hasher[H]] struct {
 	backend *TrieBackend[H, Hasher]
 	rawIter StorageIterator[H, Hasher]
@@ -103,7 +103,7 @@ func (ki *KeysIter[H, Hasher]) All() iter.Seq2[StorageKey, error] {
 	}
 }
 
-// The transaction type used by [Backend].
+// BackendTransaction is the transaction type used by [Backend].
 //
 // This transaction contains all the changes that need to be applied to the backend to create the
 // state for a new block.
@@ -115,71 +115,69 @@ func NewBackendTransaction[Hash runtime.Hash, Hasher runtime.Hasher[Hash]]() Bac
 	return BackendTransaction[Hash, Hasher]{trie.NewPrefixedMemoryDB[Hash, Hasher]()}
 }
 
-// Reexport of [trie.KeyValue]
+// Delta is a reexport of [trie.KeyValue].
 type Delta = trie.KeyValue
 
+// ChildDelta is a set of deltas for a child trie.
 type ChildDelta struct {
 	storage.ChildInfo
 	Deltas []Delta
 }
 
-// A state backend is used to read state data and can have changes committed
-// to it.
-//
-// The clone operation (if implemented) should be cheap.
+// A state Backend is used to read state data and can have changes committed to it.
 type Backend[Hash runtime.Hash, H runtime.Hasher[Hash]] interface {
-	// Get keyed storage or nil if there is nothing associated.
+	// Storage gets keyed storage or nil if there is nothing associated.
 	Storage(key []byte) (StorageValue, error)
 
-	// Get keyed storage value hash or nil if there is nothing associated.
+	// StorageHash gets the keyed storage value hash or nil if there is nothing associated.
 	StorageHash(key []byte) (*Hash, error)
 
-	// Get the merkle value or nil if there is nothing associated.
+	// ClosestMerkleValue gets the merkle value or nil if there is nothing associated.
 	ClosestMerkleValue(key []byte) (triedb.MerkleValue[Hash], error)
 
-	// Get the child merkle value or nil if there is nothing associated.
+	// ChildClosestMerkleValue gets the child merkle value or nil if there is nothing associated.
	ChildClosestMerkleValue(childInfo storage.ChildInfo, key []byte) (triedb.MerkleValue[Hash], error)

-	// Get keyed child storage or nil if there is nothing associated.
+	// ChildStorage gets keyed child storage or nil if there is nothing associated.
	ChildStorage(childInfo storage.ChildInfo, key []byte) (StorageValue, error)

-	// Get child keyed storage value hash or nil if there is nothing associated.
+	// ChildStorageHash gets child keyed storage value hash or nil if there is nothing associated.
	ChildStorageHash(childInfo storage.ChildInfo, key []byte) (*Hash, error)

-	// true if a key exists in storage.
+	// ExistsStorage returns true if a key exists in storage.
	ExistsStorage(key []byte) (bool, error)

-	// true if a key exists in child storage.
+	// ExistsChildStorage returns true if a key exists in child storage.
	ExistsChildStorage(childInfo storage.ChildInfo, key []byte) (bool, error)

-	// Return the next key in storage in lexicographic order or nil if there is no value.
+	// NextStorageKey returns the next key in storage in lexicographic order or nil if there is no value.
	NextStorageKey(key []byte) (StorageKey, error)

-	// Return the next key in child storage in lexicographic order or nil if there is no value.
+	// NextChildStorageKey returns the next key in child storage in lexicographic order or nil if there is no value.
	NextChildStorageKey(childInfo storage.ChildInfo, key []byte) (StorageKey, error)

-	// Calculate the storage root, with given delta over what is already stored in
+	// StorageRoot calculates the storage root, with given delta over what is already stored in
	// the backend, and produce a "transaction" that can be used to commit.
	// Does not include child storage updates.
	StorageRoot(delta []Delta, stateVersion storage.StateVersion) (Hash, BackendTransaction[Hash, H])

-	// Calculate the child storage root, with given delta over what is already stored in
+	// ChildStorageRoot calculates the child storage root, with given delta over what is already stored in
	// the backend, and produce a "transaction" that can be used to commit. The second argument
	// is true if child storage root equals default storage root.
	ChildStorageRoot(
		childInfo storage.ChildInfo, delta []Delta, stateVersion storage.StateVersion,
	) (Hash, bool, BackendTransaction[Hash, H])

-	// Returns a lifetimeless raw storage iterator.
+	// RawIter returns a raw storage iterator.
	RawIter(args IterArgs) (StorageIterator[Hash, H], error)

-	// Get an iterator over key/value pairs.
+	// Pairs returns an iterator over key/value pairs.
	Pairs(args IterArgs) (PairsIter[Hash, H], error)

-	// Get an iterator over keys.
+	// Keys returns an iterator over keys.
	Keys(args IterArgs) (KeysIter[Hash, H], error)

-	// Calculate the storage root, with given delta over what is already stored
+	// FullStorageRoot calculates the storage root, with given delta over what is already stored
	// in the backend, and produce a "transaction" that can be used to commit.
	// Does include child storage updates.
	FullStorageRoot(
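The iterator-returning methods of this interface pair naturally with Go 1.23 range-over-func loops, since both All() methods yield iter.Seq2 sequences. A minimal sketch of driving Keys (illustrative only; the backend value, prefix, and type parameters are assumed to be supplied by the caller):

```go
package example

import (
	"github.com/ChainSafe/gossamer/internal/primitives/runtime"
	statemachine "github.com/ChainSafe/gossamer/internal/primitives/state-machine"
)

// iterateKeys walks all storage keys under a prefix using the Backend
// interface above.
func iterateKeys[H runtime.Hash, Hasher runtime.Hasher[H]](
	backend statemachine.Backend[H, Hasher],
) error {
	keysIter, err := backend.Keys(statemachine.IterArgs{
		Prefix:           []byte("prefix"),
		StartAt:          []byte("prefix1"),
		StartAtExclusive: true, // skip "prefix1" itself if present
	})
	if err != nil {
		return err
	}
	// All returns an iter.Seq2[StorageKey, error], so it can be consumed
	// with a range-over-func loop.
	for key, err := range keysIter.All() {
		if err != nil {
			return err
		}
		_ = key // use the key
	}
	return nil
}
```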
diff --git a/internal/primitives/state-machine/in_memory_backend.go b/internal/primitives/state-machine/in_memory_backend.go
index 883aed4a38..4ec2d182d9 100644
--- a/internal/primitives/state-machine/in_memory_backend.go
+++ b/internal/primitives/state-machine/in_memory_backend.go
@@ -14,7 +14,7 @@ type MemoryDBTrieBackend[H runtime.Hash, Hasher runtime.Hasher[H]] struct {
	*TrieBackend[H, Hasher]
 }
 
-// Create a new empty instance of in-memory backend.
+// NewMemoryDBTrieBackend creates a new empty instance of in-memory backend.
 func NewMemoryDBTrieBackend[H runtime.Hash, Hasher runtime.Hasher[H]]() MemoryDBTrieBackend[H, Hasher] {
	mdb := trie.NewPrefixedMemoryDB[H, Hasher]()
	root := (*new(Hasher)).Hash([]byte{0})
diff --git a/internal/primitives/state-machine/overlayed_changes.go b/internal/primitives/state-machine/overlayed_changes.go
index 6497d15f9b..bcf7a98847 100644
--- a/internal/primitives/state-machine/overlayed_changes.go
+++ b/internal/primitives/state-machine/overlayed_changes.go
@@ -3,17 +3,65 @@
 
 package statemachine
 
-// Storage key.
+import "github.com/ChainSafe/gossamer/internal/primitives/core/offchain"
+
+// StorageKey is a storage key.
 type StorageKey []byte
 
-// Storage value. Value can be nil
+// StorageValue is a storage value. Value can be nil
 type StorageValue []byte
 
-// Storage key and value.
+// StorageKeyValue is a storage key and value.
 type StorageKeyValue struct {
	StorageKey
	StorageValue
 }
 
-// In memory array of storage values.
+// StorageCollection is a slice of storage values.
 type StorageCollection []StorageKeyValue
+
+// ChildStorageCollection is a slice of storage values for multiple child tries.
+type ChildStorageCollection []struct {
+	StorageKey
+	StorageCollection
+}
+
+// OffchainChangesCollection is a slice of offchain storage changes.
+type OffchainChangesCollection []struct {
+	PrefixKey struct {
+		Prefix []byte
+		Key    []byte
+	}
+	ValueOperation offchain.OffchainOverlayedChange
+}
+
+// IndexOperations is the interface constraint for [IndexOperation] implementations.
+type IndexOperations interface {
+	IndexOperationInsert | IndexOperationRenew
+}
+
+// IndexOperation is a transaction index operation.
+type IndexOperation interface {
+	isIndexOperation()
+}
+
+// IndexOperationInsert is an insert transaction into index.
+type IndexOperationInsert struct {
+	// Extrinsic index in the current block.
+	Extrinsic uint32
+	// Data content hash.
+	Hash []byte
+	// Indexed data size.
+	Size uint32
+}
+
+// IndexOperationRenew renews existing transaction storage.
+type IndexOperationRenew struct {
+	// Extrinsic index in the current block.
+	Extrinsic uint32
+	// Referenced index hash.
+	Hash []byte
+}
+
+func (IndexOperationInsert) isIndexOperation() {}
+func (IndexOperationRenew) isIndexOperation()  {}
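Since IndexOperation is a closed sum type (the unexported isIndexOperation method keeps implementations inside the package), consumers dispatch on it with a type switch. A small illustrative sketch:

```go
package example

import (
	"fmt"

	statemachine "github.com/ChainSafe/gossamer/internal/primitives/state-machine"
)

// describeIndexOperation dispatches on the IndexOperation sum type defined
// above; the messages are illustrative only.
func describeIndexOperation(op statemachine.IndexOperation) string {
	switch op := op.(type) {
	case statemachine.IndexOperationInsert:
		return fmt.Sprintf("insert %d bytes of indexed data for extrinsic %d", op.Size, op.Extrinsic)
	case statemachine.IndexOperationRenew:
		return fmt.Sprintf("renew indexed data %x for extrinsic %d", op.Hash, op.Extrinsic)
	default:
		panic("unreachable")
	}
}
```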
diff --git a/internal/primitives/state-machine/trie_backend.go b/internal/primitives/state-machine/trie_backend.go
index fb0b20d1ea..89032848d2 100644
--- a/internal/primitives/state-machine/trie_backend.go
+++ b/internal/primitives/state-machine/trie_backend.go
@@ -18,9 +18,9 @@ import (
	triedb "github.com/ChainSafe/gossamer/pkg/trie/triedb"
 )
 
-// A provider of trie caches that are compatible with [triedb.TrieDB].
+// TrieCacheProvider is a provider of trie caches that are compatible with [triedb.TrieDB].
 type TrieCacheProvider[H runtime.Hash, Cache triedb.TrieCache[H]] interface {
-	// Return a [triedb.TrieDB] compatible cache.
+	// TrieCache returns a [triedb.TrieDB] compatible cache.
	//
	// The storage_root parameter *must* be the storage root of the trie this cache is used for.
	//
@@ -28,7 +28,7 @@ type TrieCacheProvider[H runtime.Hash, Cache triedb.TrieCache[H]] interface {
	// may belong to different tries.
	TrieCache(storageRoot H) (cache Cache, unlock func())

-	// Returns a cache that can be used with a [triedb.TrieDB] where mutations are performed.
+	// TrieCacheMut returns a cache that can be used with a [triedb.TrieDB] where mutations are performed.
	//
	// When finished with the operation on the trie, it is required to call [TrieCacheProvider.Merge] to
	// merge the cached items for the correct storage root.
@@ -46,14 +46,14 @@ type cachedIter[H runtime.Hash, Hasher runtime.Hasher[H]] struct {
	iter rawIter[H, Hasher]
 }
 
-// Patricia trie-based backend. Transaction type is an overlay of changes to commit.
+// TrieBackend is a Patricia trie-based backend. Transaction type is an overlay of changes to commit.
 type TrieBackend[H runtime.Hash, Hasher runtime.Hasher[H]] struct {
	essence trieBackendEssence[H, Hasher]
	nextStorageKeyCache *cachedIter[H, Hasher]
	nextStorageKeyCacheMtx sync.Mutex
 }
 
-// Constructor for [TrieBackend].
+// NewTrieBackend is the constructor for [TrieBackend].
 func NewTrieBackend[H runtime.Hash, Hasher runtime.Hasher[H]](
	storage TrieBackendStorage[H],
	root H,
@@ -65,7 +65,7 @@ func NewTrieBackend[H runtime.Hash, Hasher runtime.Hasher[H]](
	}
 }
 
-// Wrap the given [TrieBackend].
+// NewWrappedTrieBackend wraps the given [TrieBackend].
 //
 // This can be used for example if all accesses to the trie should
 // be recorded while some other functionality still uses the non-recording
@@ -85,7 +85,7 @@ func NewWrappedTrieBackend[H runtime.Hash, Hasher runtime.Hasher[H]](
	)
 }
 
-// Create a backend used for checking the proof.
+// NewProofCheckTrieBackend creates a backend used for checking the proof.
 //
 // proof and root must match, i.e. root must be the correct root of proof nodes.
 func NewProofCheckTrieBackend[H runtime.Hash, Hasher runtime.Hasher[H]](
diff --git a/internal/primitives/state-machine/trie_backend_essence.go b/internal/primitives/state-machine/trie_backend_essence.go
index defeb0f9b8..2a2414725b 100644
--- a/internal/primitives/state-machine/trie_backend_essence.go
+++ b/internal/primitives/state-machine/trie_backend_essence.go
@@ -444,7 +444,7 @@ func (tbe *trieBackendEssence[H, Hasher]) ChildClosestMerkleValue(
		return nil, err
	}
	if root == nil {
-		return nil, nil //nolint:nilnil
+		return nil, nil
	}
	childRoot = *root
diff --git a/internal/primitives/state-machine/trie_backend_test.go b/internal/primitives/state-machine/trie_backend_test.go
index 5993817877..8f4c2c944e 100644
--- a/internal/primitives/state-machine/trie_backend_test.go
+++ b/internal/primitives/state-machine/trie_backend_test.go
@@ -856,10 +856,10 @@ func TestTrieBackend(t *testing.T) {
		}
	})

-	// Test to ensure that recording the same `key` for different tries works as expected.
+	// Test to ensure that recording the same key for different tries works as expected.
	//
	// Each trie stores a different value under the same key. The values are big enough to
-	// be not inlined with `StateVersion::V1`, this is important to test the expected behavior. The
+	// be not inlined with StateVersionV1, this is important to test the expected behavior. The
	// trie recorder is expected to differentiate key access based on the different storage roots
	// of the tries.
	t.Run("recording_same_key_access_in_different_tries", func(t *testing.T) {
diff --git a/internal/primitives/storage/keys/keys.go b/internal/primitives/storage/keys/keys.go
new file mode 100644
index 0000000000..832b862782
--- /dev/null
+++ b/internal/primitives/storage/keys/keys.go
@@ -0,0 +1,23 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package keys
+
+import (
+	"strings"
+)
+
+// List of all well known keys and prefixes in storage.
+var (
+	// DefaultChildStorageKeyPrefix is a prefix of the default child storage keys in the top trie.
+	DefaultChildStorageKeyPrefix = []byte(":child_storage:default:")
+)
+
+// IsChildStorageKey returns whether a key is a child storage key.
+//
+// This is a convenience function which checks if the given key starts
+// with [DefaultChildStorageKeyPrefix].
+func IsChildStorageKey(key []byte) bool {
+	return strings.HasPrefix(string(key), string(DefaultChildStorageKeyPrefix))
+}
diff --git a/internal/primitives/storage/keys/keys_test.go b/internal/primitives/storage/keys/keys_test.go
new file mode 100644
index 0000000000..107c8f6076
--- /dev/null
+++ b/internal/primitives/storage/keys/keys_test.go
@@ -0,0 +1,19 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package keys
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func Test_IsChildStorageKey(t *testing.T) {
+	require.True(t, IsChildStorageKey([]byte(":child_storage:default:bleh")))
+	require.True(t, IsChildStorageKey([]byte(":child_storage:default:")))
+
+	require.False(t, IsChildStorageKey([]byte("notequal")))
+	require.False(t, IsChildStorageKey([]byte("")))
+	require.False(t, IsChildStorageKey(nil))
+}
diff --git a/internal/primitives/storage/storage.go b/internal/primitives/storage/storage.go
index 0eb048f7e6..ff95d2b727 100644
--- a/internal/primitives/storage/storage.go
+++ b/internal/primitives/storage/storage.go
@@ -6,62 +6,77 @@ package storage
 
 import (
	"strings"

+	"github.com/ChainSafe/gossamer/internal/primitives/storage/keys"
	"github.com/ChainSafe/gossamer/pkg/trie"
+	"github.com/tidwall/btree"
 )
 
-// Storage key.
+// StorageKey is a storage key.
 type StorageKey []byte
 
-// Storage key of a child trie, it contains the prefix to the key.
+// PrefixedStorageKey is a storage key of a child trie; it contains the prefix to the key.
 type PrefixedStorageKey []byte
 
-// Information related to a child state.
+// StorageChild is child trie storage data.
+type StorageChild struct {
+	Data      btree.Map[string, []byte] // Child data for storage.
+	ChildInfo ChildInfo                 // Associated child info for a child trie.
+}
+
+// Storage contains the data needed for storage.
+type Storage struct {
+	// Top trie storage data.
+	Top btree.Map[string, []byte]
+	// Children trie storage data. Key does not include prefix, only for the default trie kind,
+	// of [ChildTypeParentKeyID] type.
+	ChildrenDefault map[string]StorageChild
+}
+
+// ChildInfo is information related to a child state.
 type ChildInfo interface {
-	// Returns byte sequence (keyspace) that can be use by underlying db to isolate keys.
+	// Keyspace returns a byte sequence (keyspace) that can be used by the underlying db to isolate keys.
	// This is a unique id of the child trie. The collision resistance of this value
-	// depends on the type of child info use. For `ChildInfo::Default` it is and need to be.
+	// depends on the type of child info in use. For [ChildTypeParentKeyID] it is and needs to be.
	Keyspace() []byte

-	// Returns a reference to the location in the direct parent of
-	// this trie but without the common prefix for this kind of
-	// child trie.
+	// StorageKey returns a reference to the location in the direct parent of
+	// this trie but without the common prefix for this kind of child trie.
	StorageKey() StorageKey

-	// Return a the full location in the direct parent of
-	// this trie.
+	// PrefixedStorageKey returns the full location in the direct parent of this trie.
	PrefixedStorageKey() PrefixedStorageKey

-	// Returns the type for this child info.
+	// ChildType returns the type for this child info.
	ChildType() ChildType
 }
 
-// This is the one used by default.
+// ChildInfoParentKeyID is the default ChildTrieParentKeyID.
 type ChildInfoParentKeyID ChildTrieParentKeyID
 
-// Returns byte sequence (keyspace) that can be use by underlying db to isolate keys.
+// Keyspace returns a byte sequence (keyspace) that can be used by the underlying db to isolate keys.
 // This is a unique id of the child trie. The collision resistance of this value
 // depends on the type of child info use.
 func (cipkid ChildInfoParentKeyID) Keyspace() []byte {
	return cipkid.StorageKey()
 }
 
-// Returns a reference to the location in the direct parent of
+// StorageKey returns a reference to the location in the direct parent of
 // this trie but without the common prefix for this kind of
 // child trie.
 func (cipkid ChildInfoParentKeyID) StorageKey() StorageKey {
	return ChildTrieParentKeyID(cipkid).data
 }
 
-// Return a the full location in the direct parent of
+// PrefixedStorageKey returns the full location in the direct parent of
 // this trie.
 func (cipkid ChildInfoParentKeyID) PrefixedStorageKey() PrefixedStorageKey {
	return ChildTypeParentKeyID.NewPrefixedKey(cipkid.data)
 }
 
-// Returns the type for this child info.
+// ChildType returns the type for this child info.
 func (cipkid ChildInfoParentKeyID) ChildType() ChildType {
	return ChildTypeParentKeyID
 }
 
-// Instantiates child information for a default child trie
-// of kind `ChildType::ParentKeyId`, using an unprefixed parent
+// NewDefaultChildInfo instantiates child information for a default child trie
+// of kind [ChildTypeParentKeyID], using an unprefixed parent
 // storage key.
 func NewDefaultChildInfo(storageKey []byte) ChildInfo {
	return ChildInfoParentKeyID{
@@ -69,7 +84,7 @@ func NewDefaultChildInfo(storageKey []byte) ChildInfo {
	}
 }
 
-// Type of child.
+// ChildType is the type of child.
 // It does not strictly define different child type, it can also
 // be related to technical consideration or api variant.
 type ChildType uint32
@@ -80,7 +95,7 @@ const (
	ChildTypeParentKeyID ChildType = iota + 1
 )
 
-// Transform a prefixed key into a tuple of the child type
+// NewChildTypeFromPrefixedKey transforms a prefixed key into a tuple of the child type
 // and the unprefixed representation of the key.
 func NewChildTypeFromPrefixedKey(storageKey PrefixedStorageKey) *struct {
	ChildType
@@ -98,28 +113,25 @@ func NewChildTypeFromPrefixedKey(storageKey PrefixedStorageKey) *struct {
	}
 }
 
-// Produce a prefixed key for a given child type.
+// NewPrefixedKey produces a prefixed key for a given child type.
 func (ct ChildType) NewPrefixedKey(key []byte) PrefixedStorageKey {
	parentPrefix := ct.ParentPrefix()
	result := append(parentPrefix, key...)
	return PrefixedStorageKey(result)
 }
 
-// Prefix of the default child storage keys in the top trie.
-var DefaultChildStorageKeyPrefix = []byte(":child_storage:default:")
-
-// Returns the location reserved for this child trie in their parent trie if there
+// ParentPrefix returns the location reserved for this child trie in its parent trie if there
 // is one.
 func (ct ChildType) ParentPrefix() []byte {
	switch ct {
	case ChildTypeParentKeyID:
-		return DefaultChildStorageKeyPrefix
+		return keys.DefaultChildStorageKeyPrefix
	default:
		panic("unreachable")
	}
 }
 
-// A child trie of default type.
+// ChildTrieParentKeyID is a child trie of default type.
 //
 // It uses the same default implementation as the top trie, top trie being a child trie with no
 // keyspace and no storage key. Its keyspace is the variable (unprefixed) part of its storage key.
@@ -131,7 +143,7 @@ type ChildTrieParentKeyID struct {
	data []byte
 }
 
-// Different possible state version.
+// StateVersion represents the different possible state versions.
 //
 // V0 and V1 uses a same trie implementation, but V1 will write external value node in the trie for
 // value with size greater than 32 bytes.
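The child-info helpers above compose into a simple round trip: a default child info prefixes its storage key with keys.DefaultChildStorageKeyPrefix, and NewChildTypeFromPrefixedKey recognises that prefix again. A sketch (illustrative; "mychild" is an arbitrary example key):

```go
package example

import (
	"fmt"

	"github.com/ChainSafe/gossamer/internal/primitives/storage"
)

func childInfoRoundTrip() {
	info := storage.NewDefaultChildInfo([]byte("mychild"))

	// The prefixed key is ":child_storage:default:" + "mychild".
	prefixed := info.PrefixedStorageKey()

	// NewChildTypeFromPrefixedKey recognises the prefix and reports the
	// child type.
	res := storage.NewChildTypeFromPrefixedKey(prefixed)
	if res == nil {
		panic("prefix should be recognised")
	}
	fmt.Println(res.ChildType == storage.ChildTypeParentKeyID) // true
	// res also carries the unprefixed representation of the key ("mychild");
	// the field holding it is elided in the hunk above, so it is not
	// accessed here.
}
```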
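A sketch of the intended lifecycle of these caches, per the comments above: one SharedTrieCache per node, one LocalTrieCache per state instance, and one TrieCache per trie access (illustrative; hash.H256 is assumed here as the hash type, and the APIs are those shown in this diff):

```go
package example

import (
	"github.com/ChainSafe/gossamer/internal/primitives/core/hash"
	"github.com/ChainSafe/gossamer/internal/primitives/trie/cache"
)

func cacheLifecycle(storageRoot hash.H256) {
	// One shared cache per node, with a total memory budget in bytes.
	shared := cache.NewSharedTrieCache[hash.H256](1024 * 1024)

	// One local cache per state instance (i.e. per block).
	local := shared.LocalTrieCache()

	// One TrieCache per trie; unlock must be called when done with it.
	trieCache, unlock := local.TrieCache(storageRoot)
	_ = trieCache // pass to a triedb.TrieDB for reads
	unlock()

	// Commit promotes the locally cached nodes and values into the shared
	// cache.
	local.Commit()
}
```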
@@ -300,17 +302,17 @@ func (tc *TrieCache[H]) GetOrInsertNode(
		// It was not in the local cache; try the shared cache.
		shared := tc.sharedCache.PeekNode(hash)
		if shared != nil {
-			log.Printf("TRACE: Serving node from shared cache: %s\n", hash)
+			logger.Tracef("Serving node from shared cache: %s", hash)
			node = nodeCached[H]{Node: shared, FromSharedCache: true}
		} else {
			// It was not in the shared cache; try fetching it from the database.
			var fetched triedb.CachedNode[H]
			fetched, err = fetchNode()
			if err != nil {
-				log.Printf("TRACE: Serving node from database failed: %s\n", hash)
+				logger.Tracef("Serving node from database failed: %s", hash)
				return nil, err
			} else {
-				log.Printf("TRACE: Serving node from database: %s\n", hash)
+				logger.Tracef("Serving node from database: %s", hash)
				node = nodeCached[H]{Node: fetched, FromSharedCache: false}
			}
		}
@@ -318,7 +320,7 @@
	}

	if isLocalCacheHit {
-		log.Printf("TRACE: Serving node from local cache: %s\n", hash)
+		logger.Tracef("Serving node from local cache: %s", hash)
	}

	return node.Node, nil
@@ -336,10 +338,10 @@ func (tc *TrieCache[H]) GetNode(hash H) triedb.CachedNode[H] {
		// It was not in the local cache; try the shared cache.
		peeked := tc.sharedCache.PeekNode(hash)
		if peeked != nil {
-			log.Printf("TRACE: Serving node from shared cache: %s\n", hash)
+			logger.Tracef("Serving node from shared cache: %s", hash)
			node = &nodeCached[H]{Node: peeked, FromSharedCache: true}
		} else {
-			log.Printf("TRACE: Serving node from cahe failed: %s\n", hash)
+			logger.Tracef("Serving node from cache failed: %s", hash)
			return nil
		}
	} else {
@@ -347,7 +349,7 @@ func (tc *TrieCache[H]) GetNode(hash H) triedb.CachedNode[H] {
	}

	if isLocalCacheHit {
-		log.Printf("TRACE: Serving node from local cache: %s\n", hash)
+		logger.Tracef("Serving node from local cache: %s", hash)
	}

	return node.Node
@@ -355,11 +357,11 @@ func (tc *TrieCache[H]) GetNode(hash H) triedb.CachedNode[H] {
 
 func (tc *TrieCache[H]) GetValue(key []byte) triedb.CachedValue[H] {
	cached := tc.valueCache.get(key, tc.sharedCache)
-	log.Printf("TRACE: Looked up value for key: %x\n", key)
+	logger.Tracef("Looked up value for key: %x", key)
	return cached
 }
 
 func (tc *TrieCache[H]) SetValue(key []byte, value triedb.CachedValue[H]) {
-	log.Printf("TRACE: Caching value for key: %x\n", key)
+	logger.Tracef("Caching value for key: %x", key)
	tc.valueCache.insert(key, value)
 }
diff --git a/internal/primitives/trie/cache/shared_cache.go b/internal/primitives/trie/cache/shared_cache.go
index 1f0041705c..c481cf87ce 100644
--- a/internal/primitives/trie/cache/shared_cache.go
+++ b/internal/primitives/trie/cache/shared_cache.go
@@ -4,7 +4,6 @@
 package cache
 
 import (
-	"log"
	"sync"

	costlru "github.com/ChainSafe/gossamer/internal/cost-lru"
@@ -29,7 +28,7 @@ type hasher[K comparable] struct {
 }
 
 func (h hasher[K]) Hash(key K) uint32 {
-	return uint32(h.Hasher.Hash(key)) //nolint:gosec
+	return uint32(h.Hasher.Hash(key))
 }
 
 // Constructor for [sharedNodeCache] with fixed size in number of bytes.
@@ -40,7 +39,7 @@ func newSharedNodeCache[H runtime.Hash](sizeBytes uint) *sharedNodeCache[H] { var err error snc.lru, err = costlru.New(sizeBytes, h.Hash, func(hash H, node triedb.CachedNode[H]) uint32 { - return uint32(node.ByteSize()) //nolint:gosec + return uint32(node.ByteSize()) }) if err != nil { panic(err) @@ -63,7 +62,7 @@ func (snc *sharedNodeCache[H]) Update(list []updateItem[H]) { addCount := uint(0) snc.itemsEvicted = 0 - maxItemsEvicted := uint(snc.lru.Len()*100) / sharedNodeCacheMaxReplacePercent //nolint:gosec + maxItemsEvicted := uint(snc.lru.Len()*100) / sharedNodeCacheMaxReplacePercent for _, ui := range list { if ui.nodeCached.FromSharedCache { _, ok := snc.lru.Get(ui.Hash) @@ -88,8 +87,8 @@ func (snc *sharedNodeCache[H]) Update(list []updateItem[H]) { } } - log.Printf( - "DEBUG: Updated the shared node cache: %d accesses, %d new values, %d/%d evicted (length = %d, size=%d/%d)\n", + logger.Debugf( + "Updated the shared node cache: %d accesses, %d new values, %d/%d evicted (length = %d, size=%d/%d)", accessCount, addCount, snc.itemsEvicted, maxItemsEvicted, snc.lru.Len(), snc.lru.Cost(), snc.lru.MaxCost(), ) } @@ -99,7 +98,8 @@ func (snc *sharedNodeCache[H]) Reset() { snc.lru.Purge() } -// The comparable type that identifies this instance of storage root and storage key, used in [sharedValueCache] LRU. +// ValueCacheKeyComparable is the comparable type that identifies this instance of storage root and storage key, used +// in sharedValueCache LRU. type ValueCacheKeyComparable[H runtime.Hash] struct { StorageRoot H StorageKey string @@ -112,7 +112,7 @@ func (vckh ValueCacheKeyComparable[H]) ValueCacheKey() ValueCacheKey[H] { } } -// The key type that is being used to address a [CachedValue]. +// ValueCacheKey is the key type that is being used to address a [CachedValue]. type ValueCacheKey[H runtime.Hash] struct { // The storage root of the trie this key belongs to. StorageRoot H @@ -135,7 +135,7 @@ type sharedValueCache[H runtime.Hash] struct { itemsEvicted uint } -// Constructor for [sharedValueCache]. +// Constructor for sharedValueCache. func newSharedValueCache[H runtime.Hash](size uint) *sharedValueCache[H] { var svc sharedValueCache[H] itemsEvictedPtr := &svc.itemsEvicted @@ -143,14 +143,14 @@ func newSharedValueCache[H runtime.Hash](size uint) *sharedValueCache[H] { h := hasher[ValueCacheKeyComparable[H]]{maphash.NewHasher[ValueCacheKeyComparable[H]]()} svc.lru, err = costlru.New(size, h.Hash, func(key ValueCacheKeyComparable[H], value triedb.CachedValue[H]) uint32 { - keyCost := uint32(len(key.StorageKey)) //nolint:gosec + keyCost := uint32(len(key.StorageKey)) switch value := value.(type) { case triedb.NonExistingCachedValue[H]: return keyCost + 1 case triedb.ExistingHashCachedValue[H]: - return keyCost + uint32(value.Hash.Length()) //nolint:gosec + return keyCost + uint32(value.Hash.Length()) case triedb.ExistingCachedValue[H]: - return keyCost + uint32(value.Hash.Length()+len(value.Data)) //nolint:gosec + return keyCost + uint32(value.Hash.Length()+len(value.Data)) default: panic("unreachable") } @@ -198,7 +198,7 @@ func (svc *sharedValueCache[H]) Update(added []sharedValueCacheAdded[H], accesse // we don't evict the whole shared cache nor we keep spinning our wheels // evicting items which we've added ourselves in previous iterations of this loop. 
svc.itemsEvicted = 0 - maxItemsEvicted := uint(svc.lru.Len()) * 100 / sharedValueCacheMaxReplacePercent //nolint:gosec + maxItemsEvicted := uint(svc.lru.Len()) * 100 / sharedValueCacheMaxReplacePercent for _, svca := range added { added, _ := svc.lru.Add(svca.ValueCacheKey.ValueCacheKeyComparable(), svca.CachedValue) @@ -212,8 +212,8 @@ func (svc *sharedValueCache[H]) Update(added []sharedValueCacheAdded[H], accesse } } - log.Printf( - "DEBUG: Updated the shared value cache: %d accesses, %d new values, %d/%d evicted (length = %d, size=%d/%d)\n", + logger.Debugf( + "Updated the shared value cache: %d accesses, %d new values, %d/%d evicted (length = %d, size=%d/%d)", accessCount, addCount, svc.itemsEvicted, maxItemsEvicted, svc.lru.Len(), svc.lru.Cost(), svc.lru.MaxCost(), ) } @@ -228,7 +228,7 @@ type sharedTrieCacheInner[H runtime.Hash] struct { valueCache *sharedValueCache[H] } -// The shared trie cache. +// SharedTrieCache is a shared trie cache. // // It should be instantiated once per node. It will hold the trie nodes and values of all // operations to the state. To not use all available memory it will ensure to stay in the @@ -240,7 +240,7 @@ type SharedTrieCache[H runtime.Hash] struct { mtx sync.RWMutex } -// Create a new [SharedTrieCache]. +// NewSharedTrieCache creates a new [SharedTrieCache]. func NewSharedTrieCache[H runtime.Hash](size uint) *SharedTrieCache[H] { totalBudget := size @@ -256,11 +256,11 @@ func NewSharedTrieCache[H runtime.Hash](size uint) *SharedTrieCache[H] { } } -// Create a new [LocalTrieCache] instance from this shared cache. +// LocalTrieCache creates a new [LocalTrieCache] instance from this shared cache. func (stc *SharedTrieCache[H]) LocalTrieCache() LocalTrieCache[H] { h := hasher[H]{maphash.NewHasher[H]()} nodeCache, err := costlru.New(localNodeCacheMaxSize, h.Hash, func(hash H, node nodeCached[H]) uint32 { - return uint32(node.ByteSize()) //nolint:gosec + return uint32(node.ByteSize()) }) if err != nil { panic(err) @@ -270,14 +270,14 @@ func (stc *SharedTrieCache[H]) LocalTrieCache() LocalTrieCache[H] { localValueCacheMaxSize, hasher[ValueCacheKeyComparable[H]]{maphash.NewHasher[ValueCacheKeyComparable[H]]()}.Hash, func(key ValueCacheKeyComparable[H], value triedb.CachedValue[H]) uint32 { - keyCost := uint32(len(key.StorageKey)) //nolint:gosec + keyCost := uint32(len(key.StorageKey)) switch value := value.(type) { case triedb.NonExistingCachedValue[H]: return keyCost + 1 case triedb.ExistingHashCachedValue[H]: - return keyCost + uint32(value.Hash.Length()) //nolint:gosec + return keyCost + uint32(value.Hash.Length()) case triedb.ExistingCachedValue[H]: - return keyCost + uint32(value.Hash.Length()+len(value.Data)) //nolint:gosec + return keyCost + uint32(value.Hash.Length()+len(value.Data)) default: panic("unreachable") } @@ -310,7 +310,7 @@ func (stc *SharedTrieCache[H]) Unlock() { stc.mtx.Unlock() } -// Get a copy of the node for key. +// PeekNode gets a copy of the node for key. // // This will temporarily lock the shared cache for reading. // @@ -325,7 +325,7 @@ func (stc *SharedTrieCache[H]) PeekNode(key H) triedb.CachedNode[H] { return nil } -// Get a copy of the [triedb.CachedValue] for key. +// PeekValueByHash gets a copy of the [triedb.CachedValue] for key. // // This will temporarily lock the shared cache for reading. // @@ -342,14 +342,19 @@ func (stc *SharedTrieCache[H]) PeekValueByHash( return nil } -// Reset the node cache. 
+// Reset resets both the node cache and the value cache.
+func (stc *SharedTrieCache[H]) Reset() {
+	stc.ResetNodeCache()
+	stc.ResetValueCache()
+}
+
+// ResetNodeCache resets the node cache.
 func (stc *SharedTrieCache[H]) ResetNodeCache() {
	stc.mtx.Lock()
	defer stc.mtx.Unlock()
	stc.inner.nodeCache.Reset()
 }
 
-// Reset the value cache.
+// ResetValueCache resets the value cache.
 func (stc *SharedTrieCache[H]) ResetValueCache() {
	stc.mtx.Lock()
	defer stc.mtx.Unlock()
diff --git a/internal/primitives/trie/recorder/recorder.go b/internal/primitives/trie/recorder/recorder.go
index 485533b491..3321c9910c 100644
--- a/internal/primitives/trie/recorder/recorder.go
+++ b/internal/primitives/trie/recorder/recorder.go
@@ -5,15 +5,18 @@ package recorder
 
 import (
	"fmt"
-	"log"
	"sync"

+	"github.com/ChainSafe/gossamer/internal/log"
+	"github.com/ChainSafe/gossamer/internal/primitives/runtime"
	"github.com/ChainSafe/gossamer/internal/primitives/trie"
	"github.com/ChainSafe/gossamer/pkg/scale"
	"github.com/ChainSafe/gossamer/pkg/trie/triedb"
 )
 
+var logger = log.NewFromGlobal(log.AddContext("pkg", "primitives/trie/recorder"))
+
 // Stores all the information per transaction.
 type transaction[H comparable] struct {
	// Stores transaction information about recorder keys.
@@ -60,14 +63,14 @@ type Recorder[H runtime.Hash] struct {
	encodedSizeEstimationMtx sync.Mutex
 }
 
-// Constructor for [Recorder].
+// NewRecorder is the constructor for [Recorder].
 func NewRecorder[H runtime.Hash]() *Recorder[H] {
	return &Recorder[H]{
		inner: newRecorderInner[H](),
	}
 }
 
-// Returns the recorder as an implementation of [triedb.TrieRecorder].
+// TrieRecorder returns the recorder as an implementation of [triedb.TrieRecorder].
 //
 // The storage root supplied is of the trie for which accesses are recorded.
 // This is important when recording access to different tries at once (like top and child tries).
@@ -80,7 +83,7 @@ func (r *Recorder[H]) TrieRecorder(storageRoot H) triedb.TrieRecorder {
	}
 }
 
-// Drain the recording into a [StorageProof].
+// DrainStorageProof drains the recording into a [StorageProof].
 //
 // While a recorder can be cloned, all share the same internal state. After calling this
 // function, all other instances will have their internal state reset as well.
@@ -107,7 +110,7 @@ func (r *Recorder[H]) storageProof() trie.StorageProof {
	return trie.NewStorageProof(values)
 }
 
-// Convert the recording to a [StorageProof].
+// StorageProof converts the recording to a [StorageProof].
 //
 // In contrast to [Recorder.DrainStorageProof] this doesn't consume and clear the
 // recordings.
@@ -119,7 +122,7 @@ func (r *Recorder[H]) StorageProof() trie.StorageProof {
	return r.storageProof()
 }
 
-// Returns the estimated encoded size of the proof.
+// EstimateEncodedSize returns the estimated encoded size of the proof.
 //
 // The estimation is based on all the nodes that were accessed until now while
 // accessing the trie.
@@ -136,14 +139,14 @@ func (r *Recorder[H]) Reset() {
	r.inner = newRecorderInner[H]()
 }
 
-// Start a new transaction.
+// StartTransaction starts a new transaction.
 func (r *Recorder[H]) StartTransaction() {
	r.innerMtx.Lock()
	defer r.innerMtx.Unlock()
	r.inner.transactions = append(r.inner.transactions, newTransaction[H]())
 }
 
-// Rollback the latest transaction.
+// RollBackTransaction rolls back the latest transaction.
 //
 // Returns an error if there wasn't any active transaction.
 func (r *Recorder[H]) RollBackTransaction() error {
@@ -196,7 +199,7 @@
	return nil
 }
 
-// Commit the latest transaction.
+// CommitTransaction commits the latest transaction.
 //
 // Returns an error if there wasn't any active transaction.
 func (r *Recorder[H]) CommitTransaction() error {
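A sketch of the transactional recording flow described above (illustrative; hash.H256 is assumed as the hash type, and DrainStorageProof is assumed to return the trie.StorageProof built by storageProof):

```go
package example

import (
	"github.com/ChainSafe/gossamer/internal/primitives/core/hash"
	"github.com/ChainSafe/gossamer/internal/primitives/trie/recorder"
)

func recorderTransactions(storageRoot hash.H256) {
	rec := recorder.NewRecorder[hash.H256]()

	// TrieRecorder yields the triedb.TrieRecorder view for one trie.
	trieRec := rec.TrieRecorder(storageRoot)
	_ = trieRec // pass to a triedb.TrieDB so accesses are recorded

	rec.StartTransaction()
	// ... accesses recorded here belong to the transaction ...
	if err := rec.RollBackTransaction(); err != nil {
		panic(err) // only fails when no transaction is active
	}

	// Drain everything still recorded into a storage proof, resetting the
	// recorder's internal state.
	proof := rec.DrainStorageProof()
	_ = proof
}
```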
@@ -294,7 +297,7 @@ func (tr *trieRecorder[H]) Record(access triedb.TrieAccess) {
	var encodedSizeUpdate uint
	switch access := access.(type) {
	case triedb.CachedNodeAccess[H]:
-		log.Printf("TRACE: Recording node: %v", access.Hash)
+		logger.Tracef("Recording node: %v", access.Hash)
		_, ok := tr.inner.accessedNodes[access.Hash]
		if !ok {
			node := access.Node.Encoded()
@@ -308,7 +311,7 @@ func (tr *trieRecorder[H]) Record(access triedb.TrieAccess) {
			tr.inner.accessedNodes[access.Hash] = node
		}
	case triedb.EncodedNodeAccess[H]:
-		log.Printf("TRACE: Recording node: %v", access.Hash)
+		logger.Tracef("Recording node: %v", access.Hash)
		_, ok := tr.inner.accessedNodes[access.Hash]
		if !ok {
			node := access.EncodedNode
@@ -322,7 +325,7 @@ func (tr *trieRecorder[H]) Record(access triedb.TrieAccess) {
			tr.inner.accessedNodes[access.Hash] = node
		}
	case triedb.ValueAccess[H]:
-		log.Printf("TRACE: Recording value {hash:%v value:%v}", access.Hash, access.FullKey)
+		logger.Tracef("Recording value {hash:%v key:%v}", access.Hash, access.FullKey)
		_, ok := tr.inner.accessedNodes[access.Hash]
		if !ok {
			value := access.Value
@@ -336,18 +339,18 @@ func (tr *trieRecorder[H]) Record(access triedb.TrieAccess) {
		}
		tr.updateRecordedKeys(access.FullKey, triedb.RecordedValue)
	case triedb.HashAccess:
-		log.Printf("TRACE: Recorded hash access for key: %s", access.FullKey)
+		logger.Tracef("Recorded hash access for key: %s", access.FullKey)
		// We don't need to update the encodedSizeUpdate as the hash was already
		// accounted for by the recorded node that holds the hash.
		tr.updateRecordedKeys(access.FullKey, triedb.RecordedHash)
	case triedb.NonExistingNodeAccess:
-		log.Printf("TRACE: Recorded non-existing value access for key for key: %s", access.FullKey)
+		logger.Tracef("Recorded non-existing value access for key: %s", access.FullKey)
		// Non-existing access means we recorded all trie nodes up to the value.
		// Not the actual value, as it doesn't exist, but all trie nodes to know
		// that the value doesn't exist in the trie.
		tr.updateRecordedKeys(access.FullKey, triedb.RecordedValue)
	case triedb.InlineValueAccess:
-		log.Printf("TRACE: Recorded inline value access for key: %s", access.FullKey)
+		logger.Tracef("Recorded inline value access for key: %s", access.FullKey)
		// A value was accessed that is stored inline a node and we recorded all trie nodes
		// to access this value.
		tr.updateRecordedKeys(access.FullKey, triedb.RecordedValue)
diff --git a/internal/primitives/trie/storage_proof.go b/internal/primitives/trie/storage_proof.go
index 5df11b37cf..104ef51a65 100644
--- a/internal/primitives/trie/storage_proof.go
+++ b/internal/primitives/trie/storage_proof.go
@@ -9,7 +9,7 @@ import (
	"github.com/tidwall/btree"
 )
 
-// A proof that some set of key-value pairs are included in the storage trie. The proof contains
+// StorageProof is a proof that some set of key-value pairs are included in the storage trie. The proof contains
 // the storage values so that the partial storage backend can be reconstructed by a verifier that
 // does not already have access to the key-value pairs.
 //
@@ -20,7 +20,7 @@ type StorageProof struct {
	trieNodes btree.Set[string]
 }
 
-// Constructs a [StorageProof] from a subset of encoded trie nodes.
+// NewStorageProof constructs a [StorageProof] from a subset of encoded trie nodes.
 func NewStorageProof(trieNodes [][]byte) StorageProof {
	set := btree.Set[string]{}
	for _, trieNode := range trieNodes {
@@ -31,12 +31,12 @@ func NewStorageProof(trieNodes [][]byte) StorageProof {
	}
 }
 
-// Returns whether this is an empty proof.
+// Empty returns whether this is an empty proof.
 func (sp *StorageProof) Empty() bool {
	return sp.trieNodes.Len() == 0
 }
 
-// Returns all the encoded trie ndoes in lexigraphical order from the proof.
+// Nodes returns all the encoded trie nodes in lexicographical order from the proof.
 func (sp *StorageProof) Nodes() [][]byte {
	var ret [][]byte
	sp.trieNodes.Scan(func(v string) bool {
@@ -46,7 +46,7 @@ func (sp *StorageProof) Nodes() [][]byte {
	return ret
 }
 
-// Constructs a [MemoryDB] from a [StorageProof]
+// NewMemoryDBFromStorageProof constructs a [MemoryDB] from a [StorageProof].
 func NewMemoryDBFromStorageProof[H runtime.Hash, Hasher runtime.Hasher[H]](sp StorageProof) *MemoryDB[H, Hasher] {
	db := NewMemoryDB[H, Hasher]()
	sp.trieNodes.Scan(func(v string) bool {
diff --git a/internal/primitives/trie/trie.go b/internal/primitives/trie/trie.go
index ce6142c6ec..3bea2a31a7 100644
--- a/internal/primitives/trie/trie.go
+++ b/internal/primitives/trie/trie.go
@@ -12,26 +12,26 @@ import (
	triedb "github.com/ChainSafe/gossamer/pkg/trie/triedb"
 )
 
-// Reexport from [memorydb.MemoryDB] where supplied [memorydb.KeyFunction] is [memorydb.PrefixedKey] for prefixing
-// keys internally (avoiding key conflict for non random keys).
+// PrefixedMemoryDB is a reexport of [memorydb.MemoryDB] where the supplied [memorydb.KeyFunction] is
+// [memorydb.PrefixedKey] for prefixing keys internally (avoiding key conflict for non random keys).
 type PrefixedMemoryDB[Hash runtime.Hash, Hasher hashdb.Hasher[Hash]] struct {
	memorydb.MemoryDB[Hash, Hasher, string, memorydb.PrefixedKey[Hash]]
 }
 
-// Constructor for [PrefixedMemoryDB]
+// NewPrefixedMemoryDB is the constructor for [PrefixedMemoryDB].
 func NewPrefixedMemoryDB[Hash runtime.Hash, Hasher hashdb.Hasher[Hash]]() *PrefixedMemoryDB[Hash, Hasher] {
	return &PrefixedMemoryDB[Hash, Hasher]{
		memorydb.NewMemoryDB[Hash, Hasher, string, memorydb.PrefixedKey[Hash]]([]byte{0}),
	}
 }
 
-// Reexport from [memorydb.MemoryDB] where supplied [memorydb.KeyFunction] is [memorydb.HashKey] which is a noop
-// operation on the supplied prefix, and only uses the hash.
+// MemoryDB is a reexport of [memorydb.MemoryDB] where the supplied [memorydb.KeyFunction] is [memorydb.HashKey]
+// which is a noop operation on the supplied prefix, and only uses the hash.
 type MemoryDB[Hash runtime.Hash, Hasher runtime.Hasher[Hash]] struct {
	memorydb.MemoryDB[Hash, Hasher, Hash, memorydb.HashKey[Hash]]
 }
 
-// Constructor for [MemoryDB].
+// NewMemoryDB is the constructor for [MemoryDB].
 func NewMemoryDB[Hash runtime.Hash, Hasher runtime.Hasher[Hash]]() *MemoryDB[Hash, Hasher] {
	return &MemoryDB[Hash, Hasher]{
		MemoryDB: memorydb.NewMemoryDB[Hash, Hasher, Hash, memorydb.HashKey[Hash]]([]byte{0}),
@@ -44,7 +44,7 @@ type KeyValue struct {
	Value []byte
 }
 
-// Determine a trie root given a hash DB and delta values.
+// DeltaTrieRoot determines a trie root given a hash DB and delta values.
 func DeltaTrieRoot[H runtime.Hash, Hasher runtime.Hasher[H]](
	db hashdb.HashDB[H],
	root H,
@@ -85,7 +85,7 @@ func DeltaTrieRoot[H runtime.Hash, Hasher runtime.Hasher[H]](
	return hash, err
 }
 
-// Read a value from the trie.
+// ReadTrieValue reads a value from the trie.
func ReadTrieValue[H runtime.Hash, Hasher runtime.Hasher[H]]( db hashdb.HashDB[H], root H, @@ -106,7 +106,7 @@ func ReadTrieValue[H runtime.Hash, Hasher runtime.Hasher[H]]( return nil, nil } -// Read a value from the trie with given [triedb.Query]. +// ReadTrieValueWith reads a value from the trie with given [triedb.Query]. func ReadTrieValueWith[H runtime.Hash, Hasher runtime.Hasher[H]]( db hashdb.HashDB[H], root H, @@ -128,7 +128,7 @@ func ReadTrieValueWith[H runtime.Hash, Hasher runtime.Hasher[H]]( return nil, nil } -// Read the [triedb.MerkleValue] of the node that is the closest descendant for +// ReadTrieFirstDescendantValue reads the [triedb.MerkleValue] of the node that is the closest descendant for // the provided key. func ReadTrieFirstDescendantValue[H runtime.Hash, Hasher runtime.Hasher[H]]( db hashdb.HashDB[H], @@ -170,7 +170,7 @@ func ChildDeltaTrieRoot[H runtime.Hash, Hasher runtime.Hasher[H]]( return DeltaTrieRoot[H, Hasher](ksdb, root, delta, recorder, cache, stateVersion) } -// Read a value from the child trie. +// ReadChildTrieValue reads a value from the child trie. func ReadChildTrieValue[H runtime.Hash, Hasher runtime.Hasher[H]]( keyspace []byte, db hashdb.HashDB[H], @@ -194,7 +194,7 @@ func ReadChildTrieValue[H runtime.Hash, Hasher runtime.Hasher[H]]( return nil, nil } -// Read a hash from the child trie. +// ReadChildTrieHash reads a hash from the child trie. func ReadChildTrieHash[H runtime.Hash, Hasher runtime.Hasher[H]]( keyspace []byte, db hashdb.HashDB[H], @@ -211,7 +211,7 @@ func ReadChildTrieHash[H runtime.Hash, Hasher runtime.Hasher[H]]( return trieDB.GetHash(key) } -// Read the [triedb.MerkleValue] of the node that is the closest descendant for +// ReadChildTrieFirstDescendantValue reads the [triedb.MerkleValue] of the node that is the closest descendant for // the provided child key. 
 func ReadChildTrieFirstDescendantValue[H runtime.Hash, Hasher runtime.Hasher[H]](
	keyspace []byte,
@@ -236,7 +236,7 @@ type KeyspacedDB[Hash comparable] struct {
	keySpace []byte
 }
 
-// Constructor for [KeyspacedDB]
+// NewKeyspacedDB is the constructor for [KeyspacedDB].
 func NewKeyspacedDB[Hash comparable](db hashdb.HashDB[Hash], ks []byte) *KeyspacedDB[Hash] {
	return &KeyspacedDB[Hash]{
		db: db,
diff --git a/internal/saturating/saturating.go b/internal/saturating/saturating.go
new file mode 100644
index 0000000000..3945a6c195
--- /dev/null
+++ b/internal/saturating/saturating.go
@@ -0,0 +1,67 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package saturating
+
+import (
+	"math"
+	"unsafe"
+
+	"golang.org/x/exp/constraints"
+)
+
+// getMax returns the maximum value an unsigned integer of the given byte size
+// can hold. Note that the uint return type assumes a 64-bit platform.
+func getMax(n uintptr) uint {
+	switch n {
+	case 1:
+		return math.MaxUint8
+	case 2:
+		return math.MaxUint16
+	case 4:
+		return math.MaxUint32
+	case 8:
+		return math.MaxUint64
+	default:
+		panic("unsupported")
+	}
+}
+
+// Sub returns a - b, saturating at zero, for unsigned integers of possibly
+// different widths.
+func Sub[T, U constraints.Unsigned](a T, b U) T {
+	sizeA := unsafe.Sizeof(a)
+	sizeB := unsafe.Sizeof(b)
+
+	switch {
+	case sizeB > sizeA:
+		if uint(b) <= getMax(sizeA) {
+			if T(b) > a {
+				return 0
+			} else {
+				return a - T(b)
+			}
+		} else {
+			return 0
+		}
+	default:
+		// sizeB <= sizeA
+		if T(b) <= a {
+			return a - T(b)
+		} else {
+			return 0
+		}
+	}
+}
+
+// Into converts n to the unsigned integer type U, saturating at the maximum
+// value representable by U.
+func Into[T, U constraints.Unsigned](n T) (dst U) {
+	sizeDst := unsafe.Sizeof(dst)
+	sizeN := unsafe.Sizeof(n)
+
+	switch {
+	case sizeDst < sizeN:
+		if uint(n) > getMax(sizeDst) {
+			return U(getMax(sizeDst))
+		} else {
+			return U(n)
+		}
+	default:
+		return U(n)
+	}
+}
diff --git a/internal/saturating/saturating_test.go b/internal/saturating/saturating_test.go
new file mode 100644
index 0000000000..9b136e34ab
--- /dev/null
+++ b/internal/saturating/saturating_test.go
@@ -0,0 +1,34 @@
+// Copyright 2024 ChainSafe Systems (ON)
+// SPDX-License-Identifier: LGPL-3.0-only
+
+package saturating
+
+import (
+	"math"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSub(t *testing.T) {
+	require.Equal(t, uint32(0), Sub(uint32(25), uint64(50)))
+	require.Equal(t, uint32(0), Sub(uint32(25), uint64(25)))
+	require.Equal(t, uint32(25), Sub(uint32(50), uint64(25)))
+	require.Equal(t, uint32(0), Sub(uint32(math.MaxUint32), uint64(math.MaxUint64)))
+	require.Equal(t, uint32(1), Sub(uint32(math.MaxUint32), uint64(math.MaxUint32-1)))
+	require.Equal(t, uint32(25), Sub(uint32(50), uint64(25)))
+	require.Equal(t, uint64(0), Sub(uint64(math.MaxUint32), uint32(math.MaxUint32)))
+	require.Equal(t, uint64(0), Sub(uint64(math.MaxUint32-1), uint32(math.MaxUint32)))
+	require.Equal(t, uint64(math.MaxUint64-math.MaxUint32), Sub(uint64(math.MaxUint64), uint32(math.MaxUint32)))
+	require.Equal(t, uint(25), Sub(uint(50), uint(25)))
+	require.Equal(t, uint8(0), Sub(uint8(math.MaxUint8), uint16(math.MaxUint16)))
+	require.Equal(t, uint16(0), Sub(uint16(math.MaxUint16), uint32(math.MaxUint32)))
+}
+
+func TestInto(t *testing.T) {
+	require.Equal(t, uint32(math.MaxUint32), Into[uint64, uint32](math.MaxUint64))
+	require.Equal(t, uint32(math.MaxUint32), Into[uint64, uint32](math.MaxUint32))
+	require.Equal(t, uint32(math.MaxUint32-1), Into[uint64, uint32](math.MaxUint32-1))
+	require.Equal(t, uint32(math.MaxUint32), Into[uint32, uint32](math.MaxUint32))
+	require.Equal(t, uint64(math.MaxUint32), Into[uint32, uint64](math.MaxUint32))
+}
diff --git a/lib/blocktree/blocktree_test.go b/lib/blocktree/blocktree_test.go
index bca3a93e16..5a5af6697f 100644
--- a/lib/blocktree/blocktree_test.go
+++ b/lib/blocktree/blocktree_test.go
@@ -49,8 +49,7 @@ func Test_BlockTree_GetBlock(t *testing.T) {
	if n == nil {
		t.Fatal("node is nil")
	}
-
-	if !bytes.Equal(hashes[2][:], n.hash[:]) {
+	if !bytes.Equal(hashes[2][:], (*n).hash[:]) {
		t.Fatalf("Fail: got %x expected %x", n.hash, hashes[2])
	}
 
diff --git a/pkg/finality-grandpa/dummy_chain_test.go b/pkg/finality-grandpa/dummy_chain_test.go
index e8c296f7e1..98a8cf039e 100644
--- a/pkg/finality-grandpa/dummy_chain_test.go
+++ b/pkg/finality-grandpa/dummy_chain_test.go
@@ -108,7 +108,7 @@ func (dc *dummyChain) PushBlocks(parent string, blocks []string) {
		case b.number > a.number:
			return 1
		default:
-			panic("huh?")
+			panic("unreachable")
		}
	})
 
@@ -127,7 +127,7 @@ func (dc *dummyChain) PushBlocks(parent string, blocks []string) {
 func (dc *dummyChain) Number(hash string) uint32 {
	e, ok := dc.inner[hash]
	if !ok {
-		panic("huh?")
+		panic("should exist")
	}
	return e.number
 }
diff --git a/pkg/finality-grandpa/voter_test.go b/pkg/finality-grandpa/voter_test.go
index 5117c6cb12..88df7c377a 100644
--- a/pkg/finality-grandpa/voter_test.go
+++ b/pkg/finality-grandpa/voter_test.go
@@ -656,7 +656,7 @@ func TestVoter_PickUpFromPriorWithGrandparentStatus(t *testing.T) {
 waitForPrevote:
	for sme := range roundIn {
		if sme.Error != nil {
-			t.Errorf("wtf?")
+			t.Errorf("unexpected error: %v", sme.Error)
		}
		msg := sme.SignedMessage.Message.inner
diff --git a/pkg/scale/varying_data_type_test.go b/pkg/scale/varying_data_type_test.go
index f70b5a5a62..ffad098aa8 100644
--- a/pkg/scale/varying_data_type_test.go
+++ b/pkg/scale/varying_data_type_test.go
@@ -530,7 +530,7 @@ func TestVaryingDataType_EncodeArray(t *testing.T) {
 
		bytes, err := Marshal(mvdtArray)
		if err != nil {
-			t.Errorf("wtf %v", err)
+			t.Errorf("marshal error: %v", err)
		}
		assert.NoError(t, err)
		assert.Equal(t, expected, bytes)
diff --git a/pkg/trie/triedb/cache.go b/pkg/trie/triedb/cache.go
index ee4e24f208..36b02dc26b 100644
--- a/pkg/trie/triedb/cache.go
+++ b/pkg/trie/triedb/cache.go
@@ -242,7 +242,7 @@ func (no ValueCachedNode[H]) Encoded() []byte { return no.Value }
 func (no EmptyCachedNode[H]) ByteSize() uint  { return (uint)(unsafe.Sizeof(no)) }
 func (no LeafCachedNode[H]) ByteSize() uint {
-	return (uint)(unsafe.Sizeof(no)) + uint(len(no.PartialKey.Inner())+len(no.Value.data())) //nolint:gosec
+	return (uint)(unsafe.Sizeof(no)) + uint(len(no.PartialKey.Inner())+len(no.Value.data()))
 }
 func (no BranchCachedNode[H]) ByteSize() uint {
	selfSize := (uint)(unsafe.Sizeof(no))
diff --git a/pkg/trie/triedb/lookup.go b/pkg/trie/triedb/lookup.go
index f6577e2048..82c5dca1cc 100644
--- a/pkg/trie/triedb/lookup.go
+++ b/pkg/trie/triedb/lookup.go
@@ -130,7 +130,7 @@ func (l *TrieLookup[H, Hasher, QueryItem]) LookupFirstDescendant(
	// (descendent), but not the other way around.
	if !node.PartialKey.StartsWithNibbles(partial) {
		l.recordAccess(NonExistingNodeAccess{FullKey: fullKey})
-		return nil, nil //nolint:nilnil
+		return nil, nil
	}

	if partial.Len() != node.PartialKey.Len() {
@@ -154,14 +154,14 @@ func (l *TrieLookup[H, Hasher, QueryItem]) LookupFirstDescendant(
			}
			return HashMerkleValue[H]{Hash: hash}, nil
		}
-		return nil, nil //nolint:nilnil
+		return nil, nil
	}

	// Partial key is longer or equal than the branch slice.
	// Ensure partial key starts with the branch slice.
	if !partial.StartsWithNibbleSlice(node.PartialKey) {
		l.recordAccess(NonExistingNodeAccess{FullKey: fullKey})
-		return nil, nil //nolint:nilnil
+		return nil, nil
	}

	// Partial key starts with the branch slice.
@@ -183,11 +183,11 @@ func (l *TrieLookup[H, Hasher, QueryItem]) LookupFirstDescendant( nextNode = child } else { l.recordAccess(NonExistingNodeAccess{fullKey}) - return nil, nil //nolint:nilnil + return nil, nil } case EmptyCachedNode[H]: l.recordAccess(NonExistingNodeAccess{FullKey: fullKey}) - return nil, nil //nolint:nilnil + return nil, nil default: panic("unreachable") }
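Note that the lookup paths above report "no descendant" as a plain (nil, nil) return rather than an error, so callers must check the MerkleValue for nil before type-switching on it. A consuming sketch (illustrative; only the HashMerkleValue variant appears in this diff, so other variants are matched generically, and hash.H256 is assumed as the hash type):

```go
package example

import (
	"fmt"

	"github.com/ChainSafe/gossamer/internal/primitives/core/hash"
	"github.com/ChainSafe/gossamer/pkg/trie/triedb"
)

func handleMerkleValue(mv triedb.MerkleValue[hash.H256], err error) {
	switch {
	case err != nil:
		fmt.Println("lookup failed:", err)
	case mv == nil:
		// (nil, nil): no node is a descendant of the queried key.
		fmt.Println("no closest descendant for this key")
	default:
		switch mv := mv.(type) {
		case triedb.HashMerkleValue[hash.H256]:
			fmt.Println("descendant node hash:", mv.Hash)
		default:
			// Inline nodes are represented by another MerkleValue variant
			// not shown in this hunk, so it is handled generically here.
			fmt.Printf("merkle value: %v\n", mv)
		}
	}
}
```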