Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions core/state/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,11 @@ type Trie interface {
// in the trie with provided address.
UpdateAccount(address common.Address, account *types.StateAccount, codeLen int) error

// UpdateAccountAsync will abstract the write of an account to the secure trie.
// The actual value of the account is not resolved from the passed function until
// it is needed when hashing the trie.
UpdateAccountAsync(address common.Address, accountResolver func() (*types.StateAccount, int)) error

// UpdateStorage associates key with value in the trie. If value has length zero,
// any existing value is deleted from the trie. The value bytes must not be modified
// by the caller while they are stored in the trie. If a node was not found in the
Expand Down
31 changes: 21 additions & 10 deletions core/state/statedb.go
Original file line number Diff line number Diff line change
Expand Up @@ -566,14 +566,11 @@ func (s *StateDB) GetTransientState(addr common.Address, key common.Hash) common
// Setting, updating & deleting state object methods.
//

// updateStateObject writes the given object to the trie.
func (s *StateDB) updateStateObject(obj *stateObject) {
// Encode the account and update the account trie
if err := s.trie.UpdateAccount(obj.Address(), &obj.data, len(obj.code)); err != nil {
s.setError(fmt.Errorf("updateStateObject (%x) error: %v", obj.Address(), err))
}
if obj.dirtyCode {
s.trie.UpdateContractCode(obj.Address(), common.BytesToHash(obj.CodeHash()), obj.code)
// updateStateObject writes the given object to the trie. The actual value is
// only resolved from the provided function when it is needed during trie hashing.
// Failures are not returned to the caller; they are recorded on the StateDB via
// s.setError and surface when the state is finalized.
// NOTE(review): the resolver may block (IntermediateRoot wires it to an
// unbuffered channel fed by a background worker) — confirm it is only invoked
// after the corresponding worker has been started.
func (s *StateDB) updateStateObject(addr common.Address, resolver func() (*types.StateAccount, int)) {
	if err := s.trie.UpdateAccountAsync(addr, resolver); err != nil {
		s.setError(fmt.Errorf("updateStateObject (%x) error: %v", addr, err))
	}
}

Expand Down Expand Up @@ -829,11 +826,18 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
// later time.
workers.SetLimit(1)
}

type stateAccountWithCodeLen struct {
*types.StateAccount
codeLen int
}
stateObjectsResolve := make(map[common.Address]func() (*types.StateAccount, int))
for addr, op := range s.mutations {
if op.applied || op.isDelete() {
continue
}
obj := s.stateObjects[addr] // closure for the task runner below
complete := make(chan stateAccountWithCodeLen)
workers.Go(func() error {
if s.db.TrieDB().IsVerkle() {
obj.updateTrie()
Expand All @@ -846,8 +850,14 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.witness.AddState(obj.trie.Witness())
}
}
complete <- stateAccountWithCodeLen{&obj.data, 0}
return nil
})

stateObjectsResolve[addr] = func() (*types.StateAccount, int) {
res := <-complete
return res.StateAccount, res.codeLen
}
}
// If witness building is enabled, gather all the read-only accesses.
// Skip witness collection in Verkle mode, they will be gathered
Expand Down Expand Up @@ -898,7 +908,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
}
}
}
workers.Wait()

s.StorageUpdates += time.Since(start)

// Now we're about to start to write changes to the trie. The trie is so far
Expand Down Expand Up @@ -939,7 +949,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
if op.isDelete() {
deletedAddrs = append(deletedAddrs, addr)
} else {
s.updateStateObject(s.stateObjects[addr])
s.updateStateObject(addr, stateObjectsResolve[addr])
s.AccountUpdated += 1
}
usedAddrs = append(usedAddrs, addr) // Copy needed for closure
Expand All @@ -966,6 +976,7 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash {
s.witnessStats.Add(witness, common.Hash{})
}
}

return hash
}

Expand Down
6 changes: 3 additions & 3 deletions trie/committer.go
Original file line number Diff line number Diff line change
Expand Up @@ -157,8 +157,8 @@ func (c *committer) store(path []byte, n node) node {
// length of leaves should be exactly same.
if c.collectLeaf {
if sn, ok := n.(*shortNode); ok {
if val, ok := sn.Val.(valueNode); ok {
c.nodes.AddLeaf(nhash, val)
if val, ok := sn.Val.(*valueNode); ok {
c.nodes.AddLeaf(nhash, val.resolve())
}
}
}
Expand All @@ -182,7 +182,7 @@ func forGatherChildren(n node, onChild func(hash common.Hash)) {
}
case hashNode:
onChild(common.BytesToHash(n))
case valueNode, nil:
case *valueNode, nil:
default:
panic(fmt.Sprintf("unknown node type: %T", n))
}
Expand Down
4 changes: 2 additions & 2 deletions trie/hasher.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ func (h *hasher) encodeShortNode(n *shortNode) []byte {
if hasTerm(n.Key) {
var ln leafNodeEncoder
ln.Key = hexToCompact(n.Key)
ln.Val = n.Val.(valueNode)
ln.Val = n.Val.(*valueNode).resolve()
ln.encode(h.encbuf)
return h.encodedBytes()
}
Expand Down Expand Up @@ -162,7 +162,7 @@ func (h *hasher) encodeFullNode(n *fullNode) []byte {
}
}
if n.Children[16] != nil {
fn.Children[16] = n.Children[16].(valueNode)
fn.Children[16] = n.Children[16].(*valueNode).resolve()
}
fn.encode(h.encbuf)
fnEncoderPool.Put(fn)
Expand Down
8 changes: 4 additions & 4 deletions trie/iterator.go
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,7 @@ func (it *nodeIterator) Leaf() bool {

func (it *nodeIterator) LeafKey() []byte {
if len(it.stack) > 0 {
if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
if _, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok {
return hexToKeybytes(it.path)
}
}
Expand All @@ -224,16 +224,16 @@ func (it *nodeIterator) LeafKey() []byte {

func (it *nodeIterator) LeafBlob() []byte {
if len(it.stack) > 0 {
if node, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
return node
if node, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok {
return node.resolve()
}
}
panic("not at leaf")
}

func (it *nodeIterator) LeafProof() [][]byte {
if len(it.stack) > 0 {
if _, ok := it.stack[len(it.stack)-1].node.(valueNode); ok {
if _, ok := it.stack[len(it.stack)-1].node.(*valueNode); ok {
hasher := newHasher(false)
defer returnHasherToPool(hasher)
proofs := make([][]byte, 0, len(it.stack))
Expand Down
30 changes: 23 additions & 7 deletions trie/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,10 @@ type (
flags nodeFlag
}
hashNode []byte
valueNode []byte
valueNode struct {
resolver func() []byte
val []byte
}

// fullnodeEncoder is a type used exclusively for encoding fullNode.
// Briefly instantiating a fullnodeEncoder and initializing with
Expand All @@ -68,6 +71,19 @@ type (
}
)

// newValueNode constructs a value node whose payload is produced on demand by
// resolver the first time the value is actually needed.
func newValueNode(resolver func() []byte) *valueNode {
	return &valueNode{resolver: resolver}
}

// resolve returns the node's value, invoking the resolver lazily on first
// use. The resolver is dropped once it has run, guaranteeing at-most-once
// invocation: the original nil-check re-ran the resolver whenever it yielded
// a nil value, which with a single-shot resolver (e.g. one receiving from an
// unbuffered channel, as StateDB.IntermediateRoot builds) would block forever
// on the second call. Dropping the resolver also lets anything it captures be
// garbage-collected.
//
// NOTE(review): this lazy initialization is unsynchronized; confirm a single
// valueNode is never resolved from multiple goroutines concurrently.
func (n *valueNode) resolve() []byte {
	if n.val == nil && n.resolver != nil {
		n.val = n.resolver()
		n.resolver = nil
	}
	return n.val
}

// EncodeRLP encodes a full node into the consensus RLP format.
func (n *fullNode) EncodeRLP(w io.Writer) error {
eb := rlp.NewEncoderBuffer(w)
Expand All @@ -91,13 +107,13 @@ func (n nodeFlag) copy() nodeFlag {
func (n *fullNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n *shortNode) cache() (hashNode, bool) { return n.flags.hash, n.flags.dirty }
func (n hashNode) cache() (hashNode, bool) { return nil, true }
func (n valueNode) cache() (hashNode, bool) { return nil, true }
func (n *valueNode) cache() (hashNode, bool) { return nil, true }

// Pretty printing.
func (n *fullNode) String() string { return n.fstring("") }
func (n *shortNode) String() string { return n.fstring("") }
func (n hashNode) String() string { return n.fstring("") }
func (n valueNode) String() string { return n.fstring("") }
func (n *valueNode) String() string { return n.fstring("") }

func (n *fullNode) fstring(ind string) string {
resp := fmt.Sprintf("[\n%s ", ind)
Expand All @@ -117,8 +133,8 @@ func (n *shortNode) fstring(ind string) string {
func (n hashNode) fstring(ind string) string {
return fmt.Sprintf("<%x> ", []byte(n))
}
func (n valueNode) fstring(ind string) string {
return fmt.Sprintf("%x ", []byte(n))
func (n *valueNode) fstring(ind string) string {
return fmt.Sprintf("%x ", n.resolve())
}

// mustDecodeNode is a wrapper of decodeNode and panic if any error is encountered.
Expand Down Expand Up @@ -185,7 +201,7 @@ func decodeShort(hash, elems []byte) (node, error) {
if err != nil {
return nil, fmt.Errorf("invalid value node: %v", err)
}
return &shortNode{key, valueNode(val), flag}, nil
return &shortNode{key, newValueNode(func() []byte { return val }), flag}, nil
}
r, _, err := decodeRef(rest)
if err != nil {
Expand All @@ -208,7 +224,7 @@ func decodeFull(hash, elems []byte) (*fullNode, error) {
return n, err
}
if len(val) > 0 {
n.Children[16] = valueNode(val)
n.Children[16] = newValueNode(func() []byte { return val })
}
return n, nil
}
Expand Down
4 changes: 2 additions & 2 deletions trie/node_enc.go
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,6 @@ func (n hashNode) encode(w rlp.EncoderBuffer) {
w.WriteBytes(n)
}

func (n valueNode) encode(w rlp.EncoderBuffer) {
w.WriteBytes(n)
// encode writes the value node's payload into the RLP encoder buffer,
// resolving the lazy value on first use.
func (n *valueNode) encode(w rlp.EncoderBuffer) {
	w.WriteBytes(n.resolve())
}
18 changes: 9 additions & 9 deletions trie/proof.go
Original file line number Diff line number Diff line change
Expand Up @@ -128,8 +128,8 @@ func VerifyProof(rootHash common.Hash, key []byte, proofDb ethdb.KeyValueReader)
case hashNode:
key = keyrest
copy(wantHash[:], cld)
case valueNode:
return cld, nil
case *valueNode:
return cld.resolve(), nil
}
}
}
Expand Down Expand Up @@ -191,8 +191,8 @@ func proofToPath(rootHash common.Hash, root node, key []byte, proofDb ethdb.KeyV
if err != nil {
return nil, nil, err
}
case valueNode:
valnode = cld
case *valueNode:
valnode = cld.resolve()
}
// Link the parent and child.
switch pnode := parent.(type) {
Expand Down Expand Up @@ -298,7 +298,7 @@ findFork:
}
// Only one proof points to non-existent key.
if shortForkRight != 0 {
if _, ok := rn.Val.(valueNode); ok {
if _, ok := rn.Val.(*valueNode); ok {
// The fork point is root node, unset the entire trie
if parent == nil {
return true, nil
Expand All @@ -309,7 +309,7 @@ findFork:
return false, unset(rn, rn.Val, left[pos:], len(rn.Key), false)
}
if shortForkLeft != 0 {
if _, ok := rn.Val.(valueNode); ok {
if _, ok := rn.Val.(*valueNode); ok {
// The fork point is root node, unset the entire trie
if parent == nil {
return true, nil
Expand Down Expand Up @@ -396,7 +396,7 @@ func unset(parent node, child node, key []byte, pos int, removeLeft bool) error
}
return nil
}
if _, ok := cld.Val.(valueNode); ok {
if _, ok := cld.Val.(*valueNode); ok {
fn := parent.(*fullNode)
fn.Children[key[pos-1]] = nil
return nil
Expand Down Expand Up @@ -432,7 +432,7 @@ func hasRightElement(node node, key []byte) bool {
return bytes.Compare(rn.Key, key[pos:]) > 0
}
node, pos = rn.Val, pos+len(rn.Key)
case valueNode:
case *valueNode:
return false // We have resolved the whole path
default:
panic(fmt.Sprintf("%T: invalid node: %v", node, node)) // hashnode
Expand Down Expand Up @@ -612,7 +612,7 @@ func get(tn node, key []byte, skipResolved bool) ([]byte, node) {
return key, n
case nil:
return key, nil
case valueNode:
case *valueNode:
return nil, n
default:
panic(fmt.Sprintf("%T: invalid node: %v", tn, tn))
Expand Down
22 changes: 22 additions & 0 deletions trie/secure_trie.go
Original file line number Diff line number Diff line change
Expand Up @@ -226,6 +226,28 @@ func (t *StateTrie) UpdateAccount(address common.Address, acc *types.StateAccoun
return nil
}

// UpdateAccountAsync schedules the write of an account into the secure trie
// under the keccak256 hash of the address. The account value itself is not
// resolved from accountResolve until the trie needs it during hashing.
//
// NOTE(review): the code length returned by accountResolve is discarded here,
// whereas the synchronous UpdateAccount receives codeLen explicitly — confirm
// this is intentional for the async path.
func (t *StateTrie) UpdateAccountAsync(address common.Address, accountResolve func() (*types.StateAccount, int)) error {
	hk := crypto.Keccak256(address.Bytes())
	// The underlying trie stores raw bytes, so wrap the account resolver such
	// that the RLP encoding also happens lazily, at hashing time.
	resolve := func() []byte {
		acc, _ := accountResolve()
		data, err := rlp.EncodeToBytes(acc)
		if err != nil {
			// The resolver signature offers no error path; an encoding
			// failure for a StateAccount indicates a programming error,
			// so treat it as fatal.
			panic(err)
		}
		return data
	}
	if err := t.trie.UpdateAsync(hk, resolve); err != nil {
		return err
	}
	if t.preimages != nil {
		// Record the hash->address preimage so secure keys can be reversed
		// later when preimage recording is enabled.
		t.secKeyCache[common.Hash(hk)] = address.Bytes()
	}
	return nil
}

func (t *StateTrie) UpdateContractCode(_ common.Address, _ common.Hash, _ []byte) error {
return nil
}
Expand Down
4 changes: 2 additions & 2 deletions trie/sync.go
Original file line number Diff line number Diff line change
Expand Up @@ -612,15 +612,15 @@ func (s *Sync) children(req *nodeRequest, object node) ([]*nodeRequest, error) {
for _, child := range children {
// Notify any external watcher of a new key/value node
if req.callback != nil {
if node, ok := (child.node).(valueNode); ok {
if node, ok := (child.node).(*valueNode); ok {
var paths [][]byte
if len(child.path) == 2*common.HashLength {
paths = append(paths, hexToKeybytes(child.path))
} else if len(child.path) == 4*common.HashLength {
paths = append(paths, hexToKeybytes(child.path[:2*common.HashLength]))
paths = append(paths, hexToKeybytes(child.path[2*common.HashLength:]))
}
if err := req.callback(paths, child.path, node, req.hash, req.path); err != nil {
if err := req.callback(paths, child.path, node.resolve(), req.hash, req.path); err != nil {
return nil, err
}
}
Expand Down
4 changes: 4 additions & 0 deletions trie/transition.go
Original file line number Diff line number Diff line change
Expand Up @@ -138,6 +138,10 @@ func (t *TransitionTrie) UpdateAccount(addr common.Address, account *types.State
// only needs to know what the account trie does now.
return t.overlay.UpdateAccount(addr, account, codeLen)
}
// UpdateAccountAsync satisfies the async account-update interface for the
// transition trie by resolving the account immediately and delegating to the
// overlay trie's synchronous UpdateAccount.
//
// NOTE(review): resolving eagerly here defeats the laziness of the async path;
// if the caller's resolver blocks on a background worker (as in
// StateDB.IntermediateRoot), confirm this synchronous invocation cannot
// deadlock.
func (t *TransitionTrie) UpdateAccountAsync(address common.Address, accountResolver func() (*types.StateAccount, int)) error {
	acct, codeLen := accountResolver()
	return t.overlay.UpdateAccount(address, acct, codeLen)
}

// DeleteStorage removes any existing value for key from the trie. If a node was not
// found in the database, a trie.MissingNodeError is returned.
Expand Down
Loading
Loading