diff --git a/cmd/config.go b/cmd/config.go index 00f375014..8283b8ea2 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -96,7 +96,8 @@ type Config struct { func GetStringSliceWorkaround(flagName string) []string { value := viper.GetString(flagName) if value == "" || value == " " { - return []string{} + // Backwards compatibility for n0 scripts. + return viper.GetStringSlice(flagName) } return strings.Split(value, ",") } diff --git a/collections/lru.go b/collections/lru.go new file mode 100644 index 000000000..891ef6bda --- /dev/null +++ b/collections/lru.go @@ -0,0 +1,75 @@ +package collections + +import lru "github.com/hashicorp/golang-lru/v2" + +// We implement our own LRU cache as a wrapper around a dependency. This allows us to +// easily change the underlying implementation in the future if needed with minimal +// changes outside of this file. + +type LruCache[K comparable, V any] struct { + underlyingCache *lru.Cache[K, V] +} + +func NewLruCache[K comparable, V any](maxSize int) (*LruCache[K, V], error) { + underlyingCache, err := lru.New[K, V](maxSize) + if err != nil { + return nil, err + } + return &LruCache[K, V]{underlyingCache}, nil +} + +func (lruCache *LruCache[K, V]) Put(key K, value V) { + lruCache.underlyingCache.Add(key, value) +} + +func (lruCache *LruCache[K, V]) Get(key K) (V, bool) { + return lruCache.underlyingCache.Get(key) +} + +func (lruCache *LruCache[K, V]) Exists(key K) bool { + return lruCache.underlyingCache.Contains(key) +} + +func (lruCache *LruCache[K, V]) Delete(key K) { + lruCache.underlyingCache.Remove(key) +} + +func (lruCache *LruCache[K, V]) Purge() { + lruCache.underlyingCache.Purge() +} + +func (lruCache *LruCache[K, V]) Keys() []K { + return lruCache.underlyingCache.Keys() +} + +type LruSet[K comparable] struct { + underlyingCache *lru.Cache[K, struct{}] +} + +func NewLruSet[K comparable](maxSize int) (*LruSet[K], error) { + underlyingCache, err := lru.New[K, struct{}](maxSize) + if err != nil { + return nil, err + } + return &LruSet[K]{underlyingCache}, nil +} + +func (lruSet *LruSet[K]) Put(key K) { + lruSet.underlyingCache.Add(key, struct{}{}) +} + +func (lruSet *LruSet[K]) Contains(key K) bool { + return lruSet.underlyingCache.Contains(key) +} + +func (lruSet *LruSet[K]) Delete(key K) { + lruSet.underlyingCache.Remove(key) +} + +func (lruSet *LruSet[K]) Purge() { + lruSet.underlyingCache.Purge() +} + +func (lruSet *LruSet[K]) Items() []K { + return lruSet.underlyingCache.Keys() +} diff --git a/go.mod b/go.mod index 0b5355f47..b33f9ec7a 100644 --- a/go.mod +++ b/go.mod @@ -14,12 +14,12 @@ require ( github.com/bxcodec/faker v2.0.1+incompatible github.com/cloudflare/circl v1.5.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/decred/dcrd/container/lru v1.0.0 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 github.com/deso-protocol/go-deadlock v1.0.1 github.com/deso-protocol/go-merkle-tree v1.0.0 github.com/deso-protocol/uint256 v1.3.2 github.com/dgraph-io/badger/v3 v3.2103.5 + github.com/dgraph-io/ristretto v0.2.0 github.com/emirpasic/gods v1.18.1 github.com/ethereum/go-ethereum v1.14.11 github.com/fatih/color v1.17.0 @@ -38,11 +38,11 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.18.2 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tyler-smith/go-bip39 v1.1.0 github.com/unrolled/secure v1.16.0 - golang.org/x/crypto v0.28.0 - golang.org/x/sync v0.8.0 + golang.org/x/crypto v0.29.0 + golang.org/x/sync v0.9.0 
gopkg.in/DataDog/dd-trace-go.v1 v1.69.0 ) @@ -75,7 +75,6 @@ require ( github.com/coreos/go-semver v0.3.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect - github.com/dgraph-io/ristretto v0.2.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect github.com/ebitengine/purego v0.8.0 // indirect @@ -101,7 +100,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/inflection v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.17.1 // indirect + github.com/klauspost/compress v1.17.11 // indirect github.com/kyokomi/emoji/v2 v2.2.13 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -145,10 +144,10 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect golang.org/x/mod v0.21.0 // indirect - golang.org/x/net v0.30.0 // indirect - golang.org/x/sys v0.26.0 // indirect - golang.org/x/term v0.25.0 // indirect - golang.org/x/text v0.19.0 // indirect + golang.org/x/net v0.31.0 // indirect + golang.org/x/sys v0.27.0 // indirect + golang.org/x/term v0.26.0 // indirect + golang.org/x/text v0.20.0 // indirect golang.org/x/time v0.7.0 // indirect golang.org/x/tools v0.26.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect diff --git a/go.sum b/go.sum index a21ccd144..dd7d70b0c 100644 --- a/go.sum +++ b/go.sum @@ -100,8 +100,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/container/lru v1.0.0 h1:7foQymtbu18aQWYiY9RnNIeE+kvpiN+fiBQ3+viyJjI= -github.com/decred/dcrd/container/lru v1.0.0/go.mod h1:vlPwj0l+IzAHhQSsbgQnJgO5Cte78+yI065V+Mc5PRQ= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8= github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= @@ -250,8 +248,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.17.1 h1:NE3C767s2ak2bweCZo3+rdP4U/HoyVXLv/X9f2gPS5g= -github.com/klauspost/compress v1.17.1/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -384,8 +382,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= @@ -440,8 +438,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= -golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= +golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY= golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8= @@ -476,8 +474,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= -golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= -golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= +golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo= +golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -488,8 +486,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 
h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -520,15 +518,15 @@ golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -536,8 +534,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/integration_testing/connection_bridge.go b/integration_testing/connection_bridge.go index cc79f19b3..b5a194d09 100644 --- a/integration_testing/connection_bridge.go +++ b/integration_testing/connection_bridge.go @@ -103,7 +103,7 @@ func (bridge *ConnectionBridge) createInboundConnection(node *cmd.Node) *lib.Pee panic(err) } - ip, 
_, err := net.ParseCIDR(netAddress.IP.String()) + ip, _, err := net.ParseCIDR(netAddress.ToLegacy().IP.String()) if err != nil { panic(err) } diff --git a/lib/block_producer.go b/lib/block_producer.go index 46c6c85b1..0b814cc57 100644 --- a/lib/block_producer.go +++ b/lib/block_producer.go @@ -285,9 +285,12 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) desoBlockProducer.postgres, desoBlockProducer.chain.snapshot, nil) // Parse the public key that should be used for the block reward. - blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutput.PublicKey) - if err != nil { - return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: problem parsing block reward output public key: ") + blockRewardOutputPublicKey := NewPublicKey(blockRewardOutput.PublicKey) + if blockRewardOutputPublicKey == nil { + return nil, nil, nil, fmt.Errorf( + "DeSoBlockProducer._getBlockTemplate: problem parsing block reward output public key: %v", + blockRewardOutput.PublicKey, + ) } // Skip the block reward, which is the first txn in the block. @@ -305,13 +308,14 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) if blockRet.Header.Height >= uint64(desoBlockProducer.params.ForkHeights.BlockRewardPatchBlockHeight) { if txnInBlock.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper { // Parse the transactor's public key to compare with the block reward output public key. - transactorPublicKey, err := btcec.ParsePubKey(txnInBlock.PublicKey) - if err != nil { + transactorPublicKey := NewPublicKey(txnInBlock.PublicKey) + if transactorPublicKey == nil { return nil, nil, nil, - errors.Wrapf(err, - "DeSoBlockProducer._getBlockTemplate: problem parsing transactor public key: ") + fmt.Errorf( + "DeSoBlockProducer._getBlockTemplate: problem parsing transactor public key: %v", + txnInBlock.PublicKey) } - includeFeesInBlockReward = !transactorPublicKey.IsEqual(blockRewardOutputPublicKey) + includeFeesInBlockReward = !transactorPublicKey.Equal(*blockRewardOutputPublicKey) } else { // In the case of atomic transaction wrappers, we must parse and process each inner transaction // independently. We let includeFeesInBlockRewards remain true but decrement feeNanos whenever @@ -367,8 +371,8 @@ func (desoBlockProducer *DeSoBlockProducer) _getBlockTemplate(publicKey []byte) blockRet.Header.TransactionMerkleRoot = merkleRoot // Compute the next difficulty target given the current tip. 
-	diffTarget, err := CalcNextDifficultyTarget(
-		lastNode, CurrentHeaderVersion, desoBlockProducer.params)
+	diffTarget, err := desoBlockProducer.chain.CalcNextDifficultyTarget(
+		lastNode, CurrentHeaderVersion)
 	if err != nil {
 		return nil, nil, nil, errors.Wrapf(err, "DeSoBlockProducer._getBlockTemplate: Problem computing next difficulty: ")
 	}
@@ -465,10 +469,12 @@ func RecomputeBlockRewardWithBlockRewardOutputPublicKey(
 	blockRewardOutputPublicKeyBytes []byte,
 	params *DeSoParams,
 ) (*MsgDeSoBlock, error) {
-	blockRewardOutputPublicKey, err := btcec.ParsePubKey(blockRewardOutputPublicKeyBytes)
-	if err != nil {
-		return nil, errors.Wrap(
-			fmt.Errorf("RecomputeBlockRewardWithBlockRewardOutpubPublicKey: Problem parsing block reward output public key: %v", err), "")
+	blockRewardOutputPublicKey := NewPublicKey(blockRewardOutputPublicKeyBytes)
+	if blockRewardOutputPublicKey == nil {
+		return nil,
+			fmt.Errorf(
+				"RecomputeBlockRewardWithBlockRewardOutputPublicKey: Problem parsing block reward output public key: %v",
+				blockRewardOutputPublicKeyBytes)
 	}
 
 	// Find all transactions in block that have transactor == block reward output public key
@@ -476,14 +482,15 @@
 	totalFees := uint64(0)
 	for _, txn := range block.Txns[1:] {
 		if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper {
-			transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey)
-			if err != nil {
-				glog.Errorf("DeSoMiner._startThread: Error parsing transactor public key: %v", err)
+			transactorPublicKey := NewPublicKey(txn.PublicKey)
+			if transactorPublicKey == nil {
+				glog.Errorf("DeSoMiner._startThread: Error parsing transactor public key: %v", txn.PublicKey)
 				continue
 			}
-			if transactorPublicKey.IsEqual(blockRewardOutputPublicKey) {
+			if transactorPublicKey.Equal(*blockRewardOutputPublicKey) {
 				continue
 			}
+			var err error
 			totalFees, err = SafeUint64().Add(totalFees, txn.TxnFeeNanos)
 			if err != nil {
 				glog.Errorf("DeSoMiner._startThread: Error adding txn fee: %v", err)
diff --git a/lib/block_view.go b/lib/block_view.go
index 5a148b0ea..c026437d5 100644
--- a/lib/block_view.go
+++ b/lib/block_view.go
@@ -547,8 +547,7 @@ func (bav *UtxoView) CopyUtxoView() *UtxoView {
 	// Copy the Derived Key data
 	newView.DerivedKeyToDerivedEntry = make(map[DerivedKeyMapKey]*DerivedKeyEntry, len(bav.DerivedKeyToDerivedEntry))
 	for entryKey, entry := range bav.DerivedKeyToDerivedEntry {
-		newEntry := *entry
-		newView.DerivedKeyToDerivedEntry[entryKey] = &newEntry
+		newView.DerivedKeyToDerivedEntry[entryKey] = entry.Copy()
 	}
 
 	// Copy the DAO Coin Limit Order Entries
@@ -4348,7 +4347,7 @@ func (bav *UtxoView) ConnectBlock(
 	}
 	blockHeader := desoBlock.Header
 
-	var blockRewardOutputPublicKey *btcec.PublicKey
+	var blockRewardOutputPublicKey *PublicKey
 	// If the block height is greater than or equal to the block reward patch height,
 	// we will verify that there is only one block reward output and we'll parse
 	// that public key
@@ -4365,11 +4364,12 @@
 		if len(desoBlock.Txns[0].TxOutputs) != 1 {
 			return nil, errors.Wrap(RuleErrorBlockRewardTxnMustHaveOneOutput, "ConnectBlock: Block reward transaction must have exactly one output")
 		}
-		var err error
-		blockRewardOutputPublicKey, err =
-			btcec.ParsePubKey(desoBlock.Txns[0].TxOutputs[0].PublicKey)
-		if err != nil {
-			return nil, fmt.Errorf("ConnectBlock: Problem parsing block reward public key: %v", err)
+		blockRewardOutputPublicKey =
+			NewPublicKey(desoBlock.Txns[0].TxOutputs[0].PublicKey)
+		if blockRewardOutputPublicKey == nil {
+			return nil, fmt.Errorf(
+				"ConnectBlock: Problem parsing block reward public key: incorrect number of bytes in public key: %v",
+				desoBlock.Txns[0].TxOutputs[0].PublicKey)
 		}
 	}
@@ -4407,18 +4407,17 @@
 		if blockHeight >= uint64(bav.Params.ForkHeights.BlockRewardPatchBlockHeight) &&
 			txn.TxnMeta.GetTxnType() != TxnTypeBlockReward &&
 			txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper {
-			transactorPubKey, err := btcec.ParsePubKey(txn.PublicKey)
-			if err != nil {
-				return nil, fmt.Errorf("ConnectBlock: Problem parsing transactor public key: %v", err)
+			transactorPubKey := NewPublicKey(txn.PublicKey)
+			if transactorPubKey == nil {
+				return nil, fmt.Errorf(
+					"ConnectBlock: Problem parsing transactor public key: incorrect number of bytes in txn.PublicKey: %v",
+					txn.PublicKey)
 			}
-			includeFeesInBlockReward = !transactorPubKey.IsEqual(blockRewardOutputPublicKey)
+			includeFeesInBlockReward = !transactorPubKey.Equal(*blockRewardOutputPublicKey)
 		}
 		if includeFeesInBlockReward {
 			if txn.TxnMeta.GetTxnType() != TxnTypeAtomicTxnsWrapper {
-				// Compute the BMF given the current fees paid in the block.
-				_, utilityFee = computeBMF(currentFees)
-
 				// Add the fees from this txn to the total fees. If any overflow occurs
 				// mark the block as invalid and return a rule error. Note that block reward
 				// txns should count as having zero fees.
@@ -4426,14 +4425,19 @@
 					return nil, RuleErrorTxnOutputWithInvalidAmount
 				}
 				totalFees += currentFees
-
-				// For PoS, the maximum block reward is based on the maximum utility fee.
-				// Add the utility fees to the max utility fees. If any overflow
-				// occurs mark the block as invalid and return a rule error.
-				maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee)
-				if err != nil {
-					return nil, errors.Wrapf(RuleErrorPoSBlockRewardWithInvalidAmount,
-						"ConnectBlock: error computing maxUtilityFee: %v", err)
+				// Only compute BMF if we're past the PoS cutover.
+				if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) {
+					// Compute the BMF given the current fees paid in the block.
+					_, utilityFee = computeBMF(currentFees)
+
+					// For PoS, the maximum block reward is based on the maximum utility fee.
+					// Add the utility fees to the max utility fees. If any overflow
+					// occurs mark the block as invalid and return a rule error.
+					maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee)
+					if err != nil {
+						return nil, errors.Wrapf(RuleErrorPoSBlockRewardWithInvalidAmount,
+							"ConnectBlock: error computing maxUtilityFee: %v", err)
+					}
 				}
 			} else {
 				txnMeta, ok := txn.TxnMeta.(*AtomicTxnsWrapperMetadata)
@@ -4450,11 +4454,14 @@
 					return nil, errors.Wrap(
 						err, "ConnectBlock: error adding non-block-reward recipient fees from atomic transaction")
 				}
-				_, utilityFee = computeBMF(nonBlockRewardRecipientFees)
-				maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee)
-				if err != nil {
-					return nil, errors.Wrap(err,
-						"ConnectBlock: error computing maxUtilityFee: %v")
+				// Only compute BMF if we're past the PoS cutover.
+ if blockHeight >= uint64(bav.Params.ForkHeights.ProofOfStake2ConsensusCutoverBlockHeight) { + _, utilityFee = computeBMF(nonBlockRewardRecipientFees) + maxUtilityFee, err = SafeUint64().Add(maxUtilityFee, utilityFee) + if err != nil { + return nil, errors.Wrap(err, + "ConnectBlock: error computing maxUtilityFee: %v") + } } } } @@ -5114,11 +5121,14 @@ func (bav *UtxoView) GetSpendableDeSoBalanceNanosForPublicKey(pkBytes []byte, // but we do have the header. As a result, this condition always evaluates to false and thus // we only process the block reward for the previous block instead of all immature block rewards // as defined by the params. - if blockNode.Parent != nil { - nextBlockHash = blockNode.Parent.Hash - } else { - nextBlockHash = GenesisBlockHash - } + // NOTE: we are not using .GetParent here as it changes the meaning of this code. + // In order to minimize code changes, we just jump back to the genesis block as the code did previously + // since the Parent attribute was removed from the BlockNode struct. + //if blockNode.Header != nil { + // nextBlockHash = blockNode.Header.PrevBlockHash + //} else { + nextBlockHash = GenesisBlockHash + //} } } diff --git a/lib/block_view_atomic_txns.go b/lib/block_view_atomic_txns.go index 8e0fa950d..9554791cd 100644 --- a/lib/block_view_atomic_txns.go +++ b/lib/block_view_atomic_txns.go @@ -5,7 +5,6 @@ import ( "fmt" "io" - "github.com/btcsuite/btcd/btcec/v2" "github.com/pkg/errors" ) @@ -445,18 +444,21 @@ func _verifyAtomicTxnsChain(txnMeta *AtomicTxnsWrapperMetadata) error { return nil } -func filterOutBlockRewardRecipientFees(txns []*MsgDeSoTxn, publicRewardPublicKey *btcec.PublicKey) (uint64, error) { +func filterOutBlockRewardRecipientFees(txns []*MsgDeSoTxn, publicRewardPublicKey *PublicKey) (uint64, error) { var nonBlockRewardRecipientFees uint64 for _, txn := range txns { // If the transaction is performed by any public key other than block reward recipient transaction, // add the fees to the total. - transactorPublicKey, err := btcec.ParsePubKey(txn.PublicKey) - if err != nil { - return 0, errors.Wrap(err, "filterBlockRewardRecipientFees: failed to parse public key") + transactorPublicKey := NewPublicKey(txn.PublicKey) + if transactorPublicKey == nil { + return 0, fmt.Errorf( + "filterBlockRewardRecipientFees: failed to parse public key: incorrect number of bytes: %v", + txn.PublicKey) } - if transactorPublicKey.IsEqual(publicRewardPublicKey) { + if transactorPublicKey.Equal(*publicRewardPublicKey) { continue } + var err error nonBlockRewardRecipientFees, err = SafeUint64().Add(nonBlockRewardRecipientFees, txn.TxnFeeNanos) if err != nil { return 0, errors.Wrap(err, "filterBlockRewardRecipientFees: failed to add fees") diff --git a/lib/block_view_bitcoin_test.go b/lib/block_view_bitcoin_test.go index c12b8af0a..b68a18efd 100644 --- a/lib/block_view_bitcoin_test.go +++ b/lib/block_view_bitcoin_test.go @@ -50,7 +50,6 @@ func GetTestParamsCopy( paramsCopy := *paramss headerHash := (BlockHash)(startHeader.BlockHash()) paramsCopy.BitcoinStartBlockNode = NewBlockNode( - nil, /*ParentNode*/ &headerHash, /*Hash*/ startHeight, _difficultyBitsToHash(startHeader.Bits), diff --git a/lib/block_view_like.go b/lib/block_view_like.go index 3af7983aa..a6dd2350a 100644 --- a/lib/block_view_like.go +++ b/lib/block_view_like.go @@ -77,7 +77,7 @@ func (bav *UtxoView) GetLikesForPostHash(postHash *BlockHash) (_likerPubKeys [][ handle := bav.Handle dbPrefix := append([]byte{}, Prefixes.PrefixLikedPostHashToLikerPubKey...) 
dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true, false) // Iterate over all the db keys & values and load them into the view. expectedKeyLength := 1 + HashSizeBytes + btcec.PubKeyBytesLenCompressed diff --git a/lib/block_view_lockups_test.go b/lib/block_view_lockups_test.go index e788e3e03..3ab91972a 100644 --- a/lib/block_view_lockups_test.go +++ b/lib/block_view_lockups_test.go @@ -2462,7 +2462,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blk2.Header.Height)) // Update the tip - testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + testMeta.chain.blockIndex.tip = testMeta.chain.blockIndex.tip.GetParent(testMeta.chain.blockIndex) // Validate the state update utxoView = NewUtxoView( @@ -2517,7 +2517,7 @@ func TestLockupBlockConnectsAndDisconnects(t *testing.T) { require.NoError(t, utxoView.FlushToDb(blk1.Header.Height)) // Update the tip - testMeta.chain.bestChain = testMeta.chain.bestChain[:len(testMeta.chain.bestChain)-1] + testMeta.chain.blockIndex.setTip(testMeta.chain.blockIndex.tip.GetParent(testMeta.chain.blockIndex)) // Verify we return back to the initial state utxoView = NewUtxoView( diff --git a/lib/block_view_post.go b/lib/block_view_post.go index a6b9e6d30..e76745fbb 100644 --- a/lib/block_view_post.go +++ b/lib/block_view_post.go @@ -592,7 +592,7 @@ func (bav *UtxoView) GetDiamondSendersForPostHash(postHash *BlockHash) (_pkidToD // FIXME: Db operation like this shouldn't happen in utxoview. dbPrefix := append([]byte{}, Prefixes.PrefixDiamondedPostHashDiamonderPKIDDiamondLevel...) dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true, false) diamondPostEntry := bav.GetPostEntryForPostHash(postHash) receiverPKIDEntry := bav.GetPKIDForPublicKey(diamondPostEntry.PosterPublicKey) @@ -633,7 +633,7 @@ func (bav *UtxoView) GetRepostsForPostHash(postHash *BlockHash) (_reposterPubKey // FIXME: Db operation like this shouldn't happen in utxoview. dbPrefix := append([]byte{}, Prefixes.PrefixRepostedPostHashReposterPubKey...) dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true, false) // Iterate over all the db keys & values and load them into the view. expectedKeyLength := 1 + HashSizeBytes + btcec.PubKeyBytesLenCompressed @@ -670,7 +670,7 @@ func (bav *UtxoView) GetQuoteRepostsForPostHash(postHash *BlockHash, // FIXME: Db operation like this shouldn't happen in utxoview. dbPrefix := append([]byte{}, Prefixes.PrefixRepostedPostHashReposterPubKeyRepostPostHash...) dbPrefix = append(dbPrefix, postHash[:]...) - keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true) + keysFound, _ := EnumerateKeysForPrefix(handle, dbPrefix, true, false) // Iterate over all the db keys & values and load them into the view. 
expectedKeyLength := 1 + HashSizeBytes + btcec.PubKeyBytesLenCompressed + HashSizeBytes diff --git a/lib/block_view_stake.go b/lib/block_view_stake.go index 7826ce18d..b411673af 100644 --- a/lib/block_view_stake.go +++ b/lib/block_view_stake.go @@ -2999,6 +2999,7 @@ func (bav *UtxoView) _checkStakeTxnSpendingLimitAndUpdateDerivedKey( for _, validatorPKID := range []*PKID{validatorEntry.ValidatorPKID, &ZeroPKID} { // Retrieve DerivedKeyEntry.TransactionSpendingLimit. stakeLimitKey := MakeStakeLimitKey(validatorPKID) + // TODO: check that StakeLimitMap is not nil. spendingLimit, exists := derivedKeyEntry.TransactionSpendingLimitTracker.StakeLimitMap[stakeLimitKey] if !exists { continue diff --git a/lib/block_view_test.go b/lib/block_view_test.go index 7e9f275d2..0f835cc02 100644 --- a/lib/block_view_test.go +++ b/lib/block_view_test.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/hex" "fmt" + "github.com/deso-protocol/core/collections" "math" _ "net/http/pprof" "reflect" @@ -13,7 +14,6 @@ import ( "github.com/deso-protocol/core/bls" "github.com/btcsuite/btcd/btcec/v2" - "github.com/decred/dcrd/container/lru" "github.com/dgraph-io/badger/v3" embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/golang/glog" @@ -702,8 +702,8 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te require.NoError(err) // sanity-check that the last block hash is the same as the last header hash. require.Equal(true, bytes.Equal( - tm.chain.bestChain[len(tm.chain.bestChain)-1].Hash.ToBytes(), - tm.chain.bestHeaderChain[len(tm.chain.bestHeaderChain)-1].Hash.ToBytes())) + tm.chain.blockIndex.GetTip().Hash.ToBytes(), + tm.chain.blockIndex.GetHeaderTip().Hash.ToBytes())) // Last block shouldn't be nil, and the number of expectedTxns should be the same as in the testVectorBlock + 1, // because of the additional block reward. require.NotNil(lastBlock) @@ -791,15 +791,14 @@ func (tes *transactionTestSuite) testDisconnectBlock(tm *transactionTestMeta, te // TODO: if ever needed we can call tm.chain.eventManager.blockDisconnected() here. // Update the block and header metadata chains. - tm.chain.bestChain = tm.chain.bestChain[:len(tm.chain.bestChain)-1] - tm.chain.bestHeaderChain = tm.chain.bestHeaderChain[:len(tm.chain.bestHeaderChain)-1] - delete(tm.chain.bestChainMap, *lastBlockHash) - delete(tm.chain.bestHeaderChainMap, *lastBlockHash) + tm.chain.blockIndex.setTip(tm.chain.BlockTip().GetParent(tm.chain.blockIndex)) + tm.chain.blockIndex.setHeaderTip(tm.chain.HeaderTip().GetParent(tm.chain.blockIndex)) // We don't pass the chain's snapshot above to prevent certain concurrency issues. As a // result, we need to reset the snapshot's db cache to get rid of stale data. if tm.chain.snapshot != nil { - tm.chain.snapshot.DatabaseCache = *lru.NewMap[string, []byte](DatabaseCacheSize) + tm.chain.snapshot.DatabaseCache, err = collections.NewLruCache[string, []byte](int(DatabaseCacheSize)) + require.NoError(err) } // Note that unlike connecting test vectors, when disconnecting, we don't need to verify db entries. diff --git a/lib/blockchain.go b/lib/blockchain.go index 076ee8c74..0a645fe97 100644 --- a/lib/blockchain.go +++ b/lib/blockchain.go @@ -12,13 +12,12 @@ import ( "net/http" "reflect" "runtime/debug" + "slices" "sort" "strings" "sync" "time" - "github.com/decred/dcrd/container/lru" - "github.com/deso-protocol/core/collections" "github.com/deso-protocol/uint256" @@ -59,7 +58,9 @@ const ( // have room for multiple forks each an entire history's length with this value). 
If // each node takes up 100 bytes of space this amounts to around 500MB, which also seems // like a reasonable size.
-	MaxBlockIndexNodes = 5000000
+	// UPDATE: now that we don't keep everything in memory, we reduced this value from
+	// 5000000 to 1000000.
+	MaxBlockIndexNodes = 1000000 // TODO: trim this down somehow...
 )
 
 type BlockStatus uint32
@@ -121,9 +122,19 @@ func (nn *BlockNode) IsValidateFailed() bool {
 }
 
 // IsCommitted returns true if a BlockNode has passed all validations, and it has been committed to
-// the Blockchain according to the Fast HotStuff commit rule.
+// the Blockchain according to the Fast HotStuff commit rule. This function previously considered
+// all PoW blocks to be committed regardless of status, but since we no longer keep the entire best
+// chain in-memory, we needed to update all PoW blocks in the best chain to be Committed such that
+// we could distinguish them from forks that occurred in PoW and still determine if a node is in the
+// best chain or not.
 func (nn *BlockNode) IsCommitted() bool {
-	return nn.Status&StatusBlockCommitted != 0 || !blockNodeProofOfStakeCutoverMigrationTriggered(nn.Height)
+	return nn.Status&StatusBlockCommitted != 0
+}
+
+// ClearCommittedStatus is used to clear the committed status from a BlockNode. This is useful
+// only for PoW blocks when handling forks.
+func (nn *BlockNode) ClearCommittedStatus() {
+	nn.Status &= BlockStatus(^uint32(StatusBlockCommitted))
 }
 
 // IsFullyProcessed determines if the BlockStatus corresponds to a fully processed and stored block.
@@ -181,9 +192,6 @@ func (blockStatus BlockStatus) String() string {
 // Add some fields in addition to the header to aid in the selection
 // of the best chain.
 type BlockNode struct {
-	// Pointer to a node representing the block's parent.
-	Parent *BlockNode `fake:"skip"`
-
 	// The hash computed on this block.
 	Hash *BlockHash
@@ -236,6 +244,18 @@ func (nn *BlockNode) GetEncoderType() EncoderType {
 	return EncoderTypeBlockNode
 }
 
+func (nn *BlockNode) GetParent(blockIndex *BlockIndex) *BlockNode {
+	// Look the parent up in the block index. BlockNode no longer stores a parent
+	// pointer, and we have no guarantee the parent is cached since we no longer
+	// keep the entire block index in memory.
+	parentNode, exists := blockIndex.GetBlockNodeByHashAndHeight(nn.Header.PrevBlockHash, uint64(nn.Height-1))
+	if !exists {
+		return nil
+	}
+
+	return parentNode
+}
+
 // Append DeSo Encoder Metadata bytes to BlockNode bytes.
 func AddEncoderMetadataToBlockNodeBytes(blockNodeBytes []byte, blockHeight uint64) []byte {
 	var blockData []byte
@@ -368,16 +388,13 @@ func ExtractBitcoinExchangeTransactionsFromBitcoinBlock(
 }
 
 func (nn *BlockNode) String() string {
-	var parentHash *BlockHash
-	if nn.Parent != nil {
-		parentHash = nn.Parent.Hash
-	}
+	parentHash := nn.Header.PrevBlockHash
 	tstamp := uint32(0)
 	if nn.Header != nil {
 		tstamp = uint32(nn.Header.GetTstampSecs())
 	}
-	return fmt.Sprintf("< TstampSecs: %d, Height: %d, Hash: %s, ParentHash %s, Status: %s, CumWork: %v>",
-		tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork)
+	return fmt.Sprintf("< TstampSecs: %d, Height: %d, Hash: %s, ParentHash %s, Status: %s, CumWork: %v, IsCommitted: %v >",
+		tstamp, nn.Header.Height, nn.Hash, parentHash, nn.Status, nn.CumWork, nn.IsCommitted())
 }
 
 // NewBlockNode is a helper function to create a BlockNode
@@ -385,7 +402,6 @@
 // have a committed status of COMMITTED.
 // TODO: Height not needed in this since it's in the header.
func NewBlockNode(
-	parent *BlockNode,
 	hash *BlockHash,
 	height uint32,
 	difficultyTarget *BlockHash,
@@ -394,7 +410,6 @@
 	status BlockStatus) *BlockNode {
 
 	return &BlockNode{
-		Parent:           parent,
 		Hash:             hash,
 		Height:           height,
 		DifficultyTarget: difficultyTarget,
@@ -404,42 +419,37 @@
 	}
 }
 
-func (nn *BlockNode) Ancestor(height uint32) *BlockNode {
+func (nn *BlockNode) Ancestor(height uint32, blockIndex *BlockIndex) *BlockNode {
 	if height > nn.Height {
 		return nil
 	}
 
 	node := nn
-	for ; node != nil && node.Height != height; node = node.Parent {
+	for node != nil && node.Height != height {
 		// Keep iterating node until the condition no longer holds.
+		node = node.GetParent(blockIndex)
 	}
 
 	return node
 }
 
-// RelativeAncestor returns the ancestor block node a relative 'distance' blocks
-// before this node. This is equivalent to calling Ancestor with the node's
-// height minus provided distance.
-//
-// This function is safe for concurrent access.
-func (nn *BlockNode) RelativeAncestor(distance uint32) *BlockNode {
-	return nn.Ancestor(nn.Height - distance)
-}
-
 // CalcNextDifficultyTarget computes the difficulty target expected of the
 // next block.
-func CalcNextDifficultyTarget(
-	lastNode *BlockNode, version uint32, params *DeSoParams) (*BlockHash, error) {
+func (bc *Blockchain) CalcNextDifficultyTarget(
+	lastNode *BlockNode, version uint32) (*BlockHash, error) {
 
 	// Compute the blocks in each difficulty cycle.
-	blocksPerRetarget := uint32(params.TimeBetweenDifficultyRetargets / params.TimeBetweenBlocks)
+	blocksPerRetarget := uint32(bc.params.TimeBetweenDifficultyRetargets / bc.params.TimeBetweenBlocks)
 
 	// We effectively skip the first difficulty retarget by returning the default
 	// difficulty value for the first cycle. Not doing this (or something like it)
 	// would cause the genesis block's timestamp, which could be off by several days
 	// to significantly skew the first cycle in a way that is mostly annoying for
 	// testing but also suboptimal for the mainnet.
-	minDiffBytes, err := hex.DecodeString(params.MinDifficultyTargetHex)
+	minDiffBytes, err := hex.DecodeString(bc.params.MinDifficultyTargetHex)
 	if err != nil {
 		return nil, errors.Wrapf(err, "CalcNextDifficultyTarget: Problem computing min difficulty")
 	}
@@ -460,19 +470,24 @@
 	}
 
 	// If we get here it means we reached a difficulty retarget point.
- targetSecs := int64(params.TimeBetweenDifficultyRetargets / time.Second) - minRetargetTimeSecs := targetSecs / params.MaxDifficultyRetargetFactor - maxRetargetTimeSecs := targetSecs * params.MaxDifficultyRetargetFactor + targetSecs := int64(bc.params.TimeBetweenDifficultyRetargets / time.Second) + minRetargetTimeSecs := targetSecs / bc.params.MaxDifficultyRetargetFactor + maxRetargetTimeSecs := targetSecs * bc.params.MaxDifficultyRetargetFactor firstNodeHeight := lastNode.Height - blocksPerRetarget - firstNode := lastNode.Ancestor(firstNodeHeight) - if firstNode == nil { + firstNode, exists, err := bc.GetBlockFromBestChainByHeight(uint64(firstNodeHeight), true) + if err != nil { + return nil, errors.Wrapf(err, "CalcNextDifficultyTarget: Problem getting block at "+ + "beginning of retarget interval at height %d during retarget from height %d", + firstNodeHeight, lastNode.Height) + } + if firstNode == nil || !exists { return nil, fmt.Errorf("CalcNextDifficultyTarget: Problem getting block at "+ "beginning of retarget interval at height %d during retarget from height %d", firstNodeHeight, lastNode.Height) } - actualTimeDiffSecs := int64(lastNode.Header.GetTstampSecs() - firstNode.Header.GetTstampSecs()) + actualTimeDiffSecs := lastNode.Header.GetTstampSecs() - firstNode.Header.GetTstampSecs() clippedTimeDiffSecs := actualTimeDiffSecs if actualTimeDiffSecs < minRetargetTimeSecs { clippedTimeDiffSecs = minRetargetTimeSecs @@ -527,6 +542,159 @@ type CheckpointBlockInfoAndError struct { Error error } +// BlockIndex operates as a read-through cache for block nodes. It is used to +// look up any block node we know by its height or hash (but is faster if you +// provide both!). It also is used to determine which block nodes are in the +// best chain using the provided functions. Additionally, it always tracks +// the block tip and header tip. +type BlockIndex struct { + db *badger.DB + snapshot *Snapshot + blockIndexByHash *collections.LruCache[BlockHash, *BlockNode] + tip *BlockNode + headerTip *BlockNode +} + +// NewBlockIndex creates a new BlockIndex with the provided snapshot and tip node. +func NewBlockIndex(db *badger.DB, snapshot *Snapshot, tipNode *BlockNode) *BlockIndex { + blockIndexByHash, _ := collections.NewLruCache[BlockHash, *BlockNode](MaxBlockIndexNodes) // TODO: parameterize this? + return &BlockIndex{ + db: db, + snapshot: snapshot, + blockIndexByHash: blockIndexByHash, + tip: tipNode, + headerTip: tipNode, + } +} + +// setBlockIndexFromMap is a helper function only used in tests. It constructs the +// block index from the provided map of block hashes to block nodes. +func (bi *BlockIndex) setBlockIndexFromMap(input map[BlockHash]*BlockNode) { + newHashToBlockNodeMap, _ := collections.NewLruCache[BlockHash, *BlockNode](MaxBlockIndexNodes) // TODO: parameterize this? + bi.blockIndexByHash = newHashToBlockNodeMap + for _, val := range input { + bi.addNewBlockNodeToBlockIndex(val) + // This function is always used for tests. + // We assume that the tip is just the highest block in the block index. + if bi.tip == nil { + bi.tip = val + } else if val.Height > bi.tip.Height { + bi.tip = val + } + } +} + +// setHeaderTip sets the header tip of the block index to the provided block node. +// This must be called whenever we have a new tip added to the best header chain. +func (bi *BlockIndex) setHeaderTip(tip *BlockNode) { + // Just to be safe, we also add it to the block index. 
+	bi.addNewBlockNodeToBlockIndex(tip)
+	bi.headerTip = tip
+}
+
+// setTip sets the tip of the block index to the provided block node. This must be
+// called whenever we have a new tip added to the best chain.
+func (bi *BlockIndex) setTip(tip *BlockNode) {
+	// Just to be safe, we also add it to the block index.
+	bi.addNewBlockNodeToBlockIndex(tip)
+	bi.tip = tip
+}
+
+// addNewBlockNodeToBlockIndex adds a new block node to the block index. This adds
+// it to the in-memory cache keyed by block hash.
+func (bi *BlockIndex) addNewBlockNodeToBlockIndex(blockNode *BlockNode) {
+	bi.blockIndexByHash.Put(*blockNode.Hash, blockNode)
+}
+
+// GetBlockNodeByHashOnly retrieves a block node from the block index by its hash.
+// Note that this is the least efficient method of getting a block node from the block
+// index if we don't have it in the cache as we need to first get the block height
+// from the database and then get the block node from the database.
+func (bi *BlockIndex) GetBlockNodeByHashOnly(blockHash *BlockHash) (*BlockNode, bool, error) {
+	// If we have it in the cache, just return it!
+	val, exists := bi.blockIndexByHash.Get(*blockHash)
+	if exists {
+		return val, true, nil
+	}
+	// If we don't have it in the cache, we need to get the height from the database.
+	height, err := GetHeightForHash(bi.db, bi.snapshot, blockHash)
+	if err != nil {
+		if errors.Is(err, badger.ErrKeyNotFound) {
+			return nil, false, nil
+		}
+		return nil, false, errors.Wrapf(err, "GetBlockNodeByHashOnly: Problem getting height for hash")
+	}
+	// Then we can get the block node from the database.
+	blockNode := GetHeightHashToNodeInfo(bi.db, bi.snapshot, uint32(height), blockHash, false)
+	if blockNode == nil {
+		return nil, false, nil
+	}
+	// Add it to the cache and return it.
+	bi.addNewBlockNodeToBlockIndex(blockNode)
+	return blockNode, true, nil
+}
+
+// GetBlockNodeByHashAndHeight retrieves a block node from the block index by its hash
+// and height. This is the most efficient method of getting a block node from the block
+// index as we do not need to get the block height from the database.
+func (bi *BlockIndex) GetBlockNodeByHashAndHeight(blockHash *BlockHash, height uint64) (*BlockNode, bool) {
+	// If we have it in the cache, just return it!
+	val, exists := bi.blockIndexByHash.Get(*blockHash)
+	if exists {
+		return val, true
+	}
+	// If the height exceeds a uint32, we have a problem as the height hash to node
+	// prefix only supports uint32 heights.
+	if height > math.MaxUint32 {
+		glog.Fatalf("GetBlockNodeByHashAndHeight: Height %d is greater than math.MaxUint32", height)
+	}
+	// If we don't have it in the cache, we need to get the block node from the database.
+	bn := GetHeightHashToNodeInfo(bi.db, bi.snapshot, uint32(height), blockHash, false)
+	if bn == nil {
+		return nil, false
+	}
+	// Add it to the cache and return it.
+	bi.addNewBlockNodeToBlockIndex(bn)
+	return bn, true
+}
+
+// GetBlockNodesByHeight retrieves all block nodes at a given height from the block index.
+// We may have many block nodes at the same height, so the caller must determine which they
+// want to use.
+func (bi *BlockIndex) GetBlockNodesByHeight(height uint64) []*BlockNode {
+	// If the height exceeds a uint32, we have a problem as the height hash to node
+	// prefix only supports uint32 heights.
+	if height > math.MaxUint32 {
+		glog.Fatalf("GetBlockNodesByHeight: Height %d is greater than math.MaxUint32", height)
+	}
+	prefixKey := _heightHashToNodePrefixByHeight(uint32(height), false)
+	// Enumerate all block nodes for the prefix.
+	_, valsFound := EnumerateKeysForPrefix(bi.db, prefixKey, false, true) // skip prefetch.
+	blockNodes := []*BlockNode{}
+	for _, val := range valsFound {
+		// Deserialize all block nodes.
+		blockNode, err := DeserializeBlockNode(val)
+		if err != nil {
+			glog.Errorf("GetBlockNodesByHeight: Problem deserializing block node: %v", err)
+			continue
+		}
+		// Add them to the block index and append them to our result.
+		bi.addNewBlockNodeToBlockIndex(blockNode)
+		blockNodes = append(blockNodes, blockNode)
+	}
+	return blockNodes
+}
+
+// GetTip retrieves the tip of the best chain.
+func (bi *BlockIndex) GetTip() *BlockNode {
+	return bi.tip
+}
+
+// GetHeaderTip retrieves the tip of the header chain.
+func (bi *BlockIndex) GetHeaderTip() *BlockNode {
+	return bi.headerTip
+}
+
 type Blockchain struct {
 	db       *badger.DB
 	postgres *Postgres
@@ -554,19 +722,15 @@ type Blockchain struct {
 	// These should only be accessed after acquiring the ChainLock.
 	//
-	// An in-memory index of the "tree" of blocks we are currently aware of.
-	// This index includes forks and side-chains.
-	blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode]
-	// blockIndexByHeight is an in-memory map of block height to block nodes. This is
-	// used to quickly find the safe blocks from which the chain can be extended for PoS
-	blockIndexByHeight map[uint64]map[BlockHash]*BlockNode
-	// An in-memory slice of the blocks on the main chain only. The end of
-	// this slice is the best known tip that we have at any given time.
-	bestChain    []*BlockNode
-	bestChainMap map[BlockHash]*BlockNode
-
-	bestHeaderChain    []*BlockNode
-	bestHeaderChainMap map[BlockHash]*BlockNode
+	// blockIndex is a read-through cache for block nodes. It is used to look up any block node we know
+	// by its height or hash (but is faster if you provide both!). It also is used to determine which
+	// block nodes are in the best chain using the provided functions. Additionally, it always tracks
+	// the block tip and header tip.
+	blockIndex *BlockIndex
+	// lowestBlockNotStored tracks the lowest known block height that is not stored in the database.
+	// This is only used for hypersync-archival syncing to determine at which height we need to start
+	// requesting blocks in GetBlocksToStore.
+	lowestBlockNotStored uint64
 
 	// We keep track of orphan blocks with the following data structures. Orphans
 	// are not written to disk and are only cached in memory. Moreover we only keep
@@ -577,7 +741,7 @@
 	blockView *UtxoView
 
 	// cache block view for each block
-	blockViewCache lru.Map[BlockHash, *BlockViewAndUtxoOps]
+	blockViewCache *collections.LruCache[BlockHash, *BlockViewAndUtxoOps]
 
 	// snapshot cache
 	snapshotCache *SnapshotCache
@@ -704,88 +868,91 @@ func getCheckpointBlockInfoFromProviderHelper(provider string) *CheckpointBlockInfoAndError {
 	}
 }
 
+// addNewBlockNodeToBlockIndex is a pass-through to calling the same function on the
+// BlockIndex of a Blockchain.
func (bc *Blockchain) addNewBlockNodeToBlockIndex(blockNode *BlockNode) { - bc.blockIndexByHash.Set(*blockNode.Hash, blockNode) - if _, exists := bc.blockIndexByHeight[uint64(blockNode.Height)]; !exists { - bc.blockIndexByHeight[uint64(blockNode.Height)] = make(map[BlockHash]*BlockNode) - } - bc.blockIndexByHeight[uint64(blockNode.Height)][*blockNode.Hash] = blockNode + bc.blockIndex.addNewBlockNodeToBlockIndex(blockNode) } +// CopyBlockIndexes returns a copy of the BlockIndex of a Blockchain. func (bc *Blockchain) CopyBlockIndexes() ( - _blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode], - _blockIndexByHeight map[uint64]map[BlockHash]*BlockNode, + _blockIndexByHash *collections.LruCache[BlockHash, *BlockNode], ) { - newBlockIndexByHash := collections.NewConcurrentMap[BlockHash, *BlockNode]() - newBlockIndexByHeight := make(map[uint64]map[BlockHash]*BlockNode) - bc.blockIndexByHash.Iterate(func(kk BlockHash, vv *BlockNode) { - newBlockIndexByHash.Set(kk, vv) - blockHeight := uint64(vv.Height) - if _, exists := newBlockIndexByHeight[blockHeight]; !exists { - newBlockIndexByHeight[blockHeight] = make(map[BlockHash]*BlockNode) - } - newBlockIndexByHeight[blockHeight][kk] = vv - }) - return newBlockIndexByHash, newBlockIndexByHeight -} - -func (bc *Blockchain) constructBlockIndexByHeight() map[uint64]map[BlockHash]*BlockNode { - newBlockIndex := make(map[uint64]map[BlockHash]*BlockNode) - bc.blockIndexByHash.Iterate(func(_ BlockHash, blockNode *BlockNode) { - blockHeight := uint64(blockNode.Height) - if _, exists := newBlockIndex[blockHeight]; !exists { - newBlockIndex[blockHeight] = make(map[BlockHash]*BlockNode) - } - newBlockIndex[blockHeight][*blockNode.Hash] = blockNode - }) - return newBlockIndex -} - -func (bc *Blockchain) getAllBlockNodesIndexedAtHeight(blockHeight uint64) []*BlockNode { - return collections.MapValues(bc.blockIndexByHeight[blockHeight]) -} - -func (bc *Blockchain) hasBlockNodesIndexedAtHeight(blockHeight uint64) bool { - blocksAtHeight, hasNestedMapAtHeight := bc.blockIndexByHeight[blockHeight] - if !hasNestedMapAtHeight { - return false + // Create a new lru cache. + newBlockIndexByHash, _ := collections.NewLruCache[BlockHash, *BlockNode](MaxBlockIndexNodes) + // Iterate over the keys of the block index and copy them to the new lru cache. + for _, key := range bc.blockIndex.blockIndexByHash.Keys() { + val, _ := bc.blockIndex.blockIndexByHash.Get(key) + newBlockIndexByHash.Put(key, val) } - return len(blocksAtHeight) > 0 + return newBlockIndexByHash } -func (bc *Blockchain) CopyBestChain() ([]*BlockNode, map[BlockHash]*BlockNode) { - newBestChain := []*BlockNode{} - newBestChainMap := make(map[BlockHash]*BlockNode) - newBestChain = append(newBestChain, bc.bestChain...) - for kk, vv := range bc.bestChainMap { - newBestChainMap[kk] = vv - } - - return newBestChain, newBestChainMap +// GetBlockIndex is a helper to return the BlockIndex of a Blockchain. +// This is used by the backend. +func (bc *Blockchain) GetBlockIndex() *BlockIndex { + return bc.blockIndex } -func (bc *Blockchain) CopyBestHeaderChain() ([]*BlockNode, map[BlockHash]*BlockNode) { - newBestChain := []*BlockNode{} - newBestChainMap := make(map[BlockHash]*BlockNode) - newBestChain = append(newBestChain, bc.bestHeaderChain...) - for kk, vv := range bc.bestHeaderChainMap { - newBestChainMap[kk] = vv - } - - return newBestChain, newBestChainMap +// hasBlockNodesIndexedAtHeight returns whether or not there are block nodes at a given height in the block index. 
+func (bc *Blockchain) hasBlockNodesIndexedAtHeight(blockHeight uint64) bool {
+	prefix := _heightHashToNodePrefixByHeight(uint32(blockHeight), false)
+	keysFound := EnumeratePaginatedLimitedKeysForPrefix(bc.db, prefix, prefix, 1)
+	return len(keysFound) > 0
 }
 
 // IsFullyStored determines if there are block nodes that haven't been fully stored or processed in the best block chain.
 func (bc *Blockchain) IsFullyStored() bool {
-	if bc.ChainState() == SyncStateFullyCurrent {
-		for _, blockNode := range bc.bestChain {
-			if !blockNode.Status.IsFullyProcessed() {
-				return false
-			}
+	// TODO: figure out how to iterate over best chain w/o having entire thing in memory.
+	chainState := bc.ChainState()
+	if chainState != SyncStateFullyCurrent && !(chainState == SyncStateNeedBlocksss &&
+		bc.headerTip().Height-bc.blockTip().Height <= 25) {
+		return false
+	}
+	// Get a sampling of blocks from the best chain and check if they are fully stored.
+	// We only need to check a few blocks to determine if the chain is fully stored.
+	blockTipHeight := uint64(bc.BlockTip().Height)
+	increment := blockTipHeight / 20
+	if increment == 0 {
+		increment = 1
+	}
+	blockHeights := []uint64{}
+	// Sample 20 blocks. If we have 200 blocks, we'll check the following blocks:
+	// 0, 10, 20, 30, ..., 180, 190, 200.
+	for ii := uint64(0); ii < blockTipHeight; ii += increment {
+		blockHeights = append(blockHeights, ii)
+	}
+	// Check the most recent 20 blocks if we're past block 100.
+	if blockTipHeight > 100 {
+		// Continuing the above example, if we have 200 blocks, we'll add the following blocks:
+		// 180, 181, 182, 183, ..., 198, 199, 200.
+		// Note that it's not a big deal that we may have the same block twice; we dedupe them
+		// when we convert to a set.
+		for ii := blockTipHeight - 20; ii < blockTipHeight; ii++ {
+			blockHeights = append(blockHeights, ii)
+		}
+	}
+	// Always add the tip height to the sample.
+	blockHeights = append(blockHeights, blockTipHeight)
+	// Convert all block heights to a set to dedupe them.
+	blockHeightSet := NewSet(blockHeights)
+	for _, blockHeight := range blockHeightSet.ToSlice() {
+		// Get the block node at the height from the best block chain.
+		blockNode, exists, err := bc.GetBlockFromBestChainByHeight(blockHeight, false)
+		if err != nil {
+			glog.Errorf("IsFullyStored: Problem getting block at height %d: %v", blockHeight, err)
+			return false
+		}
+		// If the block node doesn't exist, the chain is not fully stored.
+		if !exists {
+			return false
+		}
+		// If the block node is not fully processed, the chain is not fully stored.
+		if !blockNode.Status.IsFullyProcessed() {
+			return false
 		}
-		return true
 	}
-	return false
+	return true
 }
 
 // _initChain initializes the in-memory data structures for the Blockchain object
@@ -840,56 +1007,50 @@
 	// to previous blocks we've read in and error if they don't. This works because
 	// reading blocks in height order as we do here ensures that we'll always
 	// add a block's parents, if they exist, before adding the block itself.
-	var err error
-	if bc.postgres != nil {
-		bc.blockIndexByHash, err = bc.postgres.GetBlockIndex()
-	} else {
-		bc.blockIndexByHash, err = GetBlockIndex(bc.db, false /*bitcoinNodes*/, bc.params)
-	}
-	if err != nil {
-		return errors.Wrapf(err, "_initChain: Problem reading block index from db")
-	}
-	bc.blockIndexByHeight = bc.constructBlockIndexByHeight()
-	// At this point the blockIndexByHash should contain a full node tree with all
-	// nodes pointing to valid parent nodes.
-	{
-		// Find the tip node with the best node hash.
-		tipNode, exists := bc.blockIndexByHash.Get(*bestBlockHash)
-		if !exists {
-			return fmt.Errorf("_initChain(block): Best hash (%#v) not found in block index", bestBlockHash)
-		}
+	// Update: we no longer track the entire block index in memory. After the
+	// migration to Proof-of-Stake, we are producing blocks ~300x faster than before.
+	// It has become too memory intensive to keep the entire best chain and block index
+	// in memory. We now only keep track of the tip of the best header chain and best
+	// block chain. We still keep a cache of recently seen block nodes in order to speed
+	// up processing, but we use the BlockIndex struct as a read-through cache to look up
+	// blocks from the cache or database as needed.
-		// Walk back from the best node to the genesis block and store them all
-		// in bestChain.
-		bc.bestChain, err = GetBestChain(tipNode)
-		if err != nil {
-			return errors.Wrapf(err, "_initChain(block): Problem reading best chain from db")
-		}
-		for _, bestChainNode := range bc.bestChain {
-			bc.bestChainMap[*bestChainNode.Hash] = bestChainNode
-		}
-	}
-
-	// TODO: This code is a bit repetitive but this seemed clearer than factoring it out.
-	{
-		// Find the tip node with the best node hash.
-		tipNode, exists := bc.blockIndexByHash.Get(*bestHeaderHash)
+	var err error
+	var tipNode *BlockNode
+	if bc.postgres != nil {
+		bc.blockIndex.blockIndexByHash, err = bc.postgres.GetBlockIndex()
+		if err != nil {
+			return errors.Wrapf(err, "_initChain: Problem reading block index from postgres")
+		}
+		var exists bool
+		tipNode, exists = bc.blockIndex.blockIndexByHash.Get(*bestBlockHash)
 		if !exists {
-			return fmt.Errorf("_initChain(header): Best hash (%#v) not found in block index", bestHeaderHash)
+			return fmt.Errorf("_initChain: Best hash (%#v) not found in block index", bestBlockHash)
 		}
-
-		// Walk back from the best node to the genesis block and store them all
-		// in bestChain.
-		bc.bestHeaderChain, err = GetBestChain(tipNode)
-		if err != nil {
-			return errors.Wrapf(err, "_initChain(header): Problem reading best chain from db")
-		}
-		for _, bestHeaderChainNode := range bc.bestHeaderChain {
-			bc.bestHeaderChainMap[*bestHeaderChainNode.Hash] = bestHeaderChainNode
+	} else {
+		var tipNodeExists bool
+		// For badger, we only need the tip block to get started.
+		// Special case for looking up the genesis block.
+		if bestBlockHash.IsEqual(GenesisBlockHash) {
+			tipNode, tipNodeExists = bc.blockIndex.GetBlockNodeByHashAndHeight(bestBlockHash, 0)
+		} else {
+			tipNode, tipNodeExists, err = bc.blockIndex.GetBlockNodeByHashOnly(bestBlockHash)
+			if err != nil {
+				return errors.Wrapf(err, "_initChain: Problem reading best block from db")
+			}
+			if !tipNodeExists {
+				return fmt.Errorf("_initChain: Best hash (%#v) not found in block index", bestBlockHash)
+			}
+			// Walk back the last 6 hours of blocks.
+			currBlockCounter := 1
+			parentNode := tipNode.GetParent(bc.blockIndex)
+			for currBlockCounter < 3600*6 && parentNode != nil {
+				parentNode = parentNode.GetParent(bc.blockIndex)
+				currBlockCounter++
+			}
 		}
 	}
-
+	// We start by simply setting the chain tip and header tip to the tip node.
+	bc.blockIndex.setTip(tipNode)
+	bc.blockIndex.setHeaderTip(tipNode)
 	bc.isInitialized = true
 
 	return nil
@@ -931,20 +1092,13 @@ func (bc *Blockchain) _applyUncommittedBlocksToBestChain() error {
 	}
 
 	// Add the uncommitted blocks to the in-memory data structures.
- if _, _, _, err := bc.tryApplyNewTip(uncommittedTipBlockNode, 0, lineageFromCommittedTip); err != nil { + if _, _, _, err = bc.tryApplyNewTip(uncommittedTipBlockNode, 0, lineageFromCommittedTip); err != nil { return errors.Wrapf(err, "_applyUncommittedBlocksToBestChain: ") } - ////////////////////////// Update the bestHeaderChain in-memory data structures ////////////////////////// - currentHeaderTip := bc.headerTip() - _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentHeaderTip, uncommittedTipBlockNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, - bc.bestHeaderChainMap, - blocksToDetach, - blocksToAttach, - ) - + // Set the tip of the best header chain and best block chain to the uncommitted tip. + bc.blockIndex.setTip(uncommittedTipBlockNode) + bc.blockIndex.setHeaderTip(uncommittedTipBlockNode) return nil } @@ -966,6 +1120,10 @@ func NewBlockchain( archivalMode bool, checkpointSyncingProviders []string, ) (*Blockchain, error) { + if err := RunBlockIndexMigrationOnce(db, params); err != nil { + return nil, errors.Wrapf(err, "NewBlockchain: Problem running block index migration") + } + trustedBlockProducerPublicKeys := make(map[PkMapKey]bool) for _, keyStr := range trustedBlockProducerPublicKeyStrs { pkBytes, _, err := Base58CheckDecode(keyStr) @@ -978,7 +1136,7 @@ func NewBlockchain( timer := &Timer{} timer.Initialize() - + blockViewCache, _ := collections.NewLruCache[BlockHash, *BlockViewAndUtxoOps](100) // TODO: parameterize bc := &Blockchain{ db: db, postgres: postgres, @@ -991,13 +1149,8 @@ func NewBlockchain( eventManager: eventManager, archivalMode: archivalMode, - blockIndexByHash: collections.NewConcurrentMap[BlockHash, *BlockNode](), - blockIndexByHeight: make(map[uint64]map[BlockHash]*BlockNode), - bestChainMap: make(map[BlockHash]*BlockNode), - - bestHeaderChainMap: make(map[BlockHash]*BlockNode), - - blockViewCache: *lru.NewMap[BlockHash, *BlockViewAndUtxoOps](100), // TODO: parameterize + blockIndex: NewBlockIndex(db, snapshot, nil), // This tip will be set in _initChain. + blockViewCache: blockViewCache, snapshotCache: NewSnapshotCache(), checkpointSyncingProviders: checkpointSyncingProviders, @@ -1028,158 +1181,13 @@ func NewBlockchain( return bc, nil } -// log2FloorMasks defines the masks to use when quickly calculating -// floor(log2(x)) in a constant log2(32) = 5 steps, where x is a uint32, using -// shifts. They are derived from (2^(2^x) - 1) * (2^(2^x)), for x in 4..0. -var log2FloorMasks = []uint32{0xffff0000, 0xff00, 0xf0, 0xc, 0x2} - -// fastLog2Floor calculates and returns floor(log2(x)) in a constant 5 steps. -func fastLog2Floor(n uint32) uint8 { - rv := uint8(0) - exponent := uint8(16) - for i := 0; i < 5; i++ { - if n&log2FloorMasks[i] != 0 { - rv += exponent - n >>= exponent - } - exponent >>= 1 - } - return rv -} - -// locateInventory returns the node of the block after the first known block in -// the locator along with the number of subsequent nodes needed to either reach -// the provided stop hash or the provided max number of entries. 
-//
-// In addition, there are two special cases:
-//
-// - When no locators are provided, the stop hash is treated as a request for
-// that block, so it will either return the node associated with the stop hash
-// if it is known, or nil if it is unknown
-// - When locators are provided, but none of them are known, nodes starting
-// after the genesis block will be returned
-//
-// This is primarily a helper function for the locateBlocks and locateHeaders
-// functions.
-//
-// This function MUST be called with the chain state lock held (for reads).
-func locateInventory(locator []*BlockHash, stopHash *BlockHash, maxEntries uint32,
-	blockIndex *collections.ConcurrentMap[BlockHash, *BlockNode], bestChainList []*BlockNode,
-	bestChainMap map[BlockHash]*BlockNode) (*BlockNode, uint32) {
-
-	// There are no block locators so a specific block is being requested
-	// as identified by the stop hash.
-	stopNode, stopNodeExists := blockIndex.Get(*stopHash)
-	if len(locator) == 0 {
-		if !stopNodeExists {
-			// No blocks with the stop hash were found so there is
-			// nothing to do.
-			return nil, 0
-		}
-		return stopNode, 1
-	}
-
-	// Find the most recent locator block hash in the main chain. In the
-	// case none of the hashes in the locator are in the main chain, fall
-	// back to the genesis block.
-	startNode := bestChainList[0]
-	for _, hash := range locator {
-		node, bestChainContainsNode := bestChainMap[*hash]
-		if bestChainContainsNode {
-			startNode = node
-			break
-		}
-	}
-
-	// Start at the block after the most recently known block. When there
-	// is no next block it means the most recently known block is the tip of
-	// the best chain, so there is nothing more to do.
-	nextNodeHeight := uint32(startNode.Header.Height) + 1
-	if uint32(len(bestChainList)) <= nextNodeHeight {
-		return nil, 0
-	}
-	startNode = bestChainList[nextNodeHeight]
-
-	// Calculate how many entries are needed.
-	tip := bestChainList[len(bestChainList)-1]
-	total := uint32((tip.Header.Height - startNode.Header.Height) + 1)
-	if stopNodeExists && stopNode.Header.Height >= startNode.Header.Height {
-
-		_, bestChainContainsStopNode := bestChainMap[*stopNode.Hash]
-		if bestChainContainsStopNode {
-			total = uint32((stopNode.Header.Height - startNode.Header.Height) + 1)
-		}
-	}
-	if total > maxEntries {
-		total = maxEntries
-	}
-
-	return startNode, total
-}
-
-// locateHeaders returns the headers of the blocks after the first known block
-// in the locator until the provided stop hash is reached, or up to the provided
-// max number of block headers.
+// LatestLocator returns a block locator for the passed block node. Holding a chain
+// lock isn't strictly necessary anymore.
 //
-// See the comment on the exported function for more details on special cases.
+// Leaving the comment below as it provides context on how this function was
+// implemented prior to the changes to the core codebase in which the best chain
+// and block index are no longer kept entirely in memory.
 //
-// This function MUST be called with the ChainLock held (for reads).
-func locateHeaders(locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32,
-	blockIndex *collections.ConcurrentMap[BlockHash, *BlockNode], bestChainList []*BlockNode,
-	bestChainMap map[BlockHash]*BlockNode) []*MsgDeSoHeader {
-
-	// Find the node after the first known block in the locator and the
-	// total number of nodes after it needed while respecting the stop hash
-	// and max entries.
- node, total := locateInventory(locator, stopHash, maxHeaders, - blockIndex, bestChainList, bestChainMap) - if total == 0 { - return nil - } - - // Populate and return the found headers. - headers, err := SafeMakeSliceWithLengthAndCapacity[*MsgDeSoHeader](0, uint64(total)) - if err != nil { - // TODO: do we really want to introduce an error here? - } - for ii := uint32(0); ii < total; ii++ { - headers = append(headers, node.Header) - if uint32(len(headers)) == total { - break - } - node = bestChainList[node.Header.Height+1] - } - return headers -} - -// LocateBestBlockChainHeaders returns the headers of the blocks after the first known block -// in the locator until the provided stop hash is reached, or up to a max of -// wire.MaxBlockHeadersPerMsg headers. Note that it returns the best headers -// considering only headers for which we have blocks (that is, it considers the -// best *block* chain we have rather than the best *header* chain). This is -// the correct thing to do because in general this function is called in order -// to serve a response to a peer's GetHeaders request. -// -// In addition, there are two special cases: -// -// - When no locators are provided, the stop hash is treated as a request for -// that header, so it will either return the header for the stop hash itself -// if it is known, or nil if it is unknown -// - When locators are provided, but none of them are known, headers starting -// after the genesis block will be returned -// -// This function is safe for concurrent access. -func (bc *Blockchain) LocateBestBlockChainHeaders( - locator []*BlockHash, stopHash *BlockHash, maxHeaders uint32) []*MsgDeSoHeader { - - bc.ChainLock.RLock() - defer bc.ChainLock.RUnlock() - headers := locateHeaders(locator, stopHash, maxHeaders, - bc.blockIndexByHash, bc.bestChain, bc.bestChainMap) - - return headers -} - // LatestLocator returns a block locator for the passed block node. The passed // node can be nil in which case the block locator for the current tip // associated with the view will be returned. @@ -1201,82 +1209,115 @@ func (bc *Blockchain) LocateBestBlockChainHeaders( // [17a 16a 15 14 13 12 11 10 9 8 7 6 4 genesis] // // Caller is responsible for acquiring the ChainLock before calling this function. -func (bc *Blockchain) LatestLocator(tip *BlockNode) []*BlockHash { - - // Calculate the max number of entries that will ultimately be in the - // block locator. See the description of the algorithm for how these - // numbers are derived. - var maxEntries uint8 - if tip.Header.Height <= 12 { - maxEntries = uint8(tip.Header.Height) + 1 - } else { - // Requested hash itself + previous 10 entries + genesis block. - // Then floor(log2(height-10)) entries for the skip portion. - adjustedHeight := uint32(tip.Header.Height) - 10 - maxEntries = 12 + fastLog2Floor(adjustedHeight) - } - locator := make([]*BlockHash, 0, maxEntries) - - step := int32(1) +func (bc *Blockchain) LatestLocator(tip *BlockNode) ([]*BlockHash, []uint32) { + blockTip := bc.blockTip() + committedTip, _ := bc.GetCommittedTip() + locator := make([]*BlockHash, 0) + locatorHeights := make([]uint32, 0) + step := uint32(1) for tip != nil { locator = append(locator, tip.Hash) - - // Nothing more to add once the genesis block has been added. - if tip.Header.Height == 0 { + locatorHeights = append(locatorHeights, tip.Height) + if tip.Height == 0 { break } - // Calculate height of previous node to include ensuring the - // final node is the genesis block. 
-		height := int32(tip.Header.Height) - step
-		if height < 0 {
-			height = 0
-		}
-
-		// When the node is in the current chain view, all of its
-		// ancestors must be too, so use a much faster O(1) lookup in
-		// that case. Otherwise, fall back to walking backwards through
-		// the nodes of the other chain to the correct ancestor.
-		if _, exists := bc.bestHeaderChainMap[*tip.Hash]; exists {
-			tip = bc.bestHeaderChain[height]
+		// Calculate height of previous node to include ensuring we
+		// include the block tip and the committed tip.
+		if step == 1 {
+			// We use .GetParent as it is likely faster.
+			prevHash := tip.Header.PrevBlockHash
+			tip = tip.GetParent(bc.blockIndex)
+			if tip == nil {
+				glog.Errorf("LatestLocator: Block node not found for hash %v", prevHash)
+				continue
+			}
 		} else {
-			tip = tip.Ancestor(uint32(height))
+
+			var height uint32
+			if tip.Height < step {
+				height = 0
+			} else {
+				height = tip.Height - step
+			}
+			// Special cases to ensure we include the block tip, the committed tip,
+			// and the genesis block.
+
+			// If the current node's height is greater than the block tip's height and
+			// the value of the height variable is less than the block tip's height, we
+			// set the height variable to the block tip's height.
+			// If the current node's height is greater than the committed tip's height and
+			// the value of the height variable is less than the committed tip's height, we
+			// set the height variable to the committed tip's height.
+			if tip.Height > blockTip.Height && height < blockTip.Height {
+				height = blockTip.Height
+			} else if tip.Height > committedTip.Height && height < committedTip.Height {
+				height = committedTip.Height
+			}
+
+			var exists bool
+			var err error
+			tip, exists, err = bc.GetBlockFromBestChainByHeight(uint64(height), true)
+			if err != nil {
+				glog.Errorf("LatestLocator: Problem getting block from best chain by height: %v - continuing", err)
+				continue
+			}
+			if !exists {
+				glog.Errorf("LatestLocator: Block at height %d not found - continuing", height)
+				continue
+			}
 		}
 
-		// Once 11 entries have been included, start doubling the
-		// distance between included hashes.
-		if len(locator) > 10 {
+		// Once more than 11 entries have been included, start doubling the
+		// distance between included hashes.
+		if len(locator) > 11 {
 			step *= 2
 		}
 	}
+	return locator, locatorHeights
 
-	return locator
 }
 
-func (bc *Blockchain) HeaderLocatorWithNodeHash(blockHash *BlockHash) ([]*BlockHash, error) {
-	// We can acquire the ChainLock because the only place this is called currently is from
-	// _handleHeaderBundle, which doesn't have the lock.
-	// If we do not acquire the lock, we may hit a concurrent map read write error which causes panic.
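To make the new locator walk concrete: it emits every height while the step is 1 (via GetParent), doubles the step once the locator grows past 11 entries, and snaps the next height so that the block tip, the committed tip, and genesis are never skipped. A pure-function sketch over heights only; the helper name is hypothetical and it assumes every lookup succeeds, whereas the real loop logs and bails out via continue:

```go
// locatorSampleHeights returns the heights a locator built with the rules
// above would sample, walking down from tipHeight. Dense near the tip,
// exponentially sparse later, always touching the two tips and height 0.
func locatorSampleHeights(tipHeight, blockTipHeight, committedTipHeight uint32) []uint32 {
	heights := []uint32{}
	step := uint32(1)
	h := tipHeight
	for {
		heights = append(heights, h)
		if h == 0 {
			break
		}
		next := uint32(0)
		if h >= step {
			next = h - step
		}
		// Snap to the tips so they are never skipped over.
		if h > blockTipHeight && next < blockTipHeight {
			next = blockTipHeight
		} else if h > committedTipHeight && next < committedTipHeight {
			next = committedTipHeight
		}
		h = next
		if len(heights) > 11 {
			step *= 2
		}
	}
	return heights
}
```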
-	bc.ChainLock.RLock()
-	defer bc.ChainLock.RUnlock()
-	node, exists := bc.blockIndexByHash.Get(*blockHash)
+func (bc *Blockchain) HeaderLocatorWithNodeHashAndHeight(
+	blockHash *BlockHash,
+	height uint64,
+) (
+	[]*BlockHash,
+	[]uint32,
+	error,
+) {
+	node, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, height)
 	if !exists {
-		return nil, fmt.Errorf("Blockchain.HeaderLocatorWithNodeHash: Node for hash %v is not in our blockIndexByHash", blockHash)
+		return nil, nil, fmt.Errorf(
+			"Blockchain.HeaderLocatorWithNodeHashAndHeight: Node for hash %v and height %v is not in our block index",
+			blockHash,
+			height,
+		)
 	}
-
-	return bc.LatestLocator(node), nil
+	locator, locatorHeights := bc.LatestLocator(node)
+	return locator, locatorHeights, nil
 }
 
-// LatestHeaderLocator calls LatestLocator in order to fetch a locator
-// for the best header chain.
-func (bc *Blockchain) LatestHeaderLocator() []*BlockHash {
-	// We can acquire the ChainLock here because all calls to this function happen in peer.go
-	// and server.go, which don't hold the lock.
-	// If we do not acquire the lock, we may hit a concurrent map read write error which causes panic.
-	bc.ChainLock.RLock()
-	defer bc.ChainLock.RUnlock()
+// LatestHeaderLocator returns a block locator for the current tip of the
+// header chain.
+func (bc *Blockchain) LatestHeaderLocator() ([]*BlockHash, []uint32) {
 	headerTip := bc.headerTip()
-
 	return bc.LatestLocator(headerTip)
 }
 
@@ -1284,7 +1325,11 @@ func (bc *Blockchain) GetBlockNodesToFetch(
 	numBlocks int, _maxHeight int, blocksToIgnore map[BlockHash]bool) []*BlockNode {
 
 	// Get the tip of the main block chain.
-	bestBlockTip := bc.blockTip()
+	bestBlockTip, ok := bc.GetCommittedTip()
+	if !ok {
+		glog.Errorf("GetBlockNodesToFetch: Problem getting best block tip")
+		return nil
+	}
 
 	// If the maxHeight is set to < 0, then we don't want to use it as a constraint.
 	maxHeight := uint32(math.MaxUint32)
@@ -1292,107 +1337,82 @@ func (bc *Blockchain) GetBlockNodesToFetch(
 		maxHeight = uint32(_maxHeight)
 	}
 
-	// If the tip of the best block chain is in the main header chain, make that
-	// the start point for our fetch.
-	headerNodeStart, blockTipExistsInBestHeaderChain := bc.bestHeaderChainMap[*bestBlockTip.Hash]
-	if !blockTipExistsInBestHeaderChain {
-		// If the hash of the tip of the best blockchain is not in the best header chain, then
-		// this is a case where the header chain has forked off from the best block
-		// chain. In this situation, the best header chain is taken as the source of truth
-		// and so we iterate backward over the best header chain starting at the tip
-		// until we find the first block that has StatusBlockProcessed. Then we fetch
-		// blocks starting from there. Note that, at minimum, the genesis block has
-		// StatusBlockProcessed so this loop is guaranteed to terminate successfully.
-		headerNodeStart = bc.headerTip()
-		for headerNodeStart != nil && (headerNodeStart.Status&StatusBlockProcessed) == 0 {
-			headerNodeStart = headerNodeStart.Parent
+	// Find the height of the node we want to back track from.
+	// If the max height is greater than the current header tip,
+	// we should use the header tip.
+	// If the header tip is still greater than bestBlockTip.Height + numBlocks,
+	// then we need to reduce the height limit.
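The height-limit selection described in the comment above (and implemented in the hunk that follows) reduces to a three-way minimum. An illustrative helper with hypothetical names:

```go
// fetchHeightLimit clamps the caller-supplied max height to the header tip
// and to at most numBlocks past the committed block tip.
func fetchHeightLimit(maxHeight, headerTipHeight, blockTipHeight, numBlocks uint64) uint64 {
	limit := maxHeight
	if limit > headerTipHeight {
		limit = headerTipHeight
	}
	if limit > blockTipHeight+numBlocks {
		limit = blockTipHeight + numBlocks
	}
	return limit
}
```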
+	blockNodesToFetch := []*BlockNode{}
+	heightLimit := uint64(maxHeight)
+	if heightLimit > bc.blockIndex.GetHeaderTip().Header.Height {
+		heightLimit = bc.blockIndex.GetHeaderTip().Header.Height
+	}
+	if heightLimit > bestBlockTip.Header.Height+uint64(numBlocks) {
+		heightLimit = bestBlockTip.Header.Height + uint64(numBlocks)
+	}
+	currentHeight := heightLimit
+	var backtrackingNode *BlockNode
+	if heightLimit == bc.blockIndex.GetHeaderTip().Header.Height {
+		backtrackingNode = bc.blockIndex.GetHeaderTip()
+	} else {
+		var backtrackingNodeExists bool
+		var backtrackingNodeErr error
+		backtrackingNode, backtrackingNodeExists, backtrackingNodeErr = bc.GetBlockFromBestChainByHeight(currentHeight, true)
+		if backtrackingNodeErr != nil {
+			glog.Errorf("GetBlockNodesToFetch: Problem getting backtracking node by height: %v", backtrackingNodeErr)
+			return nil
		}
-
-		if headerNodeStart == nil {
-			// If for some reason we ended up with the headerNode being nil, log
-			// an error and set it to the genesis block.
-			glog.Errorf("GetBlockToFetch: headerNode was nil after iterating "+
-				"backward through best header chain; using genesis block")
-			headerNodeStart = bc.bestHeaderChain[0]
+		if !backtrackingNodeExists || backtrackingNode == nil {
+			glog.Errorf("GetBlockNodesToFetch: Block at height %d not found. Could not find a node to backtrack from.", heightLimit)
+			return nil
		}
	}
-
-	// At this point, headerNodeStart should point to a node in the best header
-	// chain that has StatusBlockProcessed set. As such, the blocks we need to
-	// fetch are those right after this one. Fetch the desired number.
-	currentHeight := headerNodeStart.Height + 1
-	blockNodesToFetch := []*BlockNode{}
-	heightLimit := maxHeight
-	if heightLimit >= uint32(len(bc.bestHeaderChain)) {
-		heightLimit = uint32(len(bc.bestHeaderChain) - 1)
-	}
-	for currentHeight <= heightLimit &&
-		len(blockNodesToFetch) < numBlocks {
-
-		// Get the current hash and increment the height.
-		currentNode := bc.bestHeaderChain[currentHeight]
-		currentHeight++
-
-		if _, exists := blocksToIgnore[*currentNode.Hash]; exists {
-			continue
+	// Walk back from the backtracking node to the bestBlockTip.
+	for len(blockNodesToFetch) < numBlocks &&
+		backtrackingNode.Height > bestBlockTip.Height {
+		// Only include blocks we're not supposed to ignore and that are not already stored.
+		if _, exists := blocksToIgnore[*backtrackingNode.Hash]; !exists && !backtrackingNode.IsStored() {
+			blockNodesToFetch = append(blockNodesToFetch, backtrackingNode)
+		}
+		// Get these values before we try to update the back tracking node.
+		prevHeight := backtrackingNode.Height
+		prevHash := backtrackingNode.Hash
+		prevParentHash := backtrackingNode.Header.PrevBlockHash
+		backtrackingNode = backtrackingNode.GetParent(bc.blockIndex)
+		if backtrackingNode == nil {
+			glog.Errorf("GetBlockNodesToFetch: Parent of block (%v, %d) not found with back tracking. Parent hash %v",
+				prevHash,
+				prevHeight,
+				prevParentHash)
+			return nil
		}
-
-		blockNodesToFetch = append(blockNodesToFetch, currentNode)
	}
+	slices.Reverse(blockNodesToFetch)
 
+	// Return the nodes for the blocks we should fetch.
	return blockNodesToFetch
 }
 
-func (bc *Blockchain) HasHeader(headerHash *BlockHash) bool {
-	_, exists := bc.blockIndexByHash.Get(*headerHash)
-	return exists
-}
-
-func (bc *Blockchain) HeaderAtHeight(blockHeight uint32) *BlockNode {
-	if blockHeight >= uint32(len(bc.bestHeaderChain)) {
-		return nil
-	}
-
-	return bc.bestHeaderChain[blockHeight]
+// HasHeader checks if a header exists in the block index given its hash.
+func (bc *Blockchain) HasHeader(headerHash *BlockHash) (bool, error) { + _, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(headerHash) + return exists, errors.Wrap(err, "Blockchain.HasHeader: ") } -func (bc *Blockchain) HasBlock(blockHash *BlockHash) bool { - node, nodeExists := bc.blockIndexByHash.Get(*blockHash) - if !nodeExists { - glog.V(2).Infof("Blockchain.HasBlock: Node with hash %v does not exist in node index", blockHash) - return false - } - - if (node.Status & StatusBlockProcessed) == 0 { - glog.V(2).Infof("Blockchain.HasBlock: Node %v does not have StatusBlockProcessed so we don't have the block", node) - return false - } - - // Node exists with StatusBlockProcess set means we have it. - return true -} - -func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) bool { - bc.ChainLock.RLock() - defer bc.ChainLock.RUnlock() - - _, exists := bc.blockIndexByHash.Get(*blockHash) +// HasHeaderByHashAndHeight checks if a header exists in the block index given its hash and height. +func (bc *Blockchain) HasHeaderByHashAndHeight(headerHash *BlockHash, height uint64) bool { + _, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, height) return exists } -// This needs to hold a lock on the blockchain because it read from an in-memory map that is -// not thread-safe. -func (bc *Blockchain) GetBlockHeaderFromIndex(blockHash *BlockHash) *MsgDeSoHeader { +// HasBlockInBlockIndex checks if a block exists in the block index given its hash. +func (bc *Blockchain) HasBlockInBlockIndex(blockHash *BlockHash) (bool, error) { bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - block, blockExists := bc.blockIndexByHash.Get(*blockHash) - if !blockExists { - return nil - } - - return block.Header + _, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(blockHash) + return exists, errors.Wrap(err, "Blockchain.HasBlockInBlockIndex: ") } // Don't need a lock because blocks don't get removed from the db after they're added @@ -1406,16 +1426,6 @@ func (bc *Blockchain) GetBlock(blockHash *BlockHash) *MsgDeSoBlock { return blk } -func (bc *Blockchain) GetBlockAtHeight(height uint32) *MsgDeSoBlock { - numBlocks := uint32(len(bc.bestChain)) - - if height >= numBlocks { - return nil - } - - return bc.GetBlock(bc.bestChain[height].Hash) -} - // GetBlockNodeWithHash looks for a block node in the bestChain list that matches the hash. func (bc *Blockchain) GetBlockNodeWithHash(hash *BlockHash) *BlockNode { if hash == nil { @@ -1423,7 +1433,11 @@ func (bc *Blockchain) GetBlockNodeWithHash(hash *BlockHash) *BlockNode { } bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - return bc.bestChainMap[*hash] + bn, bnExists, err := bc.blockIndex.GetBlockNodeByHashOnly(hash) + if !bnExists || err != nil { + return nil + } + return bn } // isTipMaxed compares the tip height to the MaxSyncBlockHeight height. @@ -1446,14 +1460,16 @@ func (bc *Blockchain) isTipCurrent(tip *BlockNode) bool { return tip.Height >= bc.MaxSyncBlockHeight } - minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex) - // Not current if the cumulative work is below the threshold. 
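A note on the two lookup flavors used by the new Has* helpers above: GetBlockNodeByHashAndHeight can go straight to a height-keyed record, while GetBlockNodeByHashOnly has to locate the node without a height hint, which is presumably why only the latter can return an error. A hedged usage sketch of the exported wrappers (the helper name is hypothetical):

```go
// Prefer the hash+height lookup when the height is known; fall back to the
// hash-only variant, which may need a broader DB lookup, otherwise.
func headerExists(bc *Blockchain, hash *BlockHash, height *uint64) (bool, error) {
	if height != nil {
		return bc.HasHeaderByHashAndHeight(hash, *height), nil
	}
	return bc.HasHeader(hash)
}
```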
-	if bc.params.IsPoWBlockHeight(uint64(tip.Height)) && tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 {
-		//glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+
-		//"CumWork (%v) is less than minChainWorkBytes (%v)",
-		//tip.CumWork, BytesToBigint(minChainWorkBytes))
-		return false
+	if bc.params.IsPoWBlockHeight(uint64(tip.Height)) {
+		minChainWorkBytes, _ := hex.DecodeString(bc.params.MinChainWorkHex)
+
+		if tip.CumWork.Cmp(BytesToBigint(minChainWorkBytes)) < 0 {
+			//glog.V(2).Infof("Blockchain.isTipCurrent: Tip not current because "+
+			//"CumWork (%v) is less than minChainWorkBytes (%v)",
+			//tip.CumWork, BytesToBigint(minChainWorkBytes))
+			return false
+		}
	}
 
	// Not current if the tip has a timestamp older than the maximum
@@ -1586,7 +1602,42 @@ func (bc *Blockchain) checkArchivalMode() bool {
	}
 
	firstSnapshotHeight := bc.snapshot.CurrentEpochSnapshotMetadata.FirstSnapshotBlockHeight
-	for _, blockNode := range bc.bestChain {
+	// @diamondhands - can we spot check just a few blocks such as firstSnapshotHeight - 1,
+	// firstSnapshotHeight / 2 - 1, and firstSnapshotHeight / 4 - 1 to see if they are stored?
+	// And then we can check some intervals between firstSnapshotHeight and the tip of the best chain.
+	// We take a sampling of blocks to determine if we've downloaded all the blocks up to the first snapshot height.
+	blockHeights := []uint64{}
+	increment := firstSnapshotHeight / 10
+	if increment == 0 {
+		increment = 1
+	}
+	// We'll sample 10 blocks. If we have 200 as the first snapshot block height,
+	// we'll check the following blocks:
+	// 0, 20, 40, 60, ..., 180, 200.
+	for ii := uint64(0); ii < firstSnapshotHeight; ii += increment {
+		blockHeights = append(blockHeights, ii)
+	}
+	// Check the most recent 10 blocks if we're past block 10.
+	if firstSnapshotHeight > 10 {
+		for ii := firstSnapshotHeight - 10; ii < firstSnapshotHeight; ii++ {
+			blockHeights = append(blockHeights, ii)
+		}
+	}
+
+	// Convert to set to de-dupe and check the block node for each height.
+	for _, height := range NewSet(append(blockHeights, firstSnapshotHeight)).ToSlice() {
+		blockNode, exists, err := bc.GetBlockFromBestChainByHeight(height, false)
+		if err != nil {
+			glog.Errorf("checkArchivalMode: Problem getting block by height: %v", err)
+			return false
+		}
+		// If the block node doesn't exist, we're not ready for archival mode.
+		if !exists {
+			return false
+		}
+		// If the block node's height is greater than the first snapshot height,
+		// we're not ready for archival mode. This shouldn't happen now that we
+		// no longer keep the entire best chain in memory.
		if uint64(blockNode.Height) > firstSnapshotHeight {
			return false
		}
@@ -1636,13 +1687,7 @@ func (bc *Blockchain) isHyperSyncCondition() bool {
	// main chain for blocks, which is why separate functions are required for
	// each of them.
	func (bc *Blockchain) headerTip() *BlockNode {
-	if len(bc.bestHeaderChain) == 0 {
-		return nil
-	}
-
-	// Note this should always work because we should have the genesis block
-	// in here.
-	return bc.bestHeaderChain[len(bc.bestHeaderChain)-1]
+	return bc.blockIndex.GetHeaderTip()
 }
 
 func (bc *Blockchain) HeaderTip() *BlockNode {
@@ -1674,39 +1719,137 @@ func (bc *Blockchain) Snapshot() *Snapshot {
	// invalidate and chop off the headers corresponding to those blocks and
	// their ancestors so the two generally stay in sync.
func (bc *Blockchain) blockTip() *BlockNode { - var tip *BlockNode - - if len(bc.bestChain) == 0 { - return nil - } - - tip = bc.bestChain[len(bc.bestChain)-1] - - return tip + return bc.blockIndex.GetTip() } func (bc *Blockchain) BlockTip() *BlockNode { return bc.blockTip() } +// TODO: this won't work for now. Need to figure out how to handle this. func (bc *Blockchain) BestChain() []*BlockNode { - return bc.bestChain + panic("BestChain not supported.") +} + +// GetBlockFromBestChainByHashAndOptionalHeight returns a block node from the best chain +// given a block hash and an optional height. Providing the height is optional, but it can +// greatly improve the performance of calls to this function. +func (bc *Blockchain) GetBlockFromBestChainByHashAndOptionalHeight( + blockHash *BlockHash, + optionalHeight *uint64, + useHeaderChain bool, +) (*BlockNode, bool, error) { + var bn *BlockNode + var exists bool + var err error + if optionalHeight != nil { + bn, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, *optionalHeight) + } else { + bn, exists, err = bc.blockIndex.GetBlockNodeByHashOnly(blockHash) + } + if err != nil { + return nil, false, err + } + if !exists { + return nil, false, nil + } + if bn.IsCommitted() { + return bn, true, nil + // TODO: what do we do about header chain? they're not committed so we're going to + // have to get a bunch of parents in order to be sure it is part of the best header chain. I guess we could + // have a map, but kinda defeats the purpose of this refactor. + } + // TODO: is this legit? It seems like it's fair game... + if bc.isSyncing() && useHeaderChain && bn.IsHeaderValidated() { + return bn, true, nil + } + blockTip := bc.BlockTip() + if useHeaderChain { + blockTip = bc.HeaderTip() + } + if blockTip == nil { + return nil, false, fmt.Errorf("GetBlockFromBestChainByHashAndOptionalHeight: Block tip not found: use header chain: %v", useHeaderChain) + } + committedTip, exists := bc.GetCommittedTip() + if !exists { + return nil, false, errors.New("GetBlockFromBestChainByHashAndOptionalHeight: Committed tip not found") + } + if uint64(bn.Height) > uint64(blockTip.Height) || uint64(bn.Height) < uint64(committedTip.Height) { + return nil, false, nil + } + currNode := &BlockNode{} + *currNode = *blockTip + for currNode != nil && currNode.Height >= bn.Height { + if currNode.Height == bn.Height { + if currNode.Hash.IsEqual(blockHash) { + return currNode, true, nil + } + return nil, false, nil + } + currNode = currNode.GetParent(bc.blockIndex) + } + return nil, false, nil } -func (bc *Blockchain) SetBestChain(bestChain []*BlockNode) { - bc.bestChain = bestChain +// GetBlockFromBestChainByHeight returns a block node from the best chain given a height. +func (bc *Blockchain) GetBlockFromBestChainByHeight(height uint64, useHeaderChain bool) (*BlockNode, bool, error) { + // TODO: figure out an optimization for header chain handling uncommitted state. + if !useHeaderChain { + // If we're looking for a block from the best block chain, we first + // check if the height is greater than the committed tip. If it is, we + // know it's an uncommitted block and will need to trace it back from + // the best block tip. + committedTip, exists := bc.GetCommittedTip() + if !exists { + return nil, false, nil + } + if height >= uint64(committedTip.Height) { + // For this, we can just loop back from the tip block. 
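Before the by-height walk continues below, it is worth isolating the check that GetBlockFromBestChainByHashAndOptionalHeight performs for uncommitted blocks: start at the relevant tip, follow parent pointers down to the target height, and compare hashes. A sketch, with parentOf standing in for (*BlockNode).GetParent(bc.blockIndex); the helper name is hypothetical:

```go
// isInUncommittedWindow reports whether target is reachable by walking parent
// pointers back from tip to target.Height.
func isInUncommittedWindow(tip, target *BlockNode, parentOf func(*BlockNode) *BlockNode) bool {
	for curr := tip; curr != nil && curr.Height >= target.Height; curr = parentOf(curr) {
		if curr.Height == target.Height {
			// Same height: membership reduces to hash equality.
			return curr.Hash.IsEqual(target.Hash)
		}
	}
	return false
}
```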
+			currentNode := bc.blockIndex.GetTip()
+			for currentNode != nil {
+				if uint64(currentNode.Height) == height {
+					return currentNode, true, nil
+				}
+				if currentNode.Height < committedTip.Height {
+					break
+				}
+				currentNode = currentNode.GetParent(bc.blockIndex)
+			}
+			return nil, false, nil
+		}
+	}
+	// Otherwise, get the block nodes by height and find the one in the best chain.
+	blockNodes := bc.blockIndex.GetBlockNodesByHeight(height)
+	if len(blockNodes) == 0 {
+		return nil, false, nil
+	}
+	for _, blockNode := range blockNodes {
+		// If block node is committed, then we know it is
+		// in the best chain, whether we're looking at the header chain or not.
+		if blockNode.IsCommitted() {
+			return blockNode, true, nil
+		}
+		// TODO: this is crude and incorrect. We can have multiple headers
+		// at a specific height. It's possible that none of the blocks at
+		// this height are committed yet, but one of them is in the best chain.
+		// How can we figure it out?
+		if useHeaderChain && blockNode.IsHeaderValidated() {
+			return blockNode, true, nil
+		}
+	}
+	return nil, false, nil
 }
 
-func (bc *Blockchain) SetBestChainMap(
-	bestChain []*BlockNode,
-	bestChainMap map[BlockHash]*BlockNode,
-	blockIndexByHash *collections.ConcurrentMap[BlockHash, *BlockNode],
-	blockIndexByHeight map[uint64]map[BlockHash]*BlockNode,
-) {
-	bc.bestChain = bestChain
-	bc.bestChainMap = bestChainMap
-	bc.blockIndexByHash = blockIndexByHash
-	bc.blockIndexByHeight = blockIndexByHeight
+// TODO: need to figure out how to handle this for exchange api tests.
+func (bc *Blockchain) SetBestChain(bestChain []*BlockNode) {
+	for _, blockNode := range bestChain {
+		bc.blockIndex.addNewBlockNodeToBlockIndex(blockNode)
+		if bc.blockIndex.GetTip() == nil {
+			bc.blockIndex.setTip(blockNode)
+		} else if bc.blockIndex.GetTip().Height < blockNode.Height {
+			bc.blockIndex.setTip(blockNode)
+		}
+	}
 }
 
 func (bc *Blockchain) _validateOrphanBlockPoW(desoBlock *MsgDeSoBlock) error {
@@ -1851,26 +1994,35 @@ func (bc *Blockchain) MarkBlockInvalid(node *BlockNode, errOccurred RuleError) {
 	//}
 }
 
-func _FindCommonAncestor(node1 *BlockNode, node2 *BlockNode) *BlockNode {
+// Note: we make some assumptions that we only care about ancestors in the best chain. This
+// is only used for processBlockPoW so it's not a *huge* deal.
+func (bc *Blockchain) _FindCommonAncestor(node1 *BlockNode, node2 *BlockNode) *BlockNode {
 	if node1 == nil || node2 == nil {
 		// If either node is nil then there can't be a common ancestor.
 		return nil
 	}
 
-	// Get the two nodes to be at the same height.
+	// If both nodes are at a height greater than the committed tip, then we know that
+	// we have valid parent pointers and can use the Ancestor function to get us to the right place.
 	if node1.Height > node2.Height {
-		node1 = node1.Ancestor(node2.Height)
-	} else if node1.Height < node2.Height {
-		node2 = node2.Ancestor(node1.Height)
+		node1 = node1.Ancestor(node2.Height, bc.blockIndex)
+	} else if node2.Height > node1.Height {
+		node2 = node2.Ancestor(node1.Height, bc.blockIndex)
	}
 
	// Iterate the nodes backward until they're either the same or we
	// reach the end of the lists. We only need to check node1 for nil
	// since they're the same height and we are iterating both back
	// in tandem.
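The by-height search above rests on the invariant that a committed block at a given height is by definition on the best chain, with the header-validated check as an admittedly crude fallback (see the TODO). A sketch of the per-height scan in isolation, under a hypothetical name:

```go
// pickBestChainNode scans the candidates at one height. Committed wins
// outright; header-validated is only a heuristic, since several competing
// headers can share a height.
func pickBestChainNode(nodesAtHeight []*BlockNode, useHeaderChain bool) (*BlockNode, bool) {
	for _, bn := range nodesAtHeight {
		if bn.IsCommitted() {
			return bn, true
		}
		if useHeaderChain && bn.IsHeaderValidated() {
			return bn, true
		}
	}
	return nil, false
}
```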
-	for node1 != nil && !node1.Hash.IsEqual(node2.Hash) {
-		node1 = node1.Parent
-		node2 = node2.Parent
+	for !node1.Hash.IsEqual(node2.Hash) {
+		node1 = node1.GetParent(bc.blockIndex)
+		if node1 == nil {
+			return nil
+		}
+		node2 = node2.GetParent(bc.blockIndex)
+		if node2 == nil {
+			return nil
+		}
 	}
 
 	// By now either node1 == node2 and we found the common ancestor or
@@ -1950,16 +2102,29 @@ func CheckTransactionSanity(txn *MsgDeSoTxn, blockHeight uint32, params *DeSoPar
 	return nil
 }
 
-func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockNode, _detachNodes []*BlockNode, _attachNodes []*BlockNode) {
+func (bc *Blockchain) GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (
+	_commonAncestor *BlockNode, _detachNodes []*BlockNode, _attachNodes []*BlockNode) {
+	// TODO: finding common ancestors is very expensive for txindex when txindex is very far
+	// behind. Currently, it requires loading the entire chain into memory.
 	// Find the common ancestor of this block and the main header chain.
-	commonAncestor := _FindCommonAncestor(tip, newNode)
+	commonAncestor := bc._FindCommonAncestor(tip, newNode)
+
+	if commonAncestor == nil {
+		glog.Fatalf("GetReorgBlocks: No common ancestor found between tip and new node: tip hash (%v), newNode hash (%v)", tip.Hash, newNode.Hash)
+		return
+	}
 
 	// Log a warning if the reorg is going to be a big one.
-	numBlocks := tip.Height - commonAncestor.Height
-	if numBlocks > 10 {
-		glog.Warningf("GetReorgBlocks: Proceeding with reorg of (%d) blocks from "+
-			"block (%v) at height (%d) to block (%v) at height of (%d)",
-			numBlocks, tip, tip.Height, newNode, newNode.Height)
+	if tip != nil {
+		numBlocks := tip.Height - commonAncestor.Height
+		if numBlocks > 10 {
+			glog.Warningf("GetReorgBlocks: Proceeding with reorg of (%d) blocks from "+
+				"block (%v) at height (%d) to block (%v) at height of (%d)",
+				numBlocks, tip, tip.Height, newNode, newNode.Height)
+		}
+	} else {
+		glog.Fatal("GetReorgBlocks: Tip is nil")
+		return
	}
 
	// Get the blocks to detach. Start at the tip and work backwards to the
@@ -1969,8 +2134,14 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN
	// detachBlocks will have the current tip as its first element and parents
	// of the tip thereafter.
	detachBlocks := []*BlockNode{}
-	for currentBlock := tip; *currentBlock.Hash != *commonAncestor.Hash; currentBlock = currentBlock.Parent {
+	currentBlock := &BlockNode{}
+	*currentBlock = *tip
+	for currentBlock != nil && *currentBlock.Hash != *commonAncestor.Hash {
 		detachBlocks = append(detachBlocks, currentBlock)
+		// Capture the parent hash before stepping back so we can still log it
+		// if the parent lookup fails.
+		prevBlockHash := currentBlock.Header.PrevBlockHash
+		currentBlock = currentBlock.GetParent(bc.blockIndex)
+		if currentBlock == nil {
+			glog.Fatalf("GetReorgBlocks: Failed to find parent of block. Parent hash %v", prevBlockHash)
+		}
	}
 
	// Get the blocks to attach. Start at the new node and work backwards to
@@ -1981,8 +2152,15 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN
	// attachNodes will have the new node as its first element and work back to
	// the node right after the common ancestor as its last element.
	attachBlocks := []*BlockNode{}
-	for currentBlock := newNode; *currentBlock.Hash != *commonAncestor.Hash; currentBlock = currentBlock.Parent {
+	currentBlock = &BlockNode{}
+	*currentBlock = *newNode
+	for *currentBlock.Hash != *commonAncestor.Hash {
 		attachBlocks = append(attachBlocks, currentBlock)
+		currentBlock = currentBlock.GetParent(bc.blockIndex)
+		if currentBlock == nil {
+			// TODO: what should we do here?
+ glog.Fatal("GetReorgBlocks: Failed to find parent of block") + } } // Reverse attachBlocks so that the node right after the common ancestor // will be the first element and the node at the end of the list will be @@ -1994,33 +2172,14 @@ func GetReorgBlocks(tip *BlockNode, newNode *BlockNode) (_commonAncestor *BlockN return commonAncestor, detachBlocks, attachBlocks } -func updateBestChainInMemory(mainChainList []*BlockNode, mainChainMap map[BlockHash]*BlockNode, detachBlocks []*BlockNode, attachBlocks []*BlockNode) ( - chainList []*BlockNode, chainMap map[BlockHash]*BlockNode) { - - // Remove the nodes we detached from the end of the best chain node list. - tipIndex := len(mainChainList) - 1 - for blockOffset := 0; blockOffset < len(detachBlocks); blockOffset++ { - blockIndex := tipIndex - blockOffset - delete(mainChainMap, *mainChainList[blockIndex].Hash) - } - mainChainList = mainChainList[:len(mainChainList)-len(detachBlocks)] - - // Add the nodes we attached to the end of the list. Note that this loop iterates - // forward because attachBlocks has the node right after the common ancestor - // first, with the new tip at the end. - for _, attachNode := range attachBlocks { - mainChainList = append(mainChainList, attachNode) - mainChainMap[*attachNode.Hash] = attachNode - } - - return mainChainList, mainChainMap -} - // Caller must acquire the ChainLock for writing prior to calling this. -func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *BlockHash) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) processHeaderPoW( + blockHeader *MsgDeSoHeader, + headerHash *BlockHash, +) (_blockNode *BlockNode, _isMainChain bool, _isOrphan bool, _err error) { // Only accept the header if its height is below the PoS cutover height. if !bc.params.IsPoWBlockHeight(blockHeader.Height) { - return false, false, HeaderErrorBlockHeightAfterProofOfStakeCutover + return nil, false, false, HeaderErrorBlockHeightAfterProofOfStakeCutover } // Only accept headers if the best chain is still in PoW. Once the best chain reaches the final @@ -2028,16 +2187,16 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // headers past this point because they will un-commit blocks that are already committed to the PoS // chain. if bc.BlockTip().Header.Height >= bc.params.GetFinalPoWBlockHeight() { - return false, false, HeaderErrorBestChainIsAtProofOfStakeCutover + return nil, false, false, HeaderErrorBestChainIsAtProofOfStakeCutover } // Start by checking if the header already exists in our node // index. If it does, then return an error. We should generally // expect that processHeaderPoW will only be called on headers we // haven't seen before. - _, nodeExists := bc.blockIndexByHash.Get(*headerHash) + blockNode, nodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, blockHeader.Height) if nodeExists { - return false, false, HeaderErrorDuplicateHeader + return blockNode, false, false, HeaderErrorDuplicateHeader } // If we're here then it means we're processing a header we haven't @@ -2050,7 +2209,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B "MaxTstampOffsetSeconds %d. 
blockHeader.TstampSecs=%d; adjustedTime=%d", tstampDiff, bc.params.MaxTstampOffsetSeconds, blockHeader.GetTstampSecs(), bc.timeSource.AdjustedTime().Unix()) - return false, false, HeaderErrorBlockTooFarInTheFuture + return nil, false, false, HeaderErrorBlockTooFarInTheFuture } // Try to find this header's parent in our block index. @@ -2058,13 +2217,13 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // can return early because we don't process unconnectedTxns. // TODO: Should we just return an error if the header is an orphan? if blockHeader.PrevBlockHash == nil { - return false, false, HeaderErrorNilPrevHash + return nil, false, false, HeaderErrorNilPrevHash } - parentNode, parentNodeExists := bc.blockIndexByHash.Get(*blockHeader.PrevBlockHash) + parentNode, parentNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHeader.PrevBlockHash, blockHeader.Height-1) if !parentNodeExists { // This block is an orphan if its parent doesn't exist and we don't // process unconnectedTxns. - return false, true, nil + return nil, false, true, nil } // If the parent node is invalid then this header is invalid as well. Note that @@ -2072,7 +2231,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B // ValidateFailed. parentHeader := parentNode.Header if parentHeader == nil || (parentNode.Status&(StatusHeaderValidateFailed|StatusBlockValidateFailed)) != 0 { - return false, false, errors.Wrapf( + return nil, false, false, errors.Wrapf( HeaderErrorInvalidParent, "Parent header: %v, Status check: %v, Parent node status: %v, Parent node header: %v", parentHeader, (parentNode.Status&(StatusHeaderValidateFailed|StatusBlockValidateFailed)) != 0, parentNode.Status, @@ -2084,7 +2243,7 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B if blockHeader.Height != prevHeight+1 { glog.Errorf("processHeaderPoW: Height of block (=%d) is not equal to one greater "+ "than the parent height (=%d)", blockHeader.Height, prevHeight) - return false, false, HeaderErrorHeightInvalid + return nil, false, false, HeaderErrorHeightInvalid } // Make sure the block timestamp is greater than the previous block's timestamp. @@ -2115,24 +2274,26 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B "before timestamp of previous block %v", time.Unix(int64(blockHeader.GetTstampSecs()), 0), time.Unix(int64(parentHeader.GetTstampSecs()), 0)) - return false, false, HeaderErrorTimestampTooEarly + return nil, false, false, HeaderErrorTimestampTooEarly } + // @diamondhands - this check is significantly slower because we need to go + // to the DB to get a block at a specific height without knowledge of its hash. // Check that the proof of work beats the difficulty as calculated from // the parent block. Note that if the parent block is in the block index // then it has necessarily had its difficulty validated, and so using it to // do this check makes sense. 
- diffTarget, err := CalcNextDifficultyTarget( - parentNode, blockHeader.Version, bc.params) + diffTarget, err := bc.CalcNextDifficultyTarget( + parentNode, blockHeader.Version) if err != nil { - return false, false, errors.Wrapf(err, + return nil, false, false, errors.Wrapf(err, "ProcessBlock: Problem computing difficulty "+ "target from parent block %s", hex.EncodeToString(parentNode.Hash[:])) } diffTargetBigint := HashToBigint(diffTarget) blockHashBigint := HashToBigint(headerHash) if diffTargetBigint.Cmp(blockHashBigint) < 0 { - return false, false, + return nil, false, false, errors.Wrapf(HeaderErrorBlockDifficultyAboveTarget, "Target: %v, Actual: %v", diffTarget, headerHash) } @@ -2150,7 +2311,6 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B newWork := BytesToBigint(ExpectedWorkForBlockHash(diffTarget)[:]) cumWork := newWork.Add(newWork, parentNode.CumWork) newNode := NewBlockNode( - parentNode, headerHash, uint32(blockHeader.Height), diffTarget, @@ -2177,9 +2337,8 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B if bc.isSyncing() { bc.addNewBlockNodeToBlockIndex(newNode) } else { - newBlockIndexByHash, newBlockIndexByHeight := bc.CopyBlockIndexes() - bc.blockIndexByHash = newBlockIndexByHash - bc.blockIndexByHeight = newBlockIndexByHeight + newBlockIndexByHash := bc.CopyBlockIndexes() + bc.blockIndex.blockIndexByHash = newBlockIndexByHash bc.addNewBlockNodeToBlockIndex(newNode) } @@ -2190,32 +2349,35 @@ func (bc *Blockchain) processHeaderPoW(blockHeader *MsgDeSoHeader, headerHash *B headerTip := bc.headerTip() if headerTip.CumWork.Cmp(newNode.CumWork) < 0 { isMainChain = true + bc.blockIndex.setHeaderTip(newNode) + } - _, detachBlocks, attachBlocks := GetReorgBlocks(headerTip, newNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, bc.bestHeaderChainMap, detachBlocks, attachBlocks) - - // Note that we don't store the best header hash here and so this is an - // in-memory-only adjustment. See the comment above on preventing attacks. + // Put the height hash to node info in the db. + if err = PutHeightHashToNodeInfo(bc.db, bc.snapshot, newNode, false /*bitcoinNodes*/, bc.eventManager); err != nil { + return nil, false, false, errors.Wrapf(err, "ProcessHeader: Problem calling PutHeightHashToNodeInfo") } - return isMainChain, false, nil + return newNode, isMainChain, false, nil } // ProcessHeader is a wrapper around processHeaderPoW and processHeaderPoS, which do the leg-work. -func (bc *Blockchain) ProcessHeader(blockHeader *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) (_isMainChain bool, _isOrphan bool, _err error) { +func (bc *Blockchain) ProcessHeader( + blockHeader *MsgDeSoHeader, + headerHash *BlockHash, + verifySignatures bool, +) (_blockNode *BlockNode, _isMainChain bool, _isOrphan bool, _err error) { bc.ChainLock.Lock() defer bc.ChainLock.Unlock() if blockHeader == nil { // If the header is nil then we return an error. Nothing we can do here. - return false, false, fmt.Errorf("ProcessHeader: Header is nil") + return nil, false, false, fmt.Errorf("ProcessHeader: Header is nil") } // If the header's height is after the PoS cut-over fork height, then we use the PoS header processing logic. // Otherwise, fall back to the PoW logic. 
if bc.params.IsPoSBlockHeight(blockHeader.Height) { - return bc.processHeaderPoS(blockHeader, verifySignatures) + return bc.processHeaderPoS(blockHeader, headerHash, verifySignatures) } return bc.processHeaderPoW(blockHeader, headerHash) @@ -2233,6 +2395,7 @@ func (bc *Blockchain) ProcessBlock(desoBlock *MsgDeSoBlock, verifySignatures boo // If the block's height is after the PoS cut-over fork height, then we use the PoS block processing logic. // Otherwise, fall back to the PoW logic. if bc.params.IsPoSBlockHeight(desoBlock.Header.Height) { + // TODO: can we pass some legit view number in here for non-validator nodes? return bc.processBlockPoS(desoBlock, 1, verifySignatures) } @@ -2322,12 +2485,16 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures bc.timer.Start("Blockchain.ProcessBlock: BlockNode") // See if a node for the block exists in our node index. - nodeToValidate, nodeExists := bc.blockIndexByHash.Get(*blockHash) + // TODO: validate that current height - 1 > 0 + nodeToValidate, nodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeader.Height) // If no node exists for this block at all, then process the header // first before we do anything. This should create a node and set // the header validation status for it. if !nodeExists { - _, isOrphan, err := bc.processHeaderPoW(blockHeader, blockHash) + // Note: it's okay that we don't write the block node for the header + // to the db here as it happens below when we call + // PutHeightHashToNodeInfo + _, _, isOrphan, err := bc.processHeaderPoW(blockHeader, blockHash) if err != nil { // If an error occurred processing the header, then the header // should be marked as invalid, which should be sufficient. @@ -2343,7 +2510,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // Reset the pointers after having presumably added the header to the // block index. - nodeToValidate, nodeExists = bc.blockIndexByHash.Get(*blockHash) + // TODO: validate that current height - 1 > 0 + nodeToValidate, nodeExists = bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeader.Height) } // At this point if the node still doesn't exist or if the header's validation // failed then we should return an error for the block. Note that at this point @@ -2362,7 +2530,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // In this case go ahead and return early. If its parents are truly legitimate then we // should re-request it and its parents from a node and reprocess it // once it is no longer an orphan. - parentNode, parentNodeExists := bc.blockIndexByHash.Get(*blockHeader.PrevBlockHash) + // TODO: validate that current height - 1 > 0 + parentNode, parentNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHeader.PrevBlockHash, blockHeader.Height-1) if !parentNodeExists || (parentNode.Status&StatusBlockProcessed) == 0 { return false, true, nil } @@ -2613,6 +2782,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures // update our data structures to actually make this connection. Do this // in a transaction so that it is atomic. 
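The hunks that follow stamp StatusBlockCommitted onto PoW blocks as they join the best chain, and clear it again when blocks are detached during a reorg. The |= usage and the ClearCommittedStatus call imply that BlockStatus is a bit field, so the two operations reduce to an OR and an AND-NOT; a minimal sketch under that assumption:

```go
// Hypothetical helpers, assuming BlockStatus is a bit field as the |= usage
// below implies. ClearCommittedStatus presumably performs the &^= variant.
func markCommitted(node *BlockNode) {
	node.Status |= StatusBlockCommitted
}

func clearCommitted(node *BlockNode) {
	node.Status &^= StatusBlockCommitted
}
```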
 	if bc.postgres != nil {
+		if !nodeToValidate.IsCommitted() {
+			nodeToValidate.Status |= StatusBlockCommitted
+			bc.blockIndex.addNewBlockNodeToBlockIndex(nodeToValidate)
+		}
+
 		if err = bc.postgres.UpsertBlockAndTransactions(nodeToValidate, desoBlock); err != nil {
 			return false, false, errors.Wrapf(err, "ProcessBlock: Problem upserting block and transactions")
 		}
@@ -2634,6 +2808,10 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 		err = bc.db.Update(func(txn *badger.Txn) error {
 			// This will update the node's status.
 			bc.timer.Start("Blockchain.ProcessBlock: Transactions Db height & hash")
+			if !nodeToValidate.IsCommitted() {
+				nodeToValidate.Status |= StatusBlockCommitted
+				bc.blockIndex.addNewBlockNodeToBlockIndex(nodeToValidate)
+			}
 			if innerErr := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, nodeToValidate, false /*bitcoinNodes*/, bc.eventManager); innerErr != nil {
 				return errors.Wrapf(
 					innerErr, "ProcessBlock: Problem calling PutHeightHashToNodeInfo after validation")
@@ -2680,8 +2858,11 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 
 	// Now that we've set the best chain in the db, update our in-memory data
 	// structure to reflect this. Do a quick check first to make sure it's consistent.
-	lastIndex := len(bc.bestChain) - 1
-	bestChainHash := bc.bestChain[lastIndex].Hash
+	bestChainTip := bc.blockIndex.GetTip()
+	if bestChainTip == nil {
+		return false, false, fmt.Errorf("ProcessBlock: Best chain tip is nil")
+	}
+	bestChainHash := bestChainTip.Hash
 
 	if !bestChainHash.IsEqual(nodeToValidate.Header.PrevBlockHash) {
 		return false, false, fmt.Errorf("ProcessBlock: Last block in bestChain "+
@@ -2691,15 +2872,13 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 
-	// If we're syncing there's no risk of concurrency issues. Otherwise, we
-	// need to make a copy in order to be save.
-	if bc.isSyncing() {
-		bc.bestChain = append(bc.bestChain, nodeToValidate)
-		bc.bestChainMap[*nodeToValidate.Hash] = nodeToValidate
-	} else {
-		newBestChain, newBestChainMap := bc.CopyBestChain()
-		newBestChain = append(newBestChain, nodeToValidate)
-		newBestChainMap[*nodeToValidate.Hash] = nodeToValidate
-		bc.bestChain, bc.bestChainMap = newBestChain, newBestChainMap
-	}
+	// We no longer need to worry about whether we're syncing or not. Just
+	// set the tip.
+	bc.blockIndex.setTip(nodeToValidate)
 
 	// This node is on the main chain so set this variable.
 	isMainChain = true
@@ -2755,7 +2934,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 
 	// Find the common ancestor of this block and the main chain.
 	// TODO: Reorgs with postgres?
-	commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(currentTip, nodeToValidate)
+	commonAncestor, detachBlocks, attachBlocks := bc.GetReorgBlocks(currentTip, nodeToValidate)
 	// Log a warning if the reorg is going to be a big one.
 	numBlocks := currentTip.Height - commonAncestor.Height
 	if numBlocks > 10 {
@@ -2854,7 +3033,7 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 
 	// If the parent node has been marked as invalid then mark this node as
 	// invalid as well.
-		if (attachNode.Parent.Status & StatusBlockValidateFailed) != 0 {
+		attachParent := attachNode.GetParent(bc.blockIndex)
+		if attachParent != nil && (attachParent.Status&StatusBlockValidateFailed) != 0 {
 			bc.MarkBlockInvalid(attachNode, RuleErrorPreviousBlockInvalid)
 			continue
 		}
@@ -2923,6 +3102,14 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 			if err := PutBestHashWithTxn(txn, bc.snapshot, newTipNode.Hash, ChainTypeDeSoBlock, bc.eventManager); err != nil {
 				return err
 			}
+			if !newTipNode.IsCommitted() {
+				newTipNode.Status |= StatusBlockCommitted
+				// update the block index to be safe.
+				bc.addNewBlockNodeToBlockIndex(newTipNode)
+				if err := PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, newTipNode, false, bc.eventManager); err != nil {
+					return err
+				}
+			}
 
 			for _, detachNode := range detachBlocks {
 				// Delete the utxo operations for the blocks we're detaching since we don't need
@@ -2931,6 +3118,16 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 					return errors.Wrapf(err, "ProcessBlock: Problem deleting utxo operations for block")
 				}
 
+				// We also need to revert the committed state if applicable.
+				if detachNode.IsCommitted() {
+					detachNode.ClearCommittedStatus()
+					// update the block index to be safe.
+					bc.addNewBlockNodeToBlockIndex(detachNode)
+					if err = PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, detachNode, false, bc.eventManager); err != nil {
+						return errors.Wrapf(err, "ProcessBlock: Problem putting height hash to node info for detach node.")
+					}
+				}
+
 				// Note we could be even more aggressive here by deleting the nodes and
 				// corresponding blocks from the db here (i.e. not storing any side chain
 				// data on the db). But this seems like a minor optimization that comes at
@@ -2943,6 +3140,15 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 				if err := PutUtxoOperationsForBlockWithTxn(txn, bc.snapshot, blockHeight, attachNode.Hash, utxoOpsForAttachBlocks[ii], bc.eventManager); err != nil {
 					return errors.Wrapf(err, "ProcessBlock: Problem putting utxo operations for block")
 				}
+
+				if !attachNode.IsCommitted() {
+					attachNode.Status |= StatusBlockCommitted
+					// update the block index to be safe.
+					bc.addNewBlockNodeToBlockIndex(attachNode)
+					if err = PutHeightHashToNodeInfoWithTxn(txn, bc.snapshot, attachNode, false, bc.eventManager); err != nil {
+						return errors.Wrapf(err, "ProcessBlock: Problem putting height hash to node info for attach node.")
+					}
+				}
 			}
 
 			// Write the modified utxo set to the view.
@@ -2959,10 +3165,8 @@ func (bc *Blockchain) processBlockPoW(desoBlock *MsgDeSoBlock, verifySignatures
 
 	// Now the db has been updated, update our in-memory best chain. Note that there
 	// is no need to update the node index because it was updated as we went along.
-	newBestChain, newBestChainMap := bc.CopyBestChain()
-	newBestChain, newBestChainMap = updateBestChainInMemory(
-		newBestChain, newBestChainMap, detachBlocks, attachBlocks)
-	bc.bestChain, bc.bestChainMap = newBestChain, newBestChainMap
+	bc.blockIndex.setTip(newTipNode)
+	bc.blockIndex.setHeaderTip(newTipNode)
 
 	// If we made it here then this block is on the main chain.
isMainChain = true diff --git a/lib/blockchain_test.go b/lib/blockchain_test.go index b8a05309f..85817fb24 100644 --- a/lib/blockchain_test.go +++ b/lib/blockchain_test.go @@ -707,7 +707,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockA1.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockA1.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockA1.Header, headerHash, false) require.NoError(err) require.True(isMainChain) require.False(isOrphan) @@ -723,7 +723,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockA2.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockA2.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockA2.Header, headerHash, false) require.NoError(err) require.True(isMainChain) require.False(isOrphan) @@ -739,7 +739,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB1.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB1.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockB1.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.False(isMainChain) @@ -756,7 +756,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB2.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB2.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockB2.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.False(isMainChain) @@ -773,7 +773,7 @@ func TestProcessHeaderskReorgBlocks(t *testing.T) { require.Equal(uint64(1), GetUtxoNumEntries(db, chain.snapshot)) headerHash, err := blockB3.Header.Hash() require.NoError(err) - isMainChain, isOrphan, err := chain.ProcessHeader(blockB3.Header, headerHash, false) + _, isMainChain, isOrphan, err := chain.ProcessHeader(blockB3.Header, headerHash, false) require.NoError(err) // Should not be main chain yet require.True(isMainChain) @@ -1222,6 +1222,8 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1229,6 +1231,8 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1236,21 +1240,21 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1273,21 +1277,21 @@ func TestCalcNextDifficultyTargetHalvingDoublingHitLimit(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 30; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating every 4 second, which is 2x too slow. TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1318,6 +1322,8 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1325,6 +1331,8 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1332,21 +1340,21 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1369,21 +1377,21 @@ func TestCalcNextDifficultyTargetHittingLimitsSlow(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 30; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating every 8 second, which is >2x too slow. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 4)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1414,6 +1422,8 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1421,6 +1431,8 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1428,21 +1440,21 @@ func TestCalcNextDifficultyTargetHittingLimitsFast(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating all at once. TstampNanoSecs: SecondsToNanoSeconds(0), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1469,12 +1481,15 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, TimeBetweenBlocks: 2 * time.Second, MaxDifficultyRetargetFactor: 3, } + bc.params = fakeParams nodes := []*BlockNode{} diffsAsInts := []int64{} @@ -1483,21 +1498,21 @@ func TestCalcNextDifficultyTargetJustRight(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating every 2 second, which is under the limit. 
TstampNanoSecs: SecondsToNanoSeconds(int64(ii * 2)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1524,6 +1539,8 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { _ = assert _ = require + bc, _, _ := NewTestBlockchain(t) + fakeParams := &DeSoParams{ MinDifficultyTargetHex: hex.EncodeToString(BigintToHash(big.NewInt(100000))[:]), TimeBetweenDifficultyRetargets: 6 * time.Second, @@ -1531,6 +1548,8 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { MaxDifficultyRetargetFactor: 2, } + bc.params = fakeParams + nodes := []*BlockNode{} diffsAsInts := []int64{} for ii := 0; ii < 13; ii++ { @@ -1538,21 +1557,21 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { if ii > 0 { lastNode = nodes[ii-1] } - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating every 1 second, which is 2x too fast. TstampNanoSecs: SecondsToNanoSeconds(int64(ii)), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } @@ -1575,21 +1594,21 @@ func TestCalcNextDifficultyTargetSlightlyOff(t *testing.T) { diffsAsInts = []int64{} for ii := 13; ii < 34; ii++ { lastNode := nodes[ii-1] - nextDiff, err := CalcNextDifficultyTarget(lastNode, HeaderVersion0, fakeParams) + nextDiff, err := bc.CalcNextDifficultyTarget(lastNode, HeaderVersion0) require.NoErrorf(err, "Block index: %d", ii) nodes = append(nodes, NewBlockNode( - lastNode, - nil, + NewBlockHash(RandomBytes(32)), uint32(ii), nextDiff, - nil, + big.NewInt(1), &MsgDeSoHeader{ // Blocks generating every 3 seconds, which is slow but under the limit. TstampNanoSecs: SecondsToNanoSeconds(int64(ii) * 3), }, - StatusNone, + StatusHeaderValidated, )) - + bc.blockIndex.setHeaderTip(nodes[len(nodes)-1]) + require.NoError(PutHeightHashToNodeInfo(bc.db, nil, nodes[len(nodes)-1], false, nil)) diffsAsInts = append(diffsAsInts, HashToBigint(nextDiff).Int64()) } diff --git a/lib/connection_manager.go b/lib/connection_manager.go index 3245b0e0c..7183355eb 100644 --- a/lib/connection_manager.go +++ b/lib/connection_manager.go @@ -2,6 +2,7 @@ package lib import ( "fmt" + "github.com/deso-protocol/core/collections" "math" "net" "sync" @@ -10,7 +11,6 @@ import ( "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" - "github.com/decred/dcrd/container/lru" "github.com/golang/glog" ) @@ -53,7 +53,7 @@ type ConnectionManager struct { // TODO: seems like we don't use this. // Keep track of the nonces we've sent in our version messages so // we can prevent connections to ourselves. - sentNonces lru.Set[any] + sentNonces *collections.LruSet[uint64] // This section defines the data structures for storing all the // peers we're aware of. 
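 	// (The collections.LruSet used for sentNonces above wraps hashicorp/golang-lru.
 	// Its semantics are simple: once the set reaches capacity, adding a new key
 	// evicts the least-recently-used one. A minimal sketch:
 	//
 	//	nonces, _ := collections.NewLruSet[uint64](2)
 	//	nonces.Put(1)
 	//	nonces.Put(2)
 	//	nonces.Put(3)          // capacity is 2, so key 1 is evicted
 	//	_ = nonces.Contains(1) // false
 	//	_ = nonces.Contains(3) // true
 	// )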
@@ -126,13 +126,14 @@ func NewConnectionManager( ValidateHyperSyncFlags(_hyperSync, _syncType) + sentNoncesCache, _ := collections.NewLruSet[uint64](1000) return &ConnectionManager{ srv: _srv, params: _params, listeners: _listeners, // We keep track of the last N nonces we've sent in order to detect // self connections. - sentNonces: *lru.NewSet[any](1000), + sentNonces: sentNoncesCache, //newestBlock: _newestBlock, // Initialize the peer data structures. diff --git a/lib/constants.go b/lib/constants.go index 142729848..44d37f6dd 100644 --- a/lib/constants.go +++ b/lib/constants.go @@ -1161,7 +1161,6 @@ var DeSoMainnetParams = DeSoParams{ // choose and the tip. This is done by running once, letting it fail, and then rerunning // with the value it outputs. BitcoinStartBlockNode: NewBlockNode( - nil, mustDecodeHexBlockHashBitcoin("000000000000000000092d577cc673bede24b6d7199ee69c67eeb46c18fc978c"), // Note the height is always one greater than the parent node. 653184, @@ -1524,7 +1523,6 @@ var DeSoTestnetParams = DeSoParams{ // See comment in mainnet config. BitcoinStartBlockNode: NewBlockNode( - nil, mustDecodeHexBlockHashBitcoin("000000000000003aae8fb976056413aa1d863eb5bee381ff16c9642283b1da1a"), 1897056, _difficultyBitsToHash(424073553), @@ -1957,5 +1955,10 @@ const DefaultTestnetCheckpointProvider = "https://test.deso.org" const RoutePathGetCommittedTipBlockInfo = "/api/v0/get-committed-tip-block-info" -// Constants that was removed from newer version of Btcec +// BlockIndexMigrationFileName is the name of the file that contains a boolean value +// that indicates whether the block index migration has been run. See RunBlockIndexMigrationOnce +// for more information. +const BlockIndexMigrationFileName = "block_index_migration.txt" + +// BtcecPubKeyBytesLenUncompressed is a constant that was removed from newer version of Btcec const BtcecPubKeyBytesLenUncompressed = 65 diff --git a/lib/db_utils.go b/lib/db_utils.go index a6201dd27..79fc455c9 100644 --- a/lib/db_utils.go +++ b/lib/db_utils.go @@ -2,18 +2,21 @@ package lib import ( "bytes" + "context" "crypto/rand" "encoding/binary" "encoding/hex" "encoding/json" "fmt" "github.com/deso-protocol/core/collections" + "github.com/dgraph-io/ristretto/z" "io" "log" "math" "math/big" "path/filepath" "reflect" + "runtime" "sort" "strings" "time" @@ -604,7 +607,15 @@ type DBPrefixes struct { // When reading and writing data to this prefixes, please acquire the snapshotDbMutex in the snapshot. PrefixHypersyncSnapshotDBPrefix []byte `prefix_id:"[97]"` - // NEXT_TAG: 98 + // PrefixHashToHeight is used to store the height of a block given its hash. + // This helps us map a block hash to its height so we can look up the full info + // in PrefixHeightHashToNodeInfo. Note that the block index migration will run + // to populate this index when upgrading the node to the new version that + // introduces this index. Before the introduction of this index, the only way + // to find a block node given its hash was to do a full scan of + // PrefixHeightHashToNodeInfo. + PrefixHashToHeight []byte `prefix_id:"[98]"` + // NEXT_TAG: 99 } // DecodeStateKey decodes a state key into a DeSoEncoder type. 
This is useful for encoders which don't have a stored
@@ -1540,18 +1551,98 @@ func DBDeletePKIDMappingsWithTxn(txn *badger.Txn, snap *Snapshot, publicKey []by
 	return nil
 }

-func EnumerateKeysForPrefix(db *badger.DB, dbPrefix []byte, keysOnly bool) (_keysFound [][]byte, _valsFound [][]byte) {
-	return _enumerateKeysForPrefix(db, dbPrefix, keysOnly)
+// _enumerateKeysForPrefixWithStream demonstrates scanning keys (and optional values)
+// that share a prefix via the Badger Stream API.
+func _enumerateKeysForPrefixWithStream(db *badger.DB, dbPrefix []byte, keysOnly bool) ([][]byte, [][]byte, error) {
+	keysFound := [][]byte{}
+	valsFound := [][]byte{}
+
+	// Create a new stream on the DB.
+	stream := db.NewStream()
+	stream.NumGo = runtime.NumCPU() // use all cores
+
+	// Restrict the stream to process only keys that match this prefix.
+	// The Stream API will fetch items in key order, parallelizing internally.
+	stream.Prefix = dbPrefix
+
+	type StreamEntry struct {
+		key   []byte
+		value []byte
+	}
+
+	streamEntries := []StreamEntry{}
+
+	// The Send callback receives batches of KVs.
+	stream.Send = func(buf *z.Buffer) error {
+		list, err := badger.BufferToKVList(buf)
+		if err != nil {
+			return err
+		}
+		for _, kv := range list.Kv {
+			// Double-check the prefix as a safeguard:
+			if !bytes.HasPrefix(kv.Key, dbPrefix) {
+				continue
+			}
+
+			// Copy the key so it doesn't get overwritten by subsequent batches.
+			keyCopy := make([]byte, len(kv.Key))
+			copy(keyCopy, kv.Key)
+			streamEntry := StreamEntry{
+				key: keyCopy,
+			}
+
+			// If we aren't in keysOnly mode, retrieve the value. Copy it as well
+			// so it stays valid after this batch is recycled by the stream.
+			if !keysOnly {
+				valCopy := make([]byte, len(kv.Value))
+				copy(valCopy, kv.Value)
+				streamEntry.value = valCopy
+			}
+			streamEntries = append(streamEntries, streamEntry)
+		}
+		return nil
+	}
+
+	// Execute the stream scan.
+	err := stream.Orchestrate(context.Background())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, streamEntry := range streamEntries {
+		keysFound = append(keysFound, streamEntry.key)
+		if !keysOnly {
+			valsFound = append(valsFound, streamEntry.value)
+		}
+	}
+
+	return keysFound, valsFound, nil
+}
+
+func EnumerateKeysForPrefix(
+	db *badger.DB,
+	dbPrefix []byte,
+	keysOnly bool,
+	skipPrefetch bool,
+) (_keysFound [][]byte, _valsFound [][]byte) {
+	return _enumerateKeysForPrefix(db, dbPrefix, keysOnly, skipPrefetch)
 }

 // A helper function to enumerate all of the values for a particular prefix.
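 // (Shown below. For comparison, a hypothetical call of the stream-based
 // variant above would look like:
 //
 //	keys, vals, err := _enumerateKeysForPrefixWithStream(db, somePrefix, false /*keysOnly*/)
 //
 // where somePrefix stands in for any prefix from DBPrefixes. The stream
 // version parallelizes the scan internally, which suits bulk work like
 // migrations rather than small paginated reads.)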
-func _enumerateKeysForPrefix(db *badger.DB, dbPrefix []byte, keysOnly bool) (_keysFound [][]byte, _valsFound [][]byte) { +func _enumerateKeysForPrefix( + db *badger.DB, + dbPrefix []byte, + keysOnly bool, + skipPrefetch bool, +) (_keysFound [][]byte, _valsFound [][]byte) { keysFound := [][]byte{} valsFound := [][]byte{} dbErr := db.View(func(txn *badger.Txn) error { var err error - keysFound, valsFound, err = _enumerateKeysForPrefixWithTxn(txn, dbPrefix, keysOnly) + keysFound, valsFound, err = _enumerateKeysForPrefixWithTxn(txn, dbPrefix, keysOnly, skipPrefetch) if err != nil { return err } @@ -1565,12 +1656,12 @@ func _enumerateKeysForPrefix(db *badger.DB, dbPrefix []byte, keysOnly bool) (_ke return keysFound, valsFound } -func _enumerateKeysForPrefixWithTxn(txn *badger.Txn, dbPrefix []byte, keysOnly bool) (_keysFound [][]byte, _valsFound [][]byte, _err error) { +func _enumerateKeysForPrefixWithTxn(txn *badger.Txn, dbPrefix []byte, keysOnly bool, skipPrefetch bool) (_keysFound [][]byte, _valsFound [][]byte, _err error) { keysFound := [][]byte{} valsFound := [][]byte{} opts := badger.DefaultIteratorOptions - if keysOnly { + if keysOnly || skipPrefetch { opts.PrefetchValues = false } opts.Prefix = dbPrefix @@ -1598,6 +1689,20 @@ func _enumerateKeysOnlyForPrefixWithTxn(txn *badger.Txn, dbPrefix []byte) (_keys return _enumeratePaginatedLimitedKeysForPrefixWithTxn(txn, dbPrefix, dbPrefix, math.MaxUint32) } +func EnumeratePaginatedLimitedKeysForPrefix( + db *badger.DB, + dbPrefix []byte, + startKey []byte, + limit uint32, +) (_keysFound [][]byte) { + var keysFound [][]byte + _ = db.View(func(txn *badger.Txn) error { + keysFound = _enumeratePaginatedLimitedKeysForPrefixWithTxn(txn, dbPrefix, startKey, limit) + return nil + }) + return keysFound +} + // _enumeratePaginatedLimitedKeysForPrefixWithTxn will look for keys in the db that are GREATER OR EQUAL to the startKey // and satisfy the dbPrefix prefix. The total number of entries fetched will be EQUAL OR SMALLER than provided limit. func _enumeratePaginatedLimitedKeysForPrefixWithTxn(txn *badger.Txn, dbPrefix []byte, startKey []byte, limit uint32) (_keysFound [][]byte) { @@ -1917,7 +2022,7 @@ func DBGetMessageEntriesForPublicKey(handle *badger.DB, publicKey []byte) ( // Goes backwards to get messages in time sorted order. // Limit the number of keys to speed up load times. - _, valuesFound := _enumerateKeysForPrefix(handle, prefix, false) + _, valuesFound := _enumerateKeysForPrefix(handle, prefix, false, false) privateMessages := []*MessageEntry{} for _, valBytes := range valuesFound { @@ -2138,7 +2243,7 @@ func DBGetMessagingGroupEntriesForOwnerWithTxn(txn *badger.Txn, ownerPublicKey * // Setting the prefix to owner's public key will allow us to fetch all messaging keys // for the user. We enumerate this prefix. 
prefix := _dbSeekPrefixForMessagingGroupEntry(ownerPublicKey) - _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix, false) + _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix, false, false) if err != nil { return nil, errors.Wrapf(err, "DBGetMessagingGroupEntriesForOwnerWithTxn: "+ "problem enumerating messaging key entries for prefix (%v)", prefix) @@ -3361,7 +3466,7 @@ func DBGetAllMessagingGroupEntriesForMemberWithTxn(txn *badger.Txn, ownerPublicK // This function is used to fetch all messaging var messagingGroupEntries []*MessagingGroupEntry prefix := _dbSeekPrefixForMessagingGroupMember(ownerPublicKey) - _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix, false) + _, valuesFound, err := _enumerateKeysForPrefixWithTxn(txn, prefix, false, false) if err != nil { return nil, errors.Wrapf(err, "DBGetAllMessagingGroupEntriesForMemberWithTxn: "+ "problem enumerating messaging key entries for prefix (%v)", prefix) @@ -3627,7 +3732,7 @@ func DbGetPostHashesYouLike(handle *badger.DB, yourPublicKey []byte) ( _postHashes []*BlockHash, _err error) { prefix := _dbSeekPrefixForPostHashesYouLike(yourPublicKey) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true, false) postHashesYouLike := []*BlockHash{} for _, keyBytes := range keysFound { @@ -3644,7 +3749,7 @@ func DbGetLikerPubKeysLikingAPostHash(handle *badger.DB, likedPostHash BlockHash _pubKeys [][]byte, _err error) { prefix := _dbSeekPrefixForLikerPubKeysLikingAPostHash(likedPostHash) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true, false) userPubKeys := [][]byte{} for _, keyBytes := range keysFound { @@ -3757,7 +3862,7 @@ func DbGetReposterPubKeyRepostedPostHashToRepostEntryWithTxn(txn *badger.Txn, snap *Snapshot, userPubKey []byte, repostedPostHash BlockHash) *RepostEntry { key := _dbSeekKeyForReposterPubKeyRepostedPostHashToRepostPostHash(userPubKey, repostedPostHash) - keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key, true) + keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key, true, false) if err != nil { return nil } @@ -3805,7 +3910,7 @@ func DbDeleteRepostMappingsWithTxn(txn *badger.Txn, snap *Snapshot, repostEntry func DbDeleteAllRepostMappingsWithTxn(txn *badger.Txn, snap *Snapshot, userPubKey []byte, repostedPostHash BlockHash, eventManager *EventManager, entryIsDeleted bool) error { key := _dbSeekKeyForReposterPubKeyRepostedPostHashToRepostPostHash(userPubKey, repostedPostHash) - keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key, true) + keysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, key, true, false) if err != nil { return nil } @@ -3822,7 +3927,7 @@ func DbGetPostHashesYouRepost(handle *badger.DB, yourPublicKey []byte) ( _postHashes []*BlockHash, _err error) { prefix := _dbSeekPrefixForPostHashesYouRepost(yourPublicKey) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true, false) postHashesYouRepost := []*BlockHash{} for _, keyBytes := range keysFound { @@ -3985,7 +4090,7 @@ func DbGetPKIDsYouFollow(handle *badger.DB, yourPKID *PKID) ( _pkids []*PKID, _err error) { prefix := _dbSeekPrefixForPKIDsYouFollow(yourPKID) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true, false) pkidsYouFollow := []*PKID{} for _, keyBytes := range keysFound { @@ 
-4003,7 +4108,7 @@ func DbGetPKIDsFollowingYou(handle *badger.DB, yourPKID *PKID) ( _pkids []*PKID, _err error) { prefix := _dbSeekPrefixForPKIDsFollowingYou(yourPKID) - keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true) + keysFound, _ := _enumerateKeysForPrefix(handle, prefix, true, false) pkidsFollowingYou := []*PKID{} for _, keyBytes := range keysFound { @@ -4262,7 +4367,7 @@ func DbGetPKIDsThatDiamondedYouMap(handle *badger.DB, yourPKID *PKID, fetchYouDi diamondReceiverStartIdx = 1 + btcec.PubKeyBytesLenCompressed diamondReceiverEndIdx = 1 + 2*btcec.PubKeyBytesLenCompressed } - keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix, false) + keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix, false, false) pkidsToDiamondEntryMap := make(map[PKID][]*DiamondEntry) for ii, keyBytes := range keysFound { @@ -4336,7 +4441,7 @@ func DbGetDiamondEntriesForSenderToReceiver(handle *badger.DB, receiverPKID *PKI _diamondEntries []*DiamondEntry, _err error) { prefix := _dbSeekPrefixForReceiverPKIDAndSenderPKID(receiverPKID, senderPKID) - keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix, false) + keysFound, valsFound := _enumerateKeysForPrefix(handle, prefix, false, false) var diamondEntries []*DiamondEntry for ii, keyBytes := range keysFound { // The DiamondEntry found must not be nil. @@ -4426,7 +4531,7 @@ func DbDeleteBitcoinBurnTxIDWithTxn(txn *badger.Txn, snap *Snapshot, bitcoinBurn } func DbGetAllBitcoinBurnTxIDs(handle *badger.DB) (_bitcoinBurnTxIDs []*BlockHash) { - keysFound, _ := _enumerateKeysForPrefix(handle, Prefixes.PrefixBitcoinBurnTxIDs, true) + keysFound, _ := _enumerateKeysForPrefix(handle, Prefixes.PrefixBitcoinBurnTxIDs, true, false) bitcoinBurnTxIDs := []*BlockHash{} for _, key := range keysFound { bbtxid := &BlockHash{} @@ -4892,7 +4997,6 @@ func SerializeBlockNode(blockNode *BlockNode) ([]byte, error) { func DeserializeBlockNode(data []byte) (*BlockNode, error) { blockNode := NewBlockNode( - nil, // Parent &BlockHash{}, // Hash 0, // Height &BlockHash{}, // DifficultyTarget @@ -5209,6 +5313,15 @@ func _heightHashToNodeIndexPrefix(bitcoinNodes bool) []byte { return prefix } +// _heightHashToNodePrefixByHeight returns the prefix for the height hash to node index +// for a given height. This is useful to find all blocks at a given height. +func _heightHashToNodePrefixByHeight(height uint32, bitcoinNodes bool) []byte { + prefix := _heightHashToNodeIndexPrefix(bitcoinNodes) + heightBytes := make([]byte, 4) + binary.BigEndian.PutUint32(heightBytes[:], height) + return append(prefix, heightBytes[:]...) +} + func _heightHashToNodeIndexKey(height uint32, hash *BlockHash, bitcoinNodes bool) []byte { prefix := _heightHashToNodeIndexPrefix(bitcoinNodes) @@ -5220,6 +5333,13 @@ func _heightHashToNodeIndexKey(height uint32, hash *BlockHash, bitcoinNodes bool return key } +// _hashToHeightIndexKey returns the key for the hash to height index. +func _hashToHeightIndexKey(hash *BlockHash) []byte { + key := append([]byte{}, Prefixes.PrefixHashToHeight...) + key = append(key, hash[:]...) + return key +} + func GetHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, height uint32, hash *BlockHash, bitcoinNodes bool) *BlockNode { @@ -5256,12 +5376,52 @@ func PutHeightHashToNodeInfoWithTxn(txn *badger.Txn, snap *Snapshot, return errors.Wrapf(err, "PutHeightHashToNodeInfoWithTxn: Problem serializing node") } + // Store the full block node in the hash height to block node index. 
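+	// (Together with the hash-to-height entry written just below, this gives a
+	// caller holding only a block hash a two-step point lookup instead of a
+	// prefix scan over the whole index. A sketch of that read path, using
+	// GetHeightForHash (added below) and the existing WithTxn lookup:
+	//
+	//	height, err := GetHeightForHash(db, snap, hash) // hash -> height
+	//	node := GetHeightHashToNodeInfoWithTxn(txn, snap, uint32(height), hash, false)
+	// )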
 	if err := DBSetWithTxn(txn, snap, key, serializedNode, eventManager); err != nil {
 		return err
 	}
+
+	// Also store the hash to height mapping.
+	hashToHeightKey := _hashToHeightIndexKey(node.Hash)
+	if err = DBSetWithTxn(txn, snap, hashToHeightKey, UintToBuf(uint64(node.Height)), eventManager); err != nil {
+		return err
+	}
+
 	return nil
 }

+// PutHashToHeightBatch writes a map of block hashes to heights to the hash to height
+// index in the db. This is only used for the block index migration.
+func PutHashToHeightBatch(handle *badger.DB, snap *Snapshot, hashToHeight map[BlockHash]uint32, eventManager *EventManager) error {
+	return handle.Update(func(txn *badger.Txn) error {
+		for hash, height := range hashToHeight {
+			key := _hashToHeightIndexKey(&hash)
+			if err := DBSetWithTxn(txn, snap, key, UintToBuf(uint64(height)), eventManager); err != nil {
+				return errors.Wrap(err, "PutHashToHeightBatch: Problem setting hash to height")
+			}
+		}
+		return nil
+	})
+}
+
+// GetHeightForHash returns the height for a given block hash by using the hash to height index.
+func GetHeightForHash(db *badger.DB, snap *Snapshot, hash *BlockHash) (uint64, error) {
+	var height uint64
+	err := db.View(func(txn *badger.Txn) error {
+		key := _hashToHeightIndexKey(hash)
+		heightBytes, err := DBGetWithTxn(txn, snap, key)
+		if err != nil {
+			return err
+		}
+		height, _ = Uvarint(heightBytes)
+		return nil
+	})
+	if err != nil {
+		return 0, err
+	}
+	return height, nil
+}
+
 func PutHeightHashToNodeInfoBatch(handle *badger.DB, snap *Snapshot,
 	nodes []*BlockNode, bitcoinNodes bool, eventManager *EventManager) error {
@@ -5311,7 +5471,7 @@ func DbBulkDeleteHeightHashToNodeInfo(handle *badger.DB, snap *Snapshot, nodes [
 	return nil
 }

-// InitDbWithGenesisBlock initializes the database to contain only the genesis
+// InitDbWithDeSoGenesisBlock initializes the database to contain only the genesis
 // block.
 func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB,
 	eventManager *EventManager, snap *Snapshot, postgres *Postgres) error {
@@ -5323,13 +5483,12 @@ func InitDbWithDeSoGenesisBlock(params *DeSoParams, handle *badger.DB,
 	diffTarget := MustDecodeHexBlockHash(params.MinDifficultyTargetHex)
 	blockHash := MustDecodeHexBlockHash(params.GenesisBlockHashHex)
 	genesisNode := NewBlockNode(
-		nil, // Parent
 		blockHash,
 		0, // Height
 		diffTarget,
 		BytesToBigint(ExpectedWorkForBlockHash(diffTarget)[:]), // CumWork
 		genesisBlock.Header, // Header
-		StatusHeaderValidated|StatusBlockProcessed|StatusBlockStored|StatusBlockValidated, // Status
+		StatusHeaderValidated|StatusBlockProcessed|StatusBlockStored|StatusBlockValidated|StatusBlockCommitted, // Status
 	)

 	// Set the fields in the db to reflect the current state of our chain.
@@ -5548,10 +5707,7 @@ func GetBlockIndex(handle *badger.DB, bitcoinNodes bool, params *DeSoParams) (
 		if blockNode.Height == 0 || (*blockNode.Header.PrevBlockHash == BlockHash{}) {
 			continue
 		}
-		if parent, ok := blockIndex.Get(*blockNode.Header.PrevBlockHash); ok {
-			// We found the parent node so connect it.
-			blockNode.Parent = parent
-		} else {
+		if _, ok := blockIndex.Get(*blockNode.Header.PrevBlockHash); !ok {
 			// If we're syncing a DeSo node and we hit a PoS block, we expect there to
 			// be orphan blocks in the block index. In this case, we don't throw an error.
 			if bitcoinNodes == false && params.IsPoSBlockHeight(uint64(blockNode.Height)) {
@@ -5571,9 +5727,172 @@ func GetBlockIndex(handle *badger.DB, bitcoinNodes bool, params *DeSoParams) (
 	return blockIndex, nil
 }

-func GetBestChain(tipNode *BlockNode) ([]*BlockNode, error) {
+// RunBlockIndexMigration runs a migration to populate the hash to height index from the height hash to
+// block node index. We can't use the encoder migrations to handle this situation since it's a new index
+// and not a modification of the existing entry type stored. This migration simply iterates over the keys in the
+// height hash to block node index, extracts the height and hash, and puts a key of the hash and a value of the
+// height in the hash to height prefix.
+func RunBlockIndexMigration(handle *badger.DB, snapshot *Snapshot, eventManager *EventManager, params *DeSoParams) error {
+	// @diamondhands - if we want to migrate from a uint32 -> uint64 for height in the height hash to node index,
+	// this would be a good time to do it. It's not necessary, but it's a bit annoying that we use uint64 in some
+	// places and uint32 in others. Specifically, we don't always validate that we have a uint32 when we go to get
+	// the block from the DB.
+	return handle.Update(func(txn *badger.Txn) error {
+		// Get the prefix for the height hash to node index.
+		prefix := _heightHashToNodeIndexPrefix(false)
+		opts := badger.DefaultIteratorOptions
+		opts.Prefix = prefix
+		// We don't need values for this migration since the height and hash are in the key.
+		opts.PrefetchValues = false
+		nodeIterator := txn.NewIterator(opts)
+		defer nodeIterator.Close()
+		// Initialize a map to store the hash to height mappings.
+		hashToHeightMap := make(map[BlockHash]uint32)
+		// Just in case we need it, get the height of the best hash.
+		bestHash := DbGetBestHash(handle, snapshot, ChainTypeDeSoBlock)
+		var bestHashHeight uint32
+		// Iterate over all the keys in the height hash to node index, extract the height and hash,
+		// and batch write every 10K entries to the hash to height index.
+		startTime := time.Now()
+		for nodeIterator.Seek(prefix); nodeIterator.ValidForPrefix(prefix); nodeIterator.Next() {
+			item := nodeIterator.Item().Key()
+
+			// Parse the key to get the height and hash.
+			height := binary.BigEndian.Uint32(item[1:5])
+			hash := BlockHash{}
+			copy(hash[:], item[5:])
+			hashToHeightMap[hash] = height
+			// If we have a best hash, we want to store the height of the best hash.
+			if bestHash != nil && bestHash.IsEqual(&hash) {
+				bestHashHeight = height
+			}
+			if height%100000 == 0 {
+				glog.V(0).Infof("Time to run block index migration to height %v: %v", height, time.Since(startTime))
+			}
+			// If we have fewer than 10K entries, continue.
+			if len(hashToHeightMap) < 10000 {
+				continue
+			}
+			// Once we have 10K entries, batch write them to the hash to height index
+			// and reset the map.
+			innerErr := PutHashToHeightBatch(handle, snapshot, hashToHeightMap, eventManager)
+			if innerErr != nil {
+				return errors.Wrap(innerErr, "RunBlockIndexMigration: Problem putting hash to height")
+			}
+			hashToHeightMap = make(map[BlockHash]uint32)
+		}
+		// If we have any entries left in the map, batch write them to the hash to height index.
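+		// (For reference, the loop above relies on the key layout of the height
+		// hash to node index: [1-byte prefix][4-byte big-endian height][32-byte
+		// block hash]. Decoding one key in isolation looks like:
+		//
+		//	height := binary.BigEndian.Uint32(key[1:5])
+		//	var hash BlockHash
+		//	copy(hash[:], key[5:5+HashSizeBytes]) // HashSizeBytes == 32
+		// )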
+		if len(hashToHeightMap) > 0 {
+			innerErr := PutHashToHeightBatch(handle, snapshot, hashToHeightMap, eventManager)
+			if innerErr != nil {
+				return errors.Wrap(innerErr, "RunBlockIndexMigration: Problem putting hash to height")
+			}
+		}
+		glog.V(0).Infof("Time to run block index migration: %v", time.Since(startTime))
+		// If we don't have a best hash, then we certainly haven't hit the first pos block height.
+		if bestHash == nil {
+			return nil
+		}
+
+		glog.V(0).Infof("Running PoW block committed migration...")
+
+		// We want to mark all PoW blocks as committed, so we'll get the first pos block
+		// and iterate backwards marking all blocks as committed. If the PoS cutover hasn't
+		// happened yet, then we'll just use the current best hash from the DB to determine the
+		// current tip to iterate back from. This allows us to track the best chain even if
+		// we have multiple proof-of-work blocks at the same height in the DB. Before the
+		// change to not keep the entire best chain in memory, we would know if a block was
+		// in the best chain by looking it up in the best chain map. Now that we no longer
+		// have this, we rely on the IsCommitted function to determine if blocks are in the
+		// best chain. Without this, nodes upgrading to this code that have been running for
+		// a long time and experienced PoW forks will have issues determining which blocks
+		// are in the best chain for PoW block heights.
+		firstPoSBlockHeight := params.GetFirstPoSBlockHeight()
+		// Look up blocks at cutover height.
+		prefixKey := _heightHashToNodePrefixByHeight(uint32(firstPoSBlockHeight), false)
+		_, valsFound, err := _enumerateKeysForPrefixWithTxn(txn, prefixKey, false, true)
+		if err != nil {
+			return errors.Wrap(err, "RunBlockIndexMigration: Problem enumerating keys for prefix")
+		}
+		// There should be 0 or 1 blocks at the cutover height.
+		if len(valsFound) > 1 {
+			return fmt.Errorf("RunBlockIndexMigration: More than one block found at PoS cutover height")
+		}
+		var blockNode *BlockNode
+		// In this case we have not reached the cutover, so we need to pull the best hash
+		// from the DB and iterate backwards.
+		if len(valsFound) == 0 {
+			glog.V(0).Infof("No blocks found at PoS cutover height; falling back to the best hash")
+			blockNode = GetHeightHashToNodeInfoWithTxn(txn, snapshot, bestHashHeight, bestHash, false)
+			if blockNode == nil {
+				return fmt.Errorf("RunBlockIndexMigration: block with Best hash (%v) and height (%v) not found", bestHash, bestHashHeight)
+			}
+		} else {
+			// Otherwise there is exactly one block at the cutover height; make sure it is committed.
+			for _, val := range valsFound {
+				blockNode, err = DeserializeBlockNode(val)
+				if err != nil {
+					return errors.Wrap(err, "RunBlockIndexMigration: Problem deserializing block node")
+				}
+				// If we found the committed block, break out.
+				if blockNode.IsCommitted() {
+					break
+				}
+			}
+			if !blockNode.IsCommitted() {
+				return fmt.Errorf("RunBlockIndexMigration: No committed block found at PoS cutover height")
+			}
+		}
+		startHeight := blockNode.Height
+		startTime = time.Now()
+		var blockNodeBatch []*BlockNode
+		for blockNode != nil {
+			// If the block is not committed, mark it as committed.
+			if !blockNode.IsCommitted() {
+				blockNode.Status |= StatusBlockCommitted
+				// Add it to the batch.
+				blockNodeBatch = append(blockNodeBatch, blockNode)
+			}
+			// Find the parent of this block.
+ parentBlockNode := GetHeightHashToNodeInfoWithTxn( + txn, snapshot, blockNode.Height-1, blockNode.Header.PrevBlockHash, false /*bitcoinNodes*/) + if blockNode.Height > 0 && parentBlockNode == nil { + return errors.New("RunBlockIndexMigration: Parent block node not found") + } + if blockNode.Height%10000 == 0 { + glog.V(0).Infof("Time to run PoW block committed migration from start height %v to height %v: %v", + startHeight, blockNode.Height, time.Since(startTime)) + } + // Jump up to the parent block node. + blockNode = parentBlockNode + // If we have fewer than 10K entries, continue. + if len(blockNodeBatch) < 10000 { + continue + } + // If we have more than 10K entries, write the batch and reset the slice. + err = PutHeightHashToNodeInfoBatch(handle, snapshot, blockNodeBatch, false /*bitcoinNodes*/, eventManager) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem putting block node batch") + } + blockNodeBatch = []*BlockNode{} + } + // If any entries are left in the batch, write them to the DB. + if len(blockNodeBatch) > 0 { + err = PutHeightHashToNodeInfoBatch(handle, snapshot, blockNodeBatch, false /*bitcoinNodes*/, eventManager) + if err != nil { + return errors.Wrap(err, "RunBlockIndexMigration: Problem putting block node batch") + } + } + return nil + }) +} + +// TODO: refactor to actually get the whole best chain if that's +// what someone wants. It'll take a while and a lot of memory. +func GetBestChain(tipNode *BlockNode, blockIndex *BlockIndex) ([]*BlockNode, error) { reversedBestChain := []*BlockNode{} - for tipNode != nil { + maxBestChainInitLength := 3600 * 100 // Cache up to 100 hours of blocks. + for tipNode != nil && len(reversedBestChain) < maxBestChainInitLength { if (tipNode.Status&StatusBlockValidated) == 0 && (tipNode.Status&StatusBitcoinHeaderValidated) == 0 { @@ -5581,7 +5900,7 @@ func GetBestChain(tipNode *BlockNode) ([]*BlockNode, error) { } reversedBestChain = append(reversedBestChain, tipNode) - tipNode = tipNode.Parent + tipNode = tipNode.GetParent(blockIndex) } bestChain := make([]*BlockNode, len(reversedBestChain)) @@ -5690,7 +6009,7 @@ func DbTxindexPublicKeyIndexToTxnKey(publicKey []byte, index uint32) []byte { func DbGetTxindexTxnsForPublicKeyWithTxn(txn *badger.Txn, publicKey []byte) []*BlockHash { txIDs := []*BlockHash{} - _, valsFound, err := _enumerateKeysForPrefixWithTxn(txn, DbTxindexPublicKeyPrefix(publicKey), false) + _, valsFound, err := _enumerateKeysForPrefixWithTxn(txn, DbTxindexPublicKeyPrefix(publicKey), false, false) if err != nil { return txIDs } @@ -8392,7 +8711,7 @@ func DBGetNFTEntriesForPostHash(handle *badger.DB, nftPostHash *BlockHash) (_nft nftEntries := []*NFTEntry{} prefix := append([]byte{}, Prefixes.PrefixPostHashSerialNumberToNFTEntry...) keyPrefix := append(prefix, nftPostHash[:]...) - _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix, false) + _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix, false, false) for _, byteString := range entryByteStringsFound { currentEntry := &NFTEntry{} rr := bytes.NewReader(byteString) @@ -8439,7 +8758,7 @@ func DBGetNFTEntriesForPKID(handle *badger.DB, ownerPKID *PKID) (_nftEntries []* var nftEntries []*NFTEntry prefix := append([]byte{}, Prefixes.PrefixPKIDIsForSaleBidAmountNanosPostHashSerialNumberToNFTEntry...) keyPrefix := append(prefix, ownerPKID[:]...) 
- _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix, false) + _, entryByteStringsFound := _enumerateKeysForPrefix(handle, keyPrefix, false, false) for _, byteString := range entryByteStringsFound { currentEntry := &NFTEntry{} rr := bytes.NewReader(byteString) @@ -8681,7 +9000,7 @@ func DBGetNFTBidEntriesForPKID(handle *badger.DB, bidderPKID *PKID) (_nftBidEntr { prefix := append([]byte{}, Prefixes.PrefixBidderPKIDPostHashSerialNumberToBidNanos...) keyPrefix := append(prefix, bidderPKID[:]...) - keysFound, valuesFound := _enumerateKeysForPrefix(handle, keyPrefix, false) + keysFound, valuesFound := _enumerateKeysForPrefix(handle, keyPrefix, false, false) bidderPKIDLength := len(bidderPKID[:]) for ii, keyFound := range keysFound { @@ -8718,7 +9037,7 @@ func DBGetNFTBidEntries(handle *badger.DB, nftPostHash *BlockHash, serialNumber prefix := append([]byte{}, Prefixes.PrefixPostHashSerialNumberBidNanosBidderPKID...) keyPrefix := append(prefix, nftPostHash[:]...) keyPrefix = append(keyPrefix, EncodeUint64(serialNumber)...) - keysFound, _ := _enumerateKeysForPrefix(handle, keyPrefix, true) + keysFound, _ := _enumerateKeysForPrefix(handle, keyPrefix, true, false) for _, keyFound := range keysFound { bidAmountStartIdx := 1 + HashSizeBytes + 8 // The length of prefix + the post hash + the serial #. bidAmountEndIdx := bidAmountStartIdx + 8 // Add the length of the bid amount (uint64). @@ -8886,7 +9205,7 @@ func DBGetAllOwnerToDerivedKeyMappings(handle *badger.DB, ownerPublicKey PublicK _entries []*DerivedKeyEntry, _err error) { prefix := _dbSeekPrefixForDerivedKeyMappings(ownerPublicKey) - _, valsFound := _enumerateKeysForPrefix(handle, prefix, false) + _, valsFound := _enumerateKeysForPrefix(handle, prefix, false, false) var derivedEntries []*DerivedKeyEntry for _, keyBytes := range valsFound { @@ -9354,7 +9673,7 @@ func DbGetBalanceEntriesYouHold(db *badger.DB, snap *Snapshot, pkid *PKID, filte { prefix := _dbGetPrefixForHODLerPKIDCreatorPKIDToBalanceEntry(isDAOCoin) keyPrefix := append(prefix, pkid[:]...) - _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix, false) + _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix, false, false) for _, byteString := range entryByteStringsFound { currentEntry := &BalanceEntry{} rr := bytes.NewReader(byteString) @@ -9376,7 +9695,7 @@ func DbGetBalanceEntriesHodlingYou(db *badger.DB, snap *Snapshot, pkid *PKID, fi { prefix := _dbGetPrefixForCreatorPKIDHODLerPKIDToBalanceEntry(isDAOCoin) keyPrefix := append(prefix, pkid[:]...) - _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix, false) + _, entryByteStringsFound := _enumerateKeysForPrefix(db, keyPrefix, false, false) for _, byteString := range entryByteStringsFound { currentEntry := &BalanceEntry{} rr := bytes.NewReader(byteString) @@ -9897,7 +10216,7 @@ func DBGetAllDAOCoinLimitOrdersForThisTransactor( func _DBGetAllDAOCoinLimitOrdersByPrefix(handle *badger.DB, prefixKey []byte) ([]*DAOCoinLimitOrderEntry, error) { // Get all DAO coin limit orders containing this prefix. - _, valsFound := _enumerateKeysForPrefix(handle, prefixKey, false) + _, valsFound := _enumerateKeysForPrefix(handle, prefixKey, false, false) orders := []*DAOCoinLimitOrderEntry{} // Cast resulting values from bytes to order entries. 
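 // (All of the call sites updated in this file pass skipPrefetch=false, which
 // preserves the old iterator behavior. The new flag matters when values are
 // needed but batch prefetching would churn memory on a large scan; a
 // hypothetical caller that wants lazy value reads:
 //
 //	keys, vals := EnumerateKeysForPrefix(db, somePrefix, false /*keysOnly*/, true /*skipPrefetch*/)
 //
 // somePrefix stands in for any DBPrefixes entry. The block index migration
 // uses the same trick via _enumerateKeysForPrefixWithTxn(txn, prefixKey,
 // false, true).)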
@@ -10029,7 +10348,7 @@ func DbGetMempoolTxn(db *badger.DB, snap *Snapshot, mempoolTx *MempoolTx) *MsgDe } func DbGetAllMempoolTxnsSortedByTimeAdded(handle *badger.DB) (_mempoolTxns []*MsgDeSoTxn, _error error) { - _, valuesFound := _enumerateKeysForPrefix(handle, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn, false) + _, valuesFound := _enumerateKeysForPrefix(handle, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn, false, false) mempoolTxns := []*MsgDeSoTxn{} for _, mempoolTxnBytes := range valuesFound { @@ -10048,7 +10367,7 @@ func DbGetAllMempoolTxnsSortedByTimeAdded(handle *badger.DB) (_mempoolTxns []*Ms } func DbDeleteAllMempoolTxnsWithTxn(txn *badger.Txn, snap *Snapshot, eventManager *EventManager, entryIsDeleted bool) error { - txnKeysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn, true) + txnKeysFound, _, err := _enumerateKeysForPrefixWithTxn(txn, Prefixes.PrefixMempoolTxnHashToMsgDeSoTxn, true, false) if err != nil { return errors.Wrapf(err, "DbDeleteAllMempoolTxnsWithTxn: ") } @@ -10131,7 +10450,7 @@ func DbDeleteMempoolTxnKeyWithTxn(txn *badger.Txn, snap *Snapshot, txnKey []byte func LogDBSummarySnapshot(db *badger.DB) { keyCountMap := make(map[byte]int) for prefixByte := byte(0); prefixByte < byte(40); prefixByte++ { - keysForPrefix, _ := EnumerateKeysForPrefix(db, []byte{prefixByte}, true) + keysForPrefix, _ := EnumerateKeysForPrefix(db, []byte{prefixByte}, true, false) keyCountMap[prefixByte] = len(keysForPrefix) } glog.Info(spew.Printf("LogDBSummarySnapshot: Current DB summary snapshot: %v", keyCountMap)) @@ -11888,7 +12207,7 @@ func DbGetTransactorNonceEntriesToExpireAtBlockHeightWithTxn(txn *badger.Txn, bl } func DbGetAllTransactorNonceEntries(handle *badger.DB) []*TransactorNonceEntry { - keys, _ := EnumerateKeysForPrefix(handle, Prefixes.PrefixNoncePKIDIndex, true) + keys, _ := EnumerateKeysForPrefix(handle, Prefixes.PrefixNoncePKIDIndex, true, false) nonceEntries := []*TransactorNonceEntry{} for _, key := range keys { // Convert key to nonce entry. diff --git a/lib/db_utils_test.go b/lib/db_utils_test.go index 6a6a0f208..095a1db44 100644 --- a/lib/db_utils_test.go +++ b/lib/db_utils_test.go @@ -193,15 +193,10 @@ func TestBlockNodePutGet(t *testing.T) { require.Equal(b3.Hash[:], b3Ret.Hash[:]) require.Equal(b4.Hash[:], b4Ret.Hash[:]) - // Make sure the nodes are connected properly. - require.Nil(b1Ret.Parent) - require.Equal(b2Ret.Parent, b1Ret) - require.Equal(b3Ret.Parent, b2Ret) - require.Equal(b4Ret.Parent, b1Ret) - // Check that getting the best chain works. { - bestChain, err := GetBestChain(b3Ret) + bi := NewBlockIndex(db, nil, b4) + bestChain, err := GetBestChain(b3Ret, bi) require.NoError(err) require.Len(bestChain, 3) require.Equal(b1Ret, bestChain[0]) @@ -234,7 +229,8 @@ func TestInitDbWithGenesisBlock(t *testing.T) { require.Equal(&genesisHash, genesis.Hash) // Check the bestChain. 
- bestChain, err := GetBestChain(genesis) + bi := NewBlockIndex(db, nil, genesis) + bestChain, err := GetBestChain(genesis, bi) require.NoError(err) require.Len(bestChain, 1) require.Equal(genesis, bestChain[0]) diff --git a/lib/legacy_mempool.go b/lib/legacy_mempool.go index d7439821a..d489f1f7d 100644 --- a/lib/legacy_mempool.go +++ b/lib/legacy_mempool.go @@ -2774,7 +2774,7 @@ func (mp *DeSoMempool) LoadTxnsFromDB() { } } endTime := time.Now() - glog.V(1).Infof("LoadTxnsFromDB: Loaded %v txns in %v seconds", len(dbMempoolTxnsOrderedByTime), endTime.Sub(startTime).Seconds()) + glog.V(0).Infof("LoadTxnsFromDB: Loaded %v txns in %v seconds", len(dbMempoolTxnsOrderedByTime), endTime.Sub(startTime).Seconds()) } func (mp *DeSoMempool) Stop() { diff --git a/lib/network.go b/lib/network.go index e6c0aca83..611c89e2b 100644 --- a/lib/network.go +++ b/lib/network.go @@ -2757,6 +2757,11 @@ func (msg *MsgDeSoHeader) String() string { return fmt.Sprintf("< %d, %s, %v >", msg.Height, hash, msg.Version) } +func (msg *MsgDeSoHeader) ShortString() string { + hash, _ := msg.Hash() + return fmt.Sprintf("< %d, %s >", msg.Height, hash) +} + // ================================================================== // BLOCK Message // ================================================================== diff --git a/lib/network_manager.go b/lib/network_manager.go index 0bfcf101a..495d83bab 100644 --- a/lib/network_manager.go +++ b/lib/network_manager.go @@ -11,7 +11,6 @@ import ( "github.com/btcsuite/btcd/addrmgr" "github.com/btcsuite/btcd/wire" - "github.com/decred/dcrd/container/lru" "github.com/deso-protocol/core/bls" "github.com/deso-protocol/core/collections" "github.com/deso-protocol/core/consensus" @@ -69,7 +68,7 @@ type NetworkManager struct { NonValidatorInboundIndex *collections.ConcurrentMap[RemoteNodeId, *RemoteNode] // Cache of nonces used during handshake. - usedNonces lru.Set[uint64] + usedNonces *collections.LruSet[uint64] // The address manager keeps track of peer addresses we're aware of. 
When
 	// we need to connect to a new outbound peer, it chooses one of the addresses
@@ -121,7 +120,7 @@ func NewNetworkManager(
 	minTxFeeRateNanosPerKB uint64,
 	nodeServices ServiceFlag,
 ) *NetworkManager {
-
+	usedNoncesCache, _ := collections.NewLruSet[uint64](1000)
 	return &NetworkManager{
 		params: params,
 		srv:    srv,
@@ -136,7 +135,7 @@ func NewNetworkManager(
 		ValidatorOutboundIndex:    collections.NewConcurrentMap[bls.SerializedPublicKey, *RemoteNode](),
 		NonValidatorOutboundIndex: collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](),
 		NonValidatorInboundIndex:  collections.NewConcurrentMap[RemoteNodeId, *RemoteNode](),
-		usedNonces: *lru.NewSet[uint64](1000),
+		usedNonces: usedNoncesCache,
 		connectIps: connectIps,
 		persistentIpToRemoteNodeIdsMap: collections.NewConcurrentMap[string, RemoteNodeId](),
 		activeValidatorsMap: collections.NewConcurrentMap[bls.SerializedPublicKey, consensus.Validator](),
@@ -1289,7 +1288,7 @@ func (nm *NetworkManager) handleHandshakeCompletePoSMessage(remoteNode *RemoteNo
 	if remoteNode.IsInbound() {
 		_, ok := nm.GetValidatorInboundIndex().Get(validatorPk.Serialize())
 		if ok {
-			return fmt.Errorf("NetworkManager.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key")
+			return fmt.Errorf("NetworkManager.handleHandshakeCompletePoSMessage: Inbound RemoteNode with duplicate validator public key: %v", validatorPk.ToString())
 		}
 		return nil
 	}
diff --git a/lib/peer.go b/lib/peer.go
index 9296db5bf..81915653a 100644
--- a/lib/peer.go
+++ b/lib/peer.go
@@ -2,6 +2,7 @@ package lib

 import (
 	"fmt"
+	"github.com/deso-protocol/core/collections"
 	"github.com/deso-protocol/go-deadlock"
 	"net"
 	"sort"
@@ -9,8 +10,6 @@ import (
 	"sync/atomic"
 	"time"

-	"github.com/hashicorp/golang-lru/v2"
-
 	"github.com/btcsuite/btcd/wire"
 	"github.com/golang/glog"
 	"github.com/pkg/errors"
@@ -111,7 +110,7 @@ type Peer struct {
 	// Inventory stuff.
 	// The inventory that we know the peer already has.
-	knownInventory *lru.Cache[InvVect, struct{}]
+	knownInventory *collections.LruSet[InvVect]

 	// Whether the peer is ready to receive INV messages. For a peer that
 	// still needs a mempool download, this is false.
@@ -292,7 +291,7 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) {
 	for _, invVect := range msg.InvList {
 		// No matter what, add the inv to the peer's known inventory.
-		pp.knownInventory.Add(*invVect, struct{}{})
+		pp.knownInventory.Put(*invVect)

 		// If this is a hash we are currently processing, no need to do anything.
 		// This check serves to fill the gap between the time when we've decided
@@ -330,7 +329,12 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) {
 		} else if invVect.Type == InvTypeBlock {
 			// For blocks, we check that the hash isn't known to us either in our
 			// main header chain or in side chains.
-			if pp.srv.blockchain.HasHeader(&currentHash) {
+			exists, err := pp.srv.blockchain.HasHeader(&currentHash)
+			if err != nil {
+				glog.Errorf("Peer.HelpHandleInv: Error checking if header exists: %v", err)
+				continue
+			}
+			if exists {
 				continue
 			}

@@ -373,7 +377,14 @@ func (pp *Peer) HelpHandleInv(msg *MsgDeSoInv) {
 	// - When the blocks come in, we process them by adding them to the chain
 	//   one-by-one.
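 	// (The updated LatestHeaderLocator also returns the heights of the locator
 	// hashes, which the logging below uses to make sync issues easier to debug.
 	// Block locators in btcd-derived codebases are conventionally dense near the
 	// tip and exponentially sparse further back, so for a tip at height 1000 the
 	// locator heights look roughly like:
 	//
 	//	1000, 999, ..., 991, 990, 988, 984, 976, ..., 0
 	//
 	// The exact spacing is an implementation detail of LatestHeaderLocator and
 	// is assumed here, not shown in this diff.)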
 	if len(blockHashList) > 0 {
-		locator := pp.srv.blockchain.LatestHeaderLocator()
+		locator, locatorHeights := pp.srv.blockchain.LatestHeaderLocator()
+		headerTip := pp.srv.blockchain.headerTip()
+		blockTip := pp.srv.blockchain.blockTip()
+		glog.V(2).Infof("Peer.HelpHandleInv: Sending GET_HEADERS message to peer %v\n"+
+			"Block Locator Hashes & Heights: (%v, %v)\n"+
+			"Header Tip: (%v, %v)\nBlock Tip: (%v, %v)",
+			pp, locator, locatorHeights, headerTip.Hash, headerTip.Height,
+			blockTip.Hash, blockTip.Height)
 		pp.AddDeSoMessage(&MsgDeSoGetHeaders{
 			StopHash:     &BlockHash{},
 			BlockLocator: locator,
@@ -640,7 +651,7 @@ func NewPeer(_id uint64, _conn net.Conn, _isOutbound bool, _netAddr *wire.NetAdd
 	_syncType NodeSyncType,
 	peerDisconnectedChan chan *Peer) *Peer {

-	knownInventoryCache, _ := lru.New[InvVect, struct{}](maxKnownInventory)
+	knownInventoryCache, _ := collections.NewLruSet[InvVect](maxKnownInventory)

 	pp := Peer{
 		ID: _id,
@@ -980,7 +991,7 @@ out:

 			// Add the new inventory to the peer's knownInventory.
 			for _, invVect := range invMsg.InvList {
-				pp.knownInventory.Add(*invVect, struct{}{})
+				pp.knownInventory.Put(*invVect)
 			}
 		}
diff --git a/lib/pos_block_producer.go b/lib/pos_block_producer.go
index 705899a5b..e549e91a6 100644
--- a/lib/pos_block_producer.go
+++ b/lib/pos_block_producer.go
@@ -4,8 +4,6 @@ import (
 	"math"
 	"time"

-	"github.com/btcsuite/btcd/btcec/v2"
-
 	"github.com/btcsuite/btcd/wire"
 	"github.com/deso-protocol/core/bls"
@@ -347,14 +345,9 @@ func (pbp *PosBlockProducer) getBlockTransactions(
 			return nil, 0, errors.New("Error casting txn meta to AtomicSwapMetadata")
 		}

-		blockProducerPublicKeyBtcec, err := btcec.ParsePubKey(blockProducerPublicKey.ToBytes())
-		if err != nil {
-			return nil, 0,
-				errors.Wrapf(err, "Error parsing block producer public key: ")
-		}
 		// Set fees to the sum of fees paid by public keys other than the block producer.
 		fees, err = filterOutBlockRewardRecipientFees(
-			txnMeta.Txns, blockProducerPublicKeyBtcec)
+			txnMeta.Txns, blockProducerPublicKey)
 		if err != nil {
 			return nil, 0,
 				errors.Wrapf(err, "error filtering out block reward recipient fees")
diff --git a/lib/pos_blockchain.go b/lib/pos_blockchain.go
index 7a60e84a0..81e49be49 100644
--- a/lib/pos_blockchain.go
+++ b/lib/pos_blockchain.go
@@ -38,107 +38,64 @@ import (
 // StatusHeaderValidated or StatusHeaderValidateFailed.
 // 5. Exit early if the header's view is less than the current header chain's tip.
 // 6. Reorg the best header chain if the header's view is higher than the current tip.
-func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, verifySignatures bool) (
-	_isMainChain bool, _isOrphan bool, _err error,
+func (bc *Blockchain) processHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) (
+	_blockNode *BlockNode, _isMainChain bool, _isOrphan bool, _err error,
 ) {
 	if !bc.params.IsPoSBlockHeight(header.Height) {
-		return false, false, errors.Errorf(
+		return nil, false, false, errors.Errorf(
 			"processHeaderPoS: Header height %d is less than the ProofOfStake2ConsensusCutoverBlockHeight %d",
 			header.Height, bc.params.GetFirstPoSBlockHeight(),
 		)
 	}

-	headerHash, err := header.Hash()
-	if err != nil {
-		return false, false, errors.Wrapf(err, "processHeaderPoS: Problem hashing header")
-	}
-
 	// If the incoming header is already part of the best header chain, then we can exit early.
 	// The header is not part of a fork, and is already an ancestor of the current header chain tip.
- if _, isInBestHeaderChain := bc.bestHeaderChainMap[*headerHash]; isInBestHeaderChain { - return true, false, nil + blockNode, isInBestHeaderChain, err := bc.GetBlockFromBestChainByHashAndOptionalHeight( + headerHash, &header.Height, true) + if err != nil { + return nil, false, false, + errors.Wrapf(err, "processHeaderPoS: Problem getting block from best chain by hash: ") + } + if isInBestHeaderChain { + return blockNode, true, false, nil } // If the incoming header is part of a reorg that uncommits the committed tip from the best chain, // then we exit early. Such headers are invalid and should not be synced. committedBlockchainTip, _ := bc.GetCommittedTip() if committedBlockchainTip != nil && committedBlockchainTip.Header.Height >= header.Height { - return false, false, errors.New("processHeaderPoS: Header conflicts with committed tip") + return nil, false, false, errors.New("processHeaderPoS: Header conflicts with committed tip") } // Validate the header and index it in the block index. blockNode, isOrphan, err := bc.validateAndIndexHeaderPoS(header, headerHash, verifySignatures) if err != nil { - return false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") + return blockNode, false, false, errors.Wrapf(err, "processHeaderPoS: Problem validating and indexing header: ") } - // Now that we know we have a valid header, we check the block index for it any orphan children for it - // and heal the parent pointers for all of them. - bc.healPointersForOrphanChildren(blockNode) - // Exit early if the header is an orphan. if isOrphan { - return false, true, nil + return blockNode, false, true, nil } // Exit early if the header's view is less than the current header chain's tip. The header is not // the new tip for the best header chain. currentTip := bc.headerTip() if header.ProposedInView <= currentTip.Header.ProposedInView { - return false, false, nil + return blockNode, false, false, nil } - // The header is not an orphan and has a higher view than the current tip. We reorg the header chain - // and apply the incoming header as the new tip. - _, blocksToDetach, blocksToAttach := GetReorgBlocks(currentTip, blockNode) - bc.bestHeaderChain, bc.bestHeaderChainMap = updateBestChainInMemory( - bc.bestHeaderChain, - bc.bestHeaderChainMap, - blocksToDetach, - blocksToAttach, - ) + bc.blockIndex.setHeaderTip(blockNode) // Success. The header is at the tip of the best header chain. - return true, false, nil -} - -// healPointersForOrphanChildren fixes an inconsistency in the block index that may have -// occurred as a result of a node restart. In cases where we have an orphan node that we store in the -// DB, then on restart, that node's parent will not be in the block index. When processing the parent -// later on, we not only need to store the parent in the block index but also need to update the -// pointer from the orphan block's BlockNode to the parent. We do that dynamically here as we -// process headers. -func (bc *Blockchain) healPointersForOrphanChildren(blockNode *BlockNode) { - // Fetch all potential children of this blockNode from the block index. - blockNodesAtNextHeight, exists := bc.blockIndexByHeight[blockNode.Header.Height+1] - if !exists { - // No children of this blockNode exist in the block index. Exit early. - return - } - - // Iterate through all block nodes at the next block height and update their parent pointers. - for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { - // Check if it's a child of the parent block node. 
- if !blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) { - continue - } - - // Check if it has its parent pointer set. If it does, then we exit early. - if blockNodeAtNextHeight.Parent != nil { - continue - } - - // If the parent block node is not set, then we set it to the parent block node. - blockNodeAtNextHeight.Parent = blockNode - } + return blockNode, true, false, nil } func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHash *BlockHash, verifySignatures bool) ( _headerBlockNode *BlockNode, _isOrphan bool, _err error, ) { // Look up the header in the block index to check if it has already been validated and indexed. - blockNode, blockNodeExists := bc.blockIndexByHash.Get(*headerHash) - + blockNode, blockNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight(headerHash, header.Height) // ------------------------------------ Base Cases ----------------------------------- // // The header is already validated. Exit early. @@ -157,7 +114,11 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas } // The header is an orphan. No need to store it in the block index. Exit early. - parentBlockNode, parentBlockNodeExists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + if header.Height < 1 { + return nil, false, errors.New("validateAndIndexHeaderPoS: Header height is less than 1 - no valid parent height") + } + parentBlockNode, parentBlockNodeExists := bc.blockIndex.GetBlockNodeByHashAndHeight( + header.PrevBlockHash, header.Height-1) if !parentBlockNodeExists { return nil, true, nil } @@ -191,14 +152,14 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas // is also not valid. if parentBlockNode.IsHeaderValidateFailed() { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Parent header failed validations"), ) } // Verify that the header is properly formed. if err := bc.isValidBlockHeaderPoS(header); err != nil { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Header failed validations"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Header failed validations"), ) } @@ -210,13 +171,13 @@ func (bc *Blockchain) validateAndIndexHeaderPoS(header *MsgDeSoHeader, headerHas } if !isValidRandomSeedSignature { return nil, false, bc.storeValidateFailedHeaderInBlockIndexWithWrapperError( - header, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), + header, headerHash, errors.New("validateAndIndexHeaderPoS: Header has invalid random seed signature"), ) } } // Store it as HeaderValidated now that it has passed all validations. - blockNode, err = bc.storeValidatedHeaderInBlockIndex(header) + blockNode, err = bc.storeValidatedHeaderInBlockIndex(header, headerHash) if err != nil { return nil, false, errors.Wrapf(err, "validateAndIndexHeaderPoS: Problem adding header to block index: ") } @@ -268,14 +229,15 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } // If we can't hash the block, we can never store in the block index and we should throw it out immediately. 
- if _, err := block.Hash(); err != nil { + blockHash, err := block.Hash() + if err != nil { return false, false, nil, errors.Wrapf(err, "processBlockPoS: Problem hashing block") } // In hypersync archival mode, we may receive blocks that have already been processed and committed during state // synchronization. However, we may want to store these blocks in the db for archival purposes. We check if the // block we're dealing with is an archival block. If it is, we store it and return early. - if success, err := bc.checkAndStoreArchivalBlock(block); err != nil { + if success, err := bc.checkAndStoreArchivalBlock(block, blockHash); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem checking and storing archival block") } else if success { return true, false, nil, nil @@ -296,6 +258,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v return false, false, nil, errors.Wrap(err, "processBlockPoS: ") } if errors.Is(err, RuleErrorMissingAncestorBlock) { + glog.V(0).Infof("processBlockPoS: Missing ancestor block for block %v", block.Header.String()) // In this case, the block is an orphan that does not extend from any blocks // on our best chain. Try to process the orphan by running basic validations. // If it passes basic integrity checks, we'll store it with the hope that we @@ -308,9 +271,12 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v "processBlockPoS: Unexpected problem getting lineage from committed tip: ") } + if block.Header.Height < 1 { + return false, false, nil, errors.New("processBlockPoS: Block height is less than 1 - no valid parent height") + } // We expect the utxoView for the parent block to be valid because we check that all ancestor blocks have // been validated. - parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash) + parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash, block.Header.Height-1) if err != nil { // This should never happen. If the parent is validated and extends from the tip, then we should // be able to build a UtxoView for it. This failure can only happen due to transient or badger issues. @@ -320,7 +286,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView // First, we perform a validation of the leader and the QC to prevent spam. // If the block fails this check, we throw it away. - passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures) + passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, blockHash, parentUtxoView, verifySignatures) if err != nil { // If we hit an error, we can't store it since we're not sure if it passed the spam prevention check. return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating leader and QC") @@ -331,7 +297,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v } // Validate the block and store it in the block index. The block is guaranteed to not be an orphan. 
- blockNode, err := bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures) + blockNode, err := bc.validateAndIndexBlockPoS(block, blockHash, parentUtxoView, verifySignatures) if err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem validating block: ") @@ -354,7 +320,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // header and applying it to the header chain will result in the two chains being out of // sync. The header chain is less critical and mutations to it are reversible. So we attempt // to mutate it first before attempting to mutate the block chain. - if _, _, err = bc.processHeaderPoS(block.Header, verifySignatures); err != nil { + if _, _, _, err = bc.processHeaderPoS(block.Header, blockHash, verifySignatures); err != nil { return false, false, nil, errors.Wrap(err, "processBlockPoS: Problem processing header") } @@ -401,7 +367,7 @@ func (bc *Blockchain) processBlockPoS(block *MsgDeSoBlock, currentView uint64, v // Now that we've processed this block, we check for any blocks that were previously // stored as orphans, which are children of this block. We can process them now. - blockNodesAtNextHeight := bc.blockIndexByHeight[uint64(blockNode.Height)+1] + blockNodesAtNextHeight := bc.blockIndex.GetBlockNodesByHeight(uint64(blockNode.Height) + 1) for _, blockNodeAtNextHeight := range blockNodesAtNextHeight { if blockNodeAtNextHeight.Header.PrevBlockHash.IsEqual(blockNode.Hash) && blockNodeAtNextHeight.IsStored() && @@ -544,32 +510,30 @@ func (bc *Blockchain) processOrphanBlockPoS(block *MsgDeSoBlock) error { // As a spam-prevention measure, we just throw away this block and don't store it. return nil } + + blockHash, err := block.Header.Hash() if err != nil { - return errors.Wrap(err, "processOrphanBlockPoS: Problem getting snapshot global params") + return errors.Wrap(err, "processOrphanBlockPoS: Problem hashing block") } + // All blocks should pass the basic integrity validations, which ensure the block // is not malformed. If the block is malformed, we should store it as ValidateFailed. if err = bc.isProperlyFormedBlockPoS(block); err != nil { - if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block); innerErr != nil { + if _, innerErr := bc.storeValidateFailedBlockInBlockIndex(block, blockHash); innerErr != nil { return errors.Wrapf(innerErr, "processOrphanBlockPoS: Problem adding validate failed block to block index: %v", err) } return nil } // Add to blockIndexByHash with status STORED only as we are not sure if it's valid yet. - _, err = bc.storeBlockInBlockIndex(block) + _, err = bc.storeBlockInBlockIndex(block, blockHash) return errors.Wrap(err, "processBlockPoS: Problem adding block to block index: ") } // checkAndStoreArchivalBlock is a helper function that takes in a block and checks if it's an archival block. // If it is, it stores the block in the db and returns true. If it's not, it returns false, or false and an error. -func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success bool, _err error) { - // First, get the block hash and lookup the block index. 
- blockHash, err := block.Hash() - if err != nil { - return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem hashing block") - } - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) +func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock, blockHash *BlockHash) (_success bool, _err error) { + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, block.Header.Height) // If the blockNode doesn't exist, or the block is not committed, or it's already stored, then we're not dealing // with an archival block. Archival blocks must have an existing blockNode, be committed, and not be stored. if !exists || !blockNode.IsCommitted() || blockNode.IsStored() { @@ -578,8 +542,7 @@ func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success // If we get to this point, we're dealing with an archival block, so we'll attempt to store it. // This means, this block node is already marked as COMMITTED and VALIDATED, and we just need to store it. - _, err = bc.storeBlockInBlockIndex(block) - if err != nil { + if _, err := bc.storeBlockInBlockIndex(block, blockHash); err != nil { return false, errors.Wrap(err, "checkAndStoreArchivalBlock: Problem storing block in block index") } return true, nil @@ -587,9 +550,9 @@ func (bc *Blockchain) checkAndStoreArchivalBlock(block *MsgDeSoBlock) (_success // storeValidateFailedBlockWithWrappedError is a helper function that takes in a block and an error and // stores the block in the block index with status VALIDATE_FAILED. It returns the resulting BlockNode. -func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, outerErr error) ( +func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlock, hash *BlockHash, outerErr error) ( *BlockNode, error) { - blockNode, innerErr := bc.storeValidateFailedBlockInBlockIndex(block) + blockNode, innerErr := bc.storeValidateFailedBlockInBlockIndex(block, hash) if innerErr != nil { return nil, errors.Wrapf(innerErr, "storeValidateFailedBlockWithWrappedError: Problem adding validate failed block to block index: %v", @@ -600,6 +563,7 @@ func (bc *Blockchain) storeValidateFailedBlockWithWrappedError(block *MsgDeSoBlo func (bc *Blockchain) validateLeaderAndQC( block *MsgDeSoBlock, + blockHash *BlockHash, parentUtxoView *UtxoView, verifySignatures bool, ) (_passedSpamPreventionCheck bool, _err error) { @@ -621,7 +585,7 @@ func (bc *Blockchain) validateLeaderAndQC( "validateLeaderAndQC: Problem getting snapshot epoch number for epoch #%d", currentEpochEntry.EpochNumber) } - isValidPartialSig, err := parentUtxoView.hasValidProposerPartialSignaturePoS(block, snapshotAtEpochNumber) + isValidPartialSig, err := parentUtxoView.hasValidProposerPartialSignaturePoS(block, blockHash, snapshotAtEpochNumber) if err != nil { return false, errors.Wrap(err, "validateLeaderAndQC: Problem validating proposer partial sig") @@ -679,16 +643,24 @@ func (bc *Blockchain) validateLeaderAndQC( // return the new BlockNode. // - Error case: Something goes wrong that doesn't result in the block being marked VALIDATE or VALIDATE_FAILED. In // this case, we will add the block to the block index with status STORED and return the BlockNode. 
-func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoView *UtxoView, verifySignatures bool) ( - *BlockNode, error) { - blockHash, err := block.Header.Hash() - if err != nil { - return nil, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem hashing block %v", block) - } +func (bc *Blockchain) validateAndIndexBlockPoS( + block *MsgDeSoBlock, + blockHash *BlockHash, + parentUtxoView *UtxoView, + verifySignatures bool, +) (*BlockNode, error) { // Base case - Check if the block is validated or validate failed. If so, we can return early. - blockNode, exists := bc.blockIndexByHash.Get(*blockHash) + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, block.Header.Height) if exists && (blockNode.IsValidateFailed() || blockNode.IsValidated()) { + // If the block isn't stored, we store it now. + if !blockNode.IsStored() { + var err error + blockNode, err = bc.storeBlockInBlockIndex(block, blockHash) + if err != nil { + return nil, errors.Wrap(err, "validateAndIndexBlockPoS: Problem storing block to block index") + } + } return blockNode, nil } @@ -708,7 +680,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // Run the validation for the parent and update the block index with the parent's status. We first // check if the parent has a cached status. If so, we use the cached status. Otherwise, we run // the full validation algorithm on it, then index it and use the result. - parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash, verifySignatures) + parentBlockNode, err := bc.validatePreviouslyIndexedBlockPoS(block.Header.PrevBlockHash, block.Header.Height-1, verifySignatures) if err != nil { return blockNode, errors.Wrapf(err, "validateAndIndexBlockPoS: Problem validating previously indexed block: ") } @@ -717,13 +689,13 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // this block as ValidateFailed. If the parent is not ValidateFailed, we ONLY store the block and move on. // We don't want to store it as ValidateFailed because we don't know if it's actually invalid. if parentBlockNode.IsValidateFailed() { - return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("parent block is ValidateFailed")) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, errors.New("parent block is ValidateFailed")) } // If the parent block still has a Stored status, it means that we weren't able to validate it // despite trying. The current block will also be stored as a Stored block. 
if !parentBlockNode.IsValidated() { - return bc.storeBlockInBlockIndex(block) + return bc.storeBlockInBlockIndex(block, blockHash) } // Validate the block's random seed signature @@ -731,14 +703,14 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi isValidRandomSeedSignature, err := bc.hasValidProposerRandomSeedSignaturePoS(block.Header) if err != nil { var innerErr error - blockNode, innerErr = bc.storeBlockInBlockIndex(block) + blockNode, innerErr = bc.storeBlockInBlockIndex(block, blockHash) if innerErr != nil { return nil, errors.Wrapf(innerErr, "validateAndIndexBlockPoS: Problem adding block to block index: %v", err) } return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem validating random seed signature") } if !isValidRandomSeedSignature { - return bc.storeValidateFailedBlockWithWrappedError(block, errors.New("invalid random seed signature")) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, errors.New("invalid random seed signature")) } } @@ -746,15 +718,15 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi serializedBlock, err := block.ToBytes(false) if err != nil { return bc.storeValidateFailedBlockWithWrappedError( - block, errors.Wrap(err, "validateAndIndexBlockPoS: Problem serializing block")) + block, blockHash, errors.Wrap(err, "validateAndIndexBlockPoS: Problem serializing block")) } if uint64(len(serializedBlock)) > parentUtxoView.GetCurrentGlobalParamsEntry().MaxBlockSizeBytesPoS { - return bc.storeValidateFailedBlockWithWrappedError(block, RuleErrorBlockTooBig) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, RuleErrorBlockTooBig) } // Check if the block is properly formed and passes all basic validations. if err = bc.isValidBlockPoS(block); err != nil { - return bc.storeValidateFailedBlockWithWrappedError(block, err) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, err) } // Connect this block to the parent block's UtxoView. @@ -765,7 +737,7 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // If we fail to connect the block, then it means the block is invalid. We should store it as ValidateFailed. if _, err = parentUtxoView.ConnectBlock(block, txHashes, verifySignatures, nil, block.Header.Height); err != nil { // If it doesn't connect, we want to mark it as ValidateFailed. - return bc.storeValidateFailedBlockWithWrappedError(block, err) + return bc.storeValidateFailedBlockWithWrappedError(block, blockHash, err) } // If the block is too far in the future, we leave it as STORED and return early. @@ -774,11 +746,11 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem checking block timestamp") } if failsTimestampDriftCheck { - return bc.storeBlockInBlockIndex(block) + return bc.storeBlockInBlockIndex(block, blockHash) } // We can now add this block to the block index since we have performed all basic validations. - blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + blockNode, err = bc.storeValidatedBlockInBlockIndex(block, blockHash) if err != nil { return blockNode, errors.Wrap(err, "validateAndIndexBlockPoS: Problem adding block to block index: ") } @@ -789,10 +761,11 @@ func (bc *Blockchain) validateAndIndexBlockPoS(block *MsgDeSoBlock, parentUtxoVi // cached block, and runs the validateAndIndexBlockPoS algorithm on it. It returns the resulting BlockNode. 
func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(
blockHash *BlockHash,
+ blockHeight uint64,
verifySignatures bool,
) (*BlockNode, error) {
// Check if the block is already in the block index. If so, we check its current status first.
- blockNode, exists := bc.blockIndexByHash.Get(*blockHash)
+ blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeight)
if !exists {
// We should never really hit this if the block has already been cached in the block index first.
// We check here anyway to be safe.
@@ -813,8 +786,11 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(
// provided the block was cached in the block index and stored in the DB first.
return nil, errors.Wrapf(err, "validatePreviouslyIndexedBlockPoS: Problem fetching block from DB")
}
+ if block.Header.Height < 1 {
+ return nil, errors.New("validatePreviouslyIndexedBlockPoS: Block height is less than 1 - no valid parent height")
+ }
// Build utxoView for the block's parent.
- parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash)
+ parentUtxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*block.Header.PrevBlockHash, block.Header.Height-1)
if err != nil {
// This should never happen. If the parent is validated and extends from the tip, then we should
// be able to build a UtxoView for it. This failure can only happen due to transient or badger issues.
@@ -823,7 +799,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(
parentUtxoView := parentUtxoViewAndUtxoOps.UtxoView

// If the block isn't validated or validate failed, we need to run the anti-spam checks on it.
- passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, parentUtxoView, verifySignatures)
+ passedSpamPreventionCheck, err := bc.validateLeaderAndQC(block, blockHash, parentUtxoView, verifySignatures)
if err != nil {
// If we hit an error, that means there was an intermittent issue when trying to
// validate the QC or the leader.
@@ -832,7 +808,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(
if !passedSpamPreventionCheck {
// If the QC or Leader check failed, we'll never accept this block, but we've already stored it,
// so we need to mark it as ValidateFailed.
- blockNode, err = bc.storeValidateFailedBlockInBlockIndex(block)
+ blockNode, err = bc.storeValidateFailedBlockInBlockIndex(block, blockHash)
if err != nil {
return nil, errors.Wrap(err,
"validatePreviouslyIndexedBlockPoS: Problem adding validate failed block to block index")
@@ -841,7 +817,7 @@ func (bc *Blockchain) validatePreviouslyIndexedBlockPoS(
// We run the full validation algorithm on the block.
- return bc.validateAndIndexBlockPoS(block, parentUtxoView, verifySignatures)
+ return bc.validateAndIndexBlockPoS(block, blockHash, parentUtxoView, verifySignatures)
}

// isValidBlockPoS performs all basic block integrity checks. Any error
@@ -890,7 +866,7 @@ func (bc *Blockchain) isValidBlockHeaderPoS(header *MsgDeSoHeader) error {
// greater than its parent's timestamp.
func (bc *Blockchain) isBlockTimestampValidRelativeToParentPoS(header *MsgDeSoHeader) error {
// Validate that the timestamp is not less than its parent.
- parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash)
+ parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1)
if !exists {
// Note: this should never happen as we only call this function after
// we've validated that all ancestors exist in the block index.
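Every parent lookup in the hunks above follows the same shape: the caller knows the child's height, so the parent is fetched with GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1), and the new height-less-than-1 guards keep that subtraction safe. A minimal, self-contained sketch of a height-bucketed index with that lookup shape; the blockHash, node, and index types here are hypothetical stand-ins, not the PR's real BlockNode or blockIndex:

```go
package main

import "fmt"

// blockHash and node are illustrative stand-ins for the real BlockHash and BlockNode.
type blockHash string

type node struct {
	hash     blockHash
	height   uint64
	prevHash blockHash
}

// index buckets nodes by height, then by hash, so both "node at (hash, height)"
// and "all nodes at height h" queries stay cheap.
type index struct {
	byHeight map[uint64]map[blockHash]*node
}

func newIndex() *index {
	return &index{byHeight: make(map[uint64]map[blockHash]*node)}
}

func (idx *index) put(n *node) {
	bucket, ok := idx.byHeight[n.height]
	if !ok {
		bucket = make(map[blockHash]*node)
		idx.byHeight[n.height] = bucket
	}
	bucket[n.hash] = n
}

// getByHashAndHeight mirrors the GetBlockNodeByHashAndHeight access pattern:
// both coordinates are required, so no full-index scan is ever needed.
// Reading through a missing height bucket is safe: a nil map read returns the zero value.
func (idx *index) getByHashAndHeight(h blockHash, height uint64) (*node, bool) {
	n, ok := idx.byHeight[height][h]
	return n, ok
}

func main() {
	idx := newIndex()
	idx.put(&node{hash: "aa", height: 9})
	idx.put(&node{hash: "bb", height: 10, prevHash: "aa"})

	// A parent lookup knows the child's height, so it asks at height-1,
	// just as the header validations above do with header.Height-1.
	child, _ := idx.getByHashAndHeight("bb", 10)
	if parent, ok := idx.getByHashAndHeight(child.prevHash, child.height-1); ok {
		fmt.Println("found parent at height", parent.height)
	}
}
```

Keying on height first keeps each bucket small and makes the "all nodes at height h" queries used by GetBlockNodesByHeight cheap, which appears to be the motivation for threading heights through every call site in this PR.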
@@ -1050,7 +1026,7 @@ func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { return RuleErrorPoSBlockBeforeCutoverHeight } // Validate that the block height is exactly one greater than its parent. - parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1065,7 +1041,7 @@ func (bc *Blockchain) hasValidBlockHeightPoS(header *MsgDeSoHeader) error { // hasValidBlockViewPoS validates the view for a given block header func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { // Validate that the view is greater than the latest uncommitted block. - parentBlockNode, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1094,7 +1070,7 @@ func (bc *Blockchain) hasValidBlockViewPoS(header *MsgDeSoHeader) error { func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(header *MsgDeSoHeader) (bool, error) { // Validate that the leader proposed a valid random seed signature. - parentBlock, exists := bc.blockIndexByHash.Get(*header.PrevBlockHash) + parentBlock, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(header.PrevBlockHash, header.Height-1) if !exists { // Note: this should never happen as we only call this function after // we've validated that all ancestors exist in the block index. @@ -1115,8 +1091,20 @@ func (bc *Blockchain) hasValidProposerRandomSeedSignaturePoS(header *MsgDeSoHead return isVerified, nil } -func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, snapshotAtEpochNumber uint64) ( - bool, error) { +func (bav *UtxoView) hasValidProposerPartialSignaturePoS( + block *MsgDeSoBlock, + blockHash *BlockHash, + snapshotAtEpochNumber uint64, +) (bool, error) { + // If we aren't provided a hash, we can just compute it on the fly. + // It's more efficient for us not to recompute the hash though, so we only do it if we have to. + if blockHash == nil { + var err error + blockHash, err = block.Hash() + if err != nil { + return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem hashing block") + } + } votingPublicKey := block.Header.ProposerVotingPublicKey proposerPartialSig := block.Header.ProposerVotePartialSignature // If the proposer partial sig is nil, we can't validate it. That's an error. @@ -1140,11 +1128,6 @@ func (bav *UtxoView) hasValidProposerPartialSignaturePoS(block *MsgDeSoBlock, sn if !snapshotBlockProposerValidatorEntry.VotingPublicKey.Eq(votingPublicKey) { return false, nil } - // Get the block's hash - blockHash, err := block.Header.Hash() - if err != nil { - return false, errors.Wrapf(err, "hasValidProposerPartialSignaturePoS: Problem hashing block") - } // Now that we have the snapshot validator entry and validated that the // voting public key from this block's header matches the snapshotted // voting public key, we can validate the partial sig. 
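hasValidProposerPartialSignaturePoS above now accepts the hash as a parameter and only computes it when the caller passes nil, so a block flowing through processBlockPoS is hashed once rather than at every validation step. A sketch of that compute-once, pass-down pattern; hashPayload and verify are illustrative stand-ins, not functions from this codebase:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// hashPayload stands in for the real block.Hash(); it just hashes bytes.
func hashPayload(payload []byte) [32]byte {
	return sha256.Sum256(payload)
}

// verify accepts an optional precomputed hash. Callers that already hashed the
// payload pass it in; callers that didn't pass nil and pay for the computation
// exactly once here.
func verify(payload []byte, precomputed *[32]byte) [32]byte {
	if precomputed == nil {
		h := hashPayload(payload)
		precomputed = &h
	}
	return *precomputed
}

func main() {
	payload := []byte("block bytes")

	// Hash once up front, then reuse the result across every downstream check.
	h := hashPayload(payload)
	fmt.Printf("precomputed: %x\n", verify(payload, &h))

	// Or let the callee compute it lazily when no hash is available.
	fmt.Printf("lazy:        %x\n", verify(payload, nil))
}
```

The nil check mirrors the added `if blockHash == nil` branch: callers that already paid for the hash thread it through, while callers without one still get a correct result at the cost of a single extra hash.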
@@ -1314,32 +1297,97 @@ func (bc *Blockchain) getStoredLineageFromCommittedTip(header *MsgDeSoHeader) ( _missingBlockHashes []*BlockHash, _err error, ) { - highestCommittedBlock, idx := bc.GetCommittedTip() - if idx == -1 || highestCommittedBlock == nil { + highestCommittedBlock, exists := bc.GetCommittedTip() + if !exists || highestCommittedBlock == nil { return nil, nil, errors.New("getStoredLineageFromCommittedTip: No committed blocks found") } currentHash := header.PrevBlockHash.NewBlockHash() + currentHeight := header.Height - 1 ancestors := []*BlockNode{} - prevHeight := header.Height - prevView := header.GetView() + childHeight := header.Height + childView := header.GetView() + calledWithHeader := fmt.Sprintf("\nCalled with header for block at height %v", header.Height) + + bigLogger := func(otherBlock *BlockNode) { + // Log HighestCommittedBlock + glog.V(0).Infof("HighestCommittedBlock: %v", highestCommittedBlock.String()) + // Get the highest committed block from the block index cache. + highestCommittedBlockFromCache, highestCommittedBlockExistsInCache := + bc.blockIndex.blockIndexByHash.Get(*highestCommittedBlock.Hash) + if highestCommittedBlockExistsInCache { + glog.V(0).Infof("HighestCommittedBlockFromCache: %v", highestCommittedBlockFromCache.String()) + } else { + glog.V(0).Infof("HighestCommittedBlockFromCache: nil") + } + + highestCommittedBlockFromDB := GetHeightHashToNodeInfo( + bc.db, bc.snapshot, highestCommittedBlock.Height, highestCommittedBlock.Hash, false) + if highestCommittedBlockFromDB != nil { + glog.V(0).Infof("HighestCommittedBlockFromDB: %v", highestCommittedBlockFromDB.String()) + } else { + glog.V(0).Infof("HighestCommittedBlockFromDB: nil") + } + + if otherBlock == nil { + glog.V(0).Infof("OtherBlock: nil") + return + } + // Log OtherBlock + glog.V(0).Infof("OtherBlock: %v", otherBlock.String()) + + // Get the other block from the block index cache. + otherBlockFromCache, otherBlockExistsInCache := + bc.blockIndex.blockIndexByHash.Get(*otherBlock.Hash) + if otherBlockExistsInCache { + glog.V(0).Infof("OtherBlockFromCache: %v", otherBlockFromCache.String()) + } else { + glog.V(0).Infof("OtherBlockFromCache: nil") + } + + otherBlockFromDB := GetHeightHashToNodeInfo( + bc.db, bc.snapshot, otherBlock.Height, otherBlock.Hash, false) + if otherBlockFromDB != nil { + glog.V(0).Infof("OtherBlockFromDB: %v", otherBlockFromDB.String()) + } else { + glog.V(0).Infof("OtherBlockFromDB: nil") + } + } for { - currentBlock, exists := bc.blockIndexByHash.Get(*currentHash) - if !exists { + // TODO: is currentHeight correct here? + currentBlock, currentBlockExists := bc.blockIndex.GetBlockNodeByHashAndHeight(currentHash, currentHeight) + if !currentBlockExists { + glog.Errorf("getStoredLineageFromCommittedTip: Missing block %v - does not exist.%v", + currentHash, calledWithHeader) + bigLogger(currentBlock) return nil, []*BlockHash{currentHash}, RuleErrorMissingAncestorBlock } if currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { break } if currentBlock.IsCommitted() { + glog.Errorf("getStoredLineageFromCommittedTip: Block %v (%v) is committed. Committed tip is %v (%v). %v", + currentHash, currentHeight, highestCommittedBlock.Hash, highestCommittedBlock.Height, calledWithHeader) + bigLogger(currentBlock) return nil, nil, RuleErrorDoesNotExtendCommittedTip } if currentBlock.IsValidateFailed() { + glog.Errorf("getStoredLineageFromCommittedTip: Block %v (%v) has failed validation. 
%v", + currentHash, currentHeight, calledWithHeader) + bigLogger(currentBlock) return nil, nil, RuleErrorAncestorBlockValidationFailed } - if uint64(currentBlock.Header.Height)+1 != prevHeight { + if uint64(currentBlock.Header.Height)+1 != childHeight { + glog.Errorf("getStoredLineageFromCommittedTip: "+ + "Parent block height %v is not sequential with child block height %v. %v", + currentBlock.Header.Height, childHeight, calledWithHeader) + bigLogger(currentBlock) return nil, nil, RuleErrorParentBlockHeightNotSequentialWithChildBlockHeight } - if currentBlock.Header.GetView() >= prevView { + if currentBlock.Header.GetView() >= childView { + glog.Errorf("getStoredLineageFromCommittedTip: "+ + "Parent block view %v is greater than or equal to child block view %v. %v", + currentBlock.Header.GetView(), childView, calledWithHeader) + bigLogger(currentBlock) return nil, nil, RuleErrorParentBlockHasViewGreaterOrEqualToChildBlock } @@ -1348,42 +1396,46 @@ func (bc *Blockchain) getStoredLineageFromCommittedTip(header *MsgDeSoHeader) ( // we previously saw its header. We need to request the block again from a peer and // consider it to be missing. if !currentBlock.IsStored() { + glog.Errorf("getStoredLineageFromCommittedTip: Block %v (%v) is not stored. %v", + currentHash, currentHeight, calledWithHeader) + bigLogger(currentBlock) return nil, []*BlockHash{currentHash}, RuleErrorMissingAncestorBlock } ancestors = append(ancestors, currentBlock) currentHash = currentBlock.Header.PrevBlockHash - prevHeight = currentBlock.Header.Height - prevView = currentBlock.Header.GetView() + currentHeight = currentBlock.Header.Height - 1 + childHeight = currentBlock.Header.Height + childView = currentBlock.Header.GetView() } return collections.Reverse(ancestors), nil, nil } // getOrCreateBlockNodeFromBlockIndex returns the block node from the block index if it exists. // Otherwise, it creates a new block node and adds it to the blockIndexByHash and blockIndexByHeight. -func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - hash, err := block.Header.Hash() - if err != nil { - return nil, errors.Wrapf(err, "getOrCreateBlockNodeFromBlockIndex: Problem hashing block %v", block) +func (bc *Blockchain) getOrCreateBlockNodeFromBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + // If we aren't provided a hash, we can just compute it on the fly. + // It's more efficient for us not to recompute the hash though, so we only do it if we have to. + if hash == nil { + var err error + hash, err = block.Hash() + if err != nil { + return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem hashing block") + } } - blockNode, _ := bc.blockIndexByHash.Get(*hash) - prevBlockNode, _ := bc.blockIndexByHash.Get(*block.Header.PrevBlockHash) + blockNode, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(hash, block.Header.Height) if blockNode != nil { - // If the block node already exists, we should set its parent if it doesn't have one already. - if blockNode.Parent == nil { - blockNode.Parent = prevBlockNode - } return blockNode, nil } - newBlockNode := NewBlockNode(prevBlockNode, hash, uint32(block.Header.Height), nil, nil, block.Header, StatusNone) + newBlockNode := NewBlockNode(hash, uint32(block.Header.Height), nil, nil, block.Header, StatusNone) bc.addNewBlockNodeToBlockIndex(newBlockNode) return newBlockNode, nil } // storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to // StatusBlockStored. 
It also writes the block to the block index in badger -func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) +func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidatedHeaderInBlockIndex: Problem getting or creating block node") } @@ -1401,8 +1453,8 @@ func (bc *Blockchain) storeValidatedHeaderInBlockIndex(header *MsgDeSoHeader) (* return blockNode, nil } -func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(header *MsgDeSoHeader, wrapperError error) error { - if _, innerErr := bc.storeValidateFailedHeaderInBlockIndex(header); innerErr != nil { +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(header *MsgDeSoHeader, hash *BlockHash, wrapperError error) error { + if _, innerErr := bc.storeValidateFailedHeaderInBlockIndex(header, hash); innerErr != nil { return errors.Wrapf(innerErr, "%v", wrapperError) } return wrapperError @@ -1410,8 +1462,8 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndexWithWrapperError(head // storeValidateFailedHeaderInBlockIndex stores the header in the block index only and sets its status to // StatusHeaderValidateFailed. It does not write the header to the DB. -func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}) +func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeader, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(&MsgDeSoBlock{Header: header}, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidateFailedHeaderInBlockIndex: Problem getting or creating block node") } @@ -1432,8 +1484,8 @@ func (bc *Blockchain) storeValidateFailedHeaderInBlockIndex(header *MsgDeSoHeade // storeBlockInBlockIndex upserts the blocks into the in-memory block index & badger and updates its status to // StatusBlockStored. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. -func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeBlockInBlockIndex: Problem getting or creating block node") } @@ -1453,8 +1505,8 @@ func (bc *Blockchain) storeBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, e // status to StatusBlockValidated. If it does not have the status StatusBlockStored already, we add that as we // will store the block in the DB after updating its status. It also writes the block to the block index in // badger by calling upsertBlockAndBlockNodeToDB. 
-func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidatedBlockInBlockIndex: Problem getting or creating block node") } @@ -1483,8 +1535,8 @@ func (bc *Blockchain) storeValidatedBlockInBlockIndex(block *MsgDeSoBlock) (*Blo // status to StatusBlockValidateFailed. If it does not have the status StatusBlockStored already, we add that as we // will store the block in the DB after updating its status. It also writes the block to the block index in badger // by calling upsertBlockAndBlockNodeToDB. -func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock) (*BlockNode, error) { - blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block) +func (bc *Blockchain) storeValidateFailedBlockInBlockIndex(block *MsgDeSoBlock, hash *BlockHash) (*BlockNode, error) { + blockNode, err := bc.getOrCreateBlockNodeFromBlockIndex(block, hash) if err != nil { return nil, errors.Wrapf(err, "storeValidateFailedBlockInBlockIndex: Problem getting or creating block node") } @@ -1634,8 +1686,7 @@ func (bc *Blockchain) shouldReorg(blockNode *BlockNode, currentView uint64) bool // addTipBlockToBestChain adds the block as the new tip of the best chain. func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { - bc.bestChain = append(bc.bestChain, blockNode) - bc.bestChainMap[*blockNode.Hash] = blockNode + bc.blockIndex.setTip(blockNode) } // removeTipBlockFromBestChain removes the current tip from the best chain. It @@ -1644,9 +1695,8 @@ func (bc *Blockchain) addTipBlockToBestChain(blockNode *BlockNode) { // the bestChain slice and bestChainMap map. func (bc *Blockchain) removeTipBlockFromBestChain() *BlockNode { // Remove the last block from the best chain. - lastBlock := bc.bestChain[len(bc.bestChain)-1] - delete(bc.bestChainMap, *lastBlock.Hash) - bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] + lastBlock := bc.blockIndex.GetTip() + bc.blockIndex.setTip(lastBlock.GetParent(bc.blockIndex)) return lastBlock } @@ -1657,26 +1707,27 @@ func (bc *Blockchain) runCommitRuleOnBestChain(verifySignatures bool) error { currentBlock := bc.BlockTip() // If we can commit the grandparent, commit it. // Otherwise, we can't commit it and return nil. - blockToCommit, canCommit := bc.canCommitGrandparent(currentBlock) + blockNodeToCommit, canCommit := bc.canCommitGrandparent(currentBlock) if !canCommit { return nil } // Find all uncommitted ancestors of block to commit - _, idx := bc.GetCommittedTip() - if idx == -1 { + committedTip, exists := bc.GetCommittedTip() + if !exists { // This is an edge case we'll never hit in practice since all the PoW blocks // are committed. 
return errors.New("runCommitRuleOnBestChain: No committed blocks found") } uncommittedAncestors := []*BlockNode{} - for ii := idx + 1; ii < len(bc.bestChain); ii++ { - uncommittedAncestors = append(uncommittedAncestors, bc.bestChain[ii]) - if bc.bestChain[ii].Hash.IsEqual(blockToCommit) { - break - } + currentNode := &BlockNode{} + *currentNode = *blockNodeToCommit + for currentNode.Height > committedTip.Height { + uncommittedAncestors = append(uncommittedAncestors, currentNode) + currentNode = currentNode.GetParent(bc.blockIndex) } + uncommittedAncestors = collections.Reverse(uncommittedAncestors) for ii := 0; ii < len(uncommittedAncestors); ii++ { - if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash, verifySignatures); err != nil { + if err := bc.commitBlockPoS(uncommittedAncestors[ii].Hash, uint64(uncommittedAncestors[ii].Height), verifySignatures); err != nil { return errors.Wrapf(err, "runCommitRuleOnBestChain: Problem committing block %v", uncommittedAncestors[ii].Hash.String()) } @@ -1689,18 +1740,28 @@ func (bc *Blockchain) runCommitRuleOnBestChain(verifySignatures bool) error { // between the grandparent and parent of the new block, meaning the grandparent and parent // are proposed in consecutive views, and the "parent" is an ancestor of the incoming block // (not necessarily consecutive views). Additionally, the grandparent must not already be committed. -func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparentBlockHash *BlockHash, _canCommit bool, +func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) ( + _grandparentBlockNode *BlockNode, + _canCommit bool, ) { // TODO: Is it sufficient that the current block's header points to the parent // or does it need to have something to do with the QC? - parent := bc.bestChainMap[*currentBlock.Header.PrevBlockHash] - grandParent := bc.bestChainMap[*parent.Header.PrevBlockHash] + parent := currentBlock.GetParent(bc.blockIndex) + if parent == nil { + glog.Errorf("canCommitGrandparent: Parent block %v not found in best chain map", currentBlock.Header.PrevBlockHash.String()) + return nil, false + } + grandParent := parent.GetParent(bc.blockIndex) + if grandParent == nil { + glog.Errorf("canCommitGrandparent: Grandparent block %v not found in best chain map", parent.Header.PrevBlockHash.String()) + return nil, false + } if grandParent.IsCommitted() { return nil, false } if grandParent.Header.ProposedInView+1 == parent.Header.ProposedInView { // Then we can run the commit rule up to the grandparent! - return grandParent.Hash, true + return grandParent, true } return nil, false } @@ -1708,9 +1769,9 @@ func (bc *Blockchain) canCommitGrandparent(currentBlock *BlockNode) (_grandparen // commitBlockPoS commits the block with the given hash. Specifically, this updates the // BlockStatus to include StatusBlockCommitted and flushes the view after connecting the block // to the DB and updates relevant badger indexes with info about the block. -func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool) error { +func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, blockHeight uint64, verifySignatures bool) error { // block must be in the best chain. we grab the block node from there. 
- blockNode, exists := bc.bestChainMap[*blockHash] + blockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(blockHash, blockHeight) if !exists { return errors.Errorf("commitBlockPoS: Block %v not found in best chain map", blockHash.String()) } @@ -1720,7 +1781,7 @@ func (bc *Blockchain) commitBlockPoS(blockHash *BlockHash, verifySignatures bool return errors.Errorf("commitBlockPoS: Block %v is already committed", blockHash.String()) } // Connect a view up to block we are committing. - utxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*blockHash) + utxoViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*blockHash, uint64(blockNode.Height)) if err != nil { return errors.Wrapf(err, "commitBlockPoS: Problem initializing UtxoView: ") } @@ -1839,7 +1900,10 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er } bc.ChainLock.RLock() defer bc.ChainLock.RUnlock() - tipBlock, exists := bc.bestChainMap[*tipHash] + tipBlock, exists, err := bc.blockIndex.GetBlockNodeByHashOnly(tipHash) + if err != nil { + return nil, errors.Wrapf(err, "GetUncommittedBlocks: Problem getting block %v", tipHash.String()) + } if !exists { return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in best chain map", tipHash.String()) } @@ -1855,9 +1919,9 @@ func (bc *Blockchain) GetUncommittedBlocks(tipHash *BlockHash) ([]*BlockNode, er if currentParentHash == nil { return nil, errors.Errorf("GetUncommittedBlocks: Block %v has nil PrevBlockHash", currentBlock.Hash) } - currentBlock, _ = bc.blockIndexByHash.Get(*currentParentHash) + currentBlock, _ = bc.blockIndex.GetBlockNodeByHashAndHeight(currentParentHash, currentBlock.Header.Height-1) if currentBlock == nil { - return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in block index", currentBlock.Hash) + return nil, errors.Errorf("GetUncommittedBlocks: Block %v not found in block index", currentParentHash) } } return collections.Reverse(uncommittedBlockNodes), nil @@ -1892,18 +1956,17 @@ func (viewAndUtxoOps *BlockViewAndUtxoOps) Copy() *BlockViewAndUtxoOps { // GetUncommittedTipView builds a UtxoView to the uncommitted tip. func (bc *Blockchain) GetUncommittedTipView() (*UtxoView, error) { // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks - blockViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*bc.BlockTip().Hash) + blockTip := bc.BlockTip() + blockViewAndUtxoOps, err := bc.GetUtxoViewAndUtxoOpsAtBlockHash(*blockTip.Hash, uint64(blockTip.Height)) if err != nil { return nil, errors.Wrapf(err, "GetUncommittedTipView: Problem getting UtxoView at block hash") } return blockViewAndUtxoOps.UtxoView, nil } -func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockViewAndUtxoOps, error, bool) { - if viewAndUtxoOpsAtHash, exists := bc.blockViewCache.Get(blockHash); exists { - return viewAndUtxoOpsAtHash, nil, true - } - return nil, nil, false +func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockViewAndUtxoOps, bool) { + viewAndUtxoOpsAtHash, exists := bc.blockViewCache.Get(blockHash) + return viewAndUtxoOpsAtHash, exists } // GetUtxoViewAndUtxoOpsAtBlockHash builds a UtxoView to the block provided and returns a BlockViewAndUtxoOps @@ -1912,18 +1975,19 @@ func (bc *Blockchain) getCachedBlockViewAndUtxoOps(blockHash BlockHash) (*BlockV // all uncommitted ancestors of this block. Then it checks the block view cache to see if we have already // computed this view. 
If not, connecting the uncommitted ancestor blocks and saving to the cache. The // returned UtxoOps and FullBlock should NOT be modified. -func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( +func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash, blockHeight uint64) ( *BlockViewAndUtxoOps, error) { // Always fetch the lineage from the committed tip to the block provided first to // ensure that a valid UtxoView is returned. uncommittedAncestors := []*BlockNode{} - currentBlock, _ := bc.blockIndexByHash.Get(blockHash) + currentBlock, _ := bc.blockIndex.GetBlockNodeByHashAndHeight(&blockHash, blockHeight) if currentBlock == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", blockHash) } - highestCommittedBlock, _ := bc.GetCommittedTip() - if highestCommittedBlock == nil { + highestCommittedBlock, exists := bc.GetCommittedTip() + glog.V(4).Infof("Highest committed block: %v", highestCommittedBlock) + if !exists || highestCommittedBlock == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: No committed blocks found") } // If the provided block is committed, we need to make sure it's the committed tip. @@ -1940,7 +2004,7 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( if currentParentHash == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: Block %v has nil PrevBlockHash", currentBlock.Hash) } - currentBlock, _ = bc.blockIndexByHash.Get(*currentParentHash) + currentBlock, _ = bc.blockIndex.GetBlockNodeByHashAndHeight(currentParentHash, currentBlock.Header.Height-1) if currentBlock == nil { return nil, errors.Errorf("GetUtxoViewAndUtxoOpsAtBlockHash: Block %v not found in block index", currentParentHash) } @@ -1948,15 +2012,8 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( return nil, errors.Errorf( "GetUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") } - if currentBlock.IsCommitted() && !currentBlock.Hash.IsEqual(highestCommittedBlock.Hash) { - return nil, errors.Errorf( - "GetUtxoViewAndUtxoOpsAtBlockHash: extends from a committed block that isn't the committed tip") - } - } - viewAndUtxoOpsAtHash, err, exists := bc.getCachedBlockViewAndUtxoOps(blockHash) - if err != nil { - return nil, errors.Wrapf(err, "GetUtxoViewAndUtxoOpsAtBlockHash: Problem getting cached BlockViewAndUtxoOps") } + viewAndUtxoOpsAtHash, exists := bc.getCachedBlockViewAndUtxoOps(blockHash) if exists { viewAndUtxoOpsCopy := viewAndUtxoOpsAtHash.Copy() return viewAndUtxoOpsCopy, nil @@ -1964,12 +2021,22 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( // Connect the uncommitted blocks to the tip so that we can validate subsequent blocks utxoView := NewUtxoViewWithSnapshotCache(bc.db, bc.params, bc.postgres, bc.snapshot, bc.eventManager, bc.snapshotCache) - // TODO: there's another performance enhancement we can make here. If we have a view in the - // cache for one of the ancestors, we can skip fetching the block and connecting it by taking - // a copy of it and replacing the existing view. var utxoOps [][]*UtxoOperation var fullBlock *MsgDeSoBlock for ii := len(uncommittedAncestors) - 1; ii >= 0; ii-- { + // TODO: this optimization is disabled for now. Although this speeds up the fetching of + // a utxo view for a given block hash, it actually ends up building up a lot of data in the + // view of already committed data. 
+ // Check the cache to see if we already have a view for this block. + //cachedView, cachedAncestorExists := bc.getCachedBlockViewAndUtxoOps(*uncommittedAncestors[ii].Hash) + //if cachedAncestorExists { + // cachedViewCopy := cachedView.Copy() + // utxoView = cachedViewCopy.UtxoView + // fullBlock = cachedViewCopy.Block + // utxoOps = cachedViewCopy.UtxoOps + // continue + //} + var err error // We need to get these blocks from badger fullBlock, err = GetBlock(uncommittedAncestors[ii].Hash, bc.db, bc.snapshot) if err != nil { @@ -2003,13 +2070,16 @@ func (bc *Blockchain) GetUtxoViewAndUtxoOpsAtBlockHash(blockHash BlockHash) ( } // GetCommittedTip returns the highest committed block and its index in the best chain. -func (bc *Blockchain) GetCommittedTip() (*BlockNode, int) { - for ii := len(bc.bestChain) - 1; ii >= 0; ii-- { - if bc.bestChain[ii].IsCommitted() { - return bc.bestChain[ii], ii +func (bc *Blockchain) GetCommittedTip() (*BlockNode, bool) { + // iterate backwards from the tip node + currentNode := bc.blockIndex.GetTip() + for currentNode != nil { + if currentNode.IsCommitted() { + return currentNode, true } + currentNode = currentNode.GetParent(bc.blockIndex) } - return nil, -1 + return nil, false } // GetSafeBlocks returns all headers of blocks from which the chain can safely extend. @@ -2032,22 +2102,29 @@ func (bc *Blockchain) GetSafeBlocks() ([]*MsgDeSoHeader, error) { return headers, nil } +// TODO: this is pretty inefficient now. We may want to consider keeping all safe block nodes in memory. func (bc *Blockchain) getSafeBlockNodes() ([]*BlockNode, error) { // First get committed tip. - committedTip, idx := bc.GetCommittedTip() - if idx == -1 || committedTip == nil { - return nil, errors.New("getSafeBlockNodes: No committed blocks found") + committedTip, exists := bc.GetCommittedTip() + if !exists || committedTip == nil { + return []*BlockNode{}, nil } // Now get all blocks from the committed tip to the best chain tip. safeBlocks := []*BlockNode{committedTip} - maxHeightWithSafeBlocks := bc.getMaxSequentialBlockHeightAfter(uint64(committedTip.Height)) - for ii := uint64(committedTip.Height + 1); ii < maxHeightWithSafeBlocks+1; ii++ { + // TODO: improve performance of getMaxSequentialBlockHeightAfter. It's too slow when + // we don't have the entire block index in memory. + //maxHeightWithSafeBlocks := bc.getMaxSequentialBlockHeightAfter(uint64(committedTip.Height)) + //for ii := uint64(committedTip.Height + 1); ii < maxHeightWithSafeBlocks+1; ii++ { + // By excluding the end condition, we can get all safe blocks without doing the same + // iteration twice. + for ii := uint64(committedTip.Height + 1); ; ii++ { + blockNodes := bc.blockIndex.GetBlockNodesByHeight(ii) // If we don't have any blocks at this height, we know that any blocks at a later height are not safe blocks. - if !bc.hasBlockNodesIndexedAtHeight(ii) { + if len(blockNodes) == 0 { break } hasSeenValidatedBlockAtThisHeight := false - blockNodes := bc.getAllBlockNodesIndexedAtHeight(ii) + for _, blockNode := range blockNodes { // TODO: Are there other conditions we should consider? 
if blockNode.IsValidated() { @@ -2103,8 +2180,8 @@ func (bc *Blockchain) GetProofOfStakeGenesisQuorumCertificate() (*QuorumCertific func (bc *Blockchain) GetFinalCommittedPoWBlock() (*BlockNode, error) { // Fetch the block node for the cutover block - blockNodes, blockNodesExist := bc.blockIndexByHeight[bc.params.GetFinalPoWBlockHeight()] - if !blockNodesExist { + blockNodes := bc.blockIndex.GetBlockNodesByHeight(bc.params.GetFinalPoWBlockHeight()) + if len(blockNodes) == 0 { return nil, errors.Errorf("Error fetching cutover block nodes before height %d", bc.params.GetFinalPoWBlockHeight()) } diff --git a/lib/pos_blockchain_test.go b/lib/pos_blockchain_test.go index 5553887c4..254ff0f91 100644 --- a/lib/pos_blockchain_test.go +++ b/lib/pos_blockchain_test.go @@ -2,6 +2,7 @@ package lib import ( "bytes" + "encoding/hex" "fmt" "math" "math/rand" @@ -243,7 +244,7 @@ func TestHasValidBlockHeight(t *testing.T) { bc, _, _ := NewTestBlockchain(t) hash := NewBlockHash(RandomBytes(32)) nowTimestamp := time.Now().UnixNano() - genesisBlock := NewBlockNode(nil, hash, 1, nil, nil, &MsgDeSoHeader{ + genesisBlock := NewBlockNode(hash, 1, nil, nil, &MsgDeSoHeader{ Version: 2, TstampNanoSecs: nowTimestamp - time.Minute.Nanoseconds(), Height: 1, @@ -251,8 +252,8 @@ func TestHasValidBlockHeight(t *testing.T) { ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.bestChain = []*BlockNode{genesisBlock} - bc.blockIndexByHash.Set(*genesisBlock.Hash, genesisBlock) + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{*genesisBlock.Hash: genesisBlock}) + bc.blockIndex.blockIndexByHash.Put(*genesisBlock.Hash, genesisBlock) // Create a block with a valid header. randomPayload := RandomBytes(256) randomBLSPrivateKey := _generateRandomBLSPrivateKey(t) @@ -301,7 +302,8 @@ func TestHasValidBlockHeight(t *testing.T) { require.Equal(t, err, RuleErrorInvalidPoSBlockHeight) block.Header.Height = 2 - bc.blockIndexByHash = collections.NewConcurrentMap[BlockHash, *BlockNode]() + // TODO: make sure setting to genesis block works. 
+ bc.blockIndex = NewBlockIndex(bc.db, bc.snapshot, genesisBlock) err = bc.hasValidBlockHeightPoS(block.Header) require.Equal(t, err, RuleErrorMissingParentBlock) } @@ -317,19 +319,19 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { t.Cleanup(resetGlobalDeSoParams) hash1 := NewBlockHash(RandomBytes(32)) hash2 := NewBlockHash(RandomBytes(32)) - genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{ + genesisNode := NewBlockNode(hash1, 0, nil, nil, &MsgDeSoHeader{ Version: 2, - Height: 1, + Height: 0, ProposedInView: 1, }, StatusBlockStored|StatusBlockValidated) - block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{ + block2 := NewBlockNode(hash2, 1, nil, nil, &MsgDeSoHeader{ Version: 2, - Height: 2, + Height: 1, ProposedInView: 2, ValidatorsVoteQC: nil, ValidatorsTimeoutAggregateQC: nil, }, StatusBlockStored|StatusBlockValidated) - bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ + bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ *hash1: genesisNode, *hash2: block2, }) @@ -372,24 +374,22 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { }, }, } - blockNode, err := bc.storeBlockInBlockIndex(block) - require.NoError(t, err) newHash, err := block.Hash() require.NoError(t, err) + blockNode, err := bc.storeBlockInBlockIndex(block, newHash) + require.NoError(t, err) // Check the block index by hash - blockNodeFromIndex, exists := bc.blockIndexByHash.Get(*newHash) + blockNodeFromIndex, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(newHash, uint64(blockNode.Height)) require.True(t, exists) require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) require.Equal(t, blockNodeFromIndex.Height, uint32(2)) require.True(t, blockNodeFromIndex.IsStored()) require.False(t, blockNodeFromIndex.IsValidated()) // Check the block index by height - byHeightBlockNodes, exists := bc.blockIndexByHeight[2] - require.True(t, exists) + byHeightBlockNodes := bc.blockIndex.GetBlockNodesByHeight(2) require.Len(t, byHeightBlockNodes, 1) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash)) require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) - require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) // Check the DB for the block uncommittedBlock, err := GetBlock(newHash, bc.db, bc.snapshot) require.NoError(t, err) @@ -399,22 +399,20 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { require.NoError(t, err) require.True(t, bytes.Equal(uncommittedBytes, origBlockBytes)) // Okay now we update the status of the block to include validated. - blockNode, err = bc.storeValidatedBlockInBlockIndex(block) + blockNode, err = bc.storeValidatedBlockInBlockIndex(block, newHash) require.NoError(t, err) - blockNodeFromIndex, exists = bc.blockIndexByHash.Get(*newHash) + blockNodeFromIndex, exists = bc.blockIndex.GetBlockNodeByHashAndHeight(newHash, uncommittedBlock.Header.Height) require.True(t, exists) require.True(t, blockNodeFromIndex.Hash.IsEqual(blockNode.Hash)) require.Equal(t, blockNodeFromIndex.Height, uint32(2)) require.True(t, blockNodeFromIndex.IsStored()) require.True(t, blockNodeFromIndex.IsValidated()) // Check the block index by height. 
- byHeightBlockNodes, exists = bc.blockIndexByHeight[2] - require.True(t, exists) + byHeightBlockNodes = bc.blockIndex.GetBlockNodesByHeight(2) require.Len(t, byHeightBlockNodes, 1) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) - require.True(t, byHeightBlockNodes[*newHash].IsValidated()) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].IsValidated()) require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) - require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 1) // Okay now we'll put in another block at the same height. // Update the random seed hash so we have a new hash for the block. @@ -425,28 +423,27 @@ func TestUpsertBlockAndBlockNodeToDB(t *testing.T) { require.False(t, updatedBlockHash.IsEqual(newHash)) // Okay now put this new block in there. - blockNode, err = bc.storeBlockInBlockIndex(block) + blockNode, err = bc.storeBlockInBlockIndex(block, updatedBlockHash) require.NoError(t, err) // Make sure the blockIndexByHash is correct. - updatedBlockNode, exists := bc.blockIndexByHash.Get(*updatedBlockHash) + updatedBlockNode, exists := bc.blockIndex.GetBlockNodeByHashAndHeight(updatedBlockHash, uint64(blockNode.Height)) require.True(t, exists) require.True(t, updatedBlockNode.Hash.IsEqual(updatedBlockHash)) require.Equal(t, updatedBlockNode.Height, uint32(2)) require.True(t, updatedBlockNode.IsStored()) require.False(t, updatedBlockNode.IsValidated()) // Make sure the blockIndexByHeight is correct - byHeightBlockNodes, exists = bc.blockIndexByHeight[2] + byHeightBlockNodes = bc.blockIndex.GetBlockNodesByHeight(2) require.True(t, exists) require.Len(t, byHeightBlockNodes, 2) - require.True(t, byHeightBlockNodes[*newHash].Hash.IsEqual(newHash)) - require.True(t, byHeightBlockNodes[*updatedBlockHash].Hash.IsEqual(updatedBlockHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(newHash) || byHeightBlockNodes[1].Hash.IsEqual(newHash)) + require.True(t, byHeightBlockNodes[0].Hash.IsEqual(updatedBlockHash) || byHeightBlockNodes[1].Hash.IsEqual(updatedBlockHash)) require.True(t, bc.hasBlockNodesIndexedAtHeight(2)) - require.Len(t, bc.getAllBlockNodesIndexedAtHeight(2), 2) // If we're missing a field in the header, we should get an error // as we can't compute the hash. 
block.Header.ProposerVotingPublicKey = nil
- _, err = bc.storeBlockInBlockIndex(block)
+ _, err = bc.storeBlockInBlockIndex(block, nil)
require.Error(t, err)
}
@@ -461,23 +458,19 @@ func TestHasValidBlockViewPoS(t *testing.T) {
bc, _, _ := NewTestBlockchain(t)
hash1 := NewBlockHash(RandomBytes(32))
hash2 := NewBlockHash(RandomBytes(32))
- genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{
+ genesisNode := NewBlockNode(hash1, 1, nil, nil, &MsgDeSoHeader{
Version: 2,
Height: 1,
ProposedInView: 1,
}, StatusBlockStored|StatusBlockValidated)
- block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{
+ block2 := NewBlockNode(hash2, 2, nil, nil, &MsgDeSoHeader{
Version: 2,
Height: 2,
ProposedInView: 2,
ValidatorsVoteQC: nil,
ValidatorsTimeoutAggregateQC: nil,
}, StatusBlockStored|StatusBlockValidated)
- bc.bestChain = []*BlockNode{
- genesisNode,
- block2,
- }
- bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{
+ bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{
*hash1: genesisNode,
*hash2: block2,
})
@@ -804,13 +797,12 @@ func TestGetLineageFromCommittedTip(t *testing.T) {
}
t.Cleanup(resetGlobalDeSoParams)
hash1 := NewBlockHash(RandomBytes(32))
- genesisNode := NewBlockNode(nil, hash1, 1, nil, nil, &MsgDeSoHeader{
+ genesisNode := NewBlockNode(hash1, 1, nil, nil, &MsgDeSoHeader{
Version: 2,
Height: 1,
ProposedInView: 1,
}, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted)
- bc.bestChain = []*BlockNode{genesisNode}
- bc.blockIndexByHash = collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{
+ bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{
*hash1: genesisNode,
})
block := &MsgDeSoBlock{
@@ -840,14 +832,14 @@ func TestGetLineageFromCommittedTip(t *testing.T) {
block.Header.PrevBlockHash = hash1
// add another block to the best chain.
hash2 := NewBlockHash(RandomBytes(32))
- block2 := NewBlockNode(genesisNode, hash2, 2, nil, nil, &MsgDeSoHeader{
+ block2 := NewBlockNode(hash2, 2, nil, nil, &MsgDeSoHeader{
Version: 2,
Height: 2,
ProposedInView: 2,
PrevBlockHash: hash1,
}, StatusBlockStored|StatusBlockValidated|StatusBlockCommitted)
- bc.bestChain = append(bc.bestChain, block2)
- bc.blockIndexByHash.Set(*hash2, block2)
+ bc.blockIndex.setTip(block2)
+ bc.blockIndex.blockIndexByHash.Put(*hash2, block2)
ancestors, missingBlockHashes, err = bc.getStoredLineageFromCommittedTip(block.Header)
require.Error(t, err)
require.Equal(t, err, RuleErrorDoesNotExtendCommittedTip)
@@ -1237,21 +1229,27 @@ func TestShouldReorg(t *testing.T) {
hash1 := NewBlockHash(RandomBytes(32))
hash2 := NewBlockHash(RandomBytes(32))
hash3 := NewBlockHash(RandomBytes(32))
- bc.bestChain = []*BlockNode{
+ chain := []*BlockNode{
{
Hash: hash1,
Status: StatusBlockStored | StatusBlockValidated | StatusBlockCommitted,
+ Height: 0,
},
{
Hash: hash3,
Status: StatusBlockStored | StatusBlockValidated,
+ Height: 1,
},
}
+ bc.blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{
+ *hash1: chain[0],
+ *hash3: chain[1],
+ })
newBlock := &BlockNode{
Header: &MsgDeSoHeader{
ProposedInView: 2,
- PrevBlockHash: bc.bestChain[1].Hash,
+ PrevBlockHash: chain[1].Hash,
},
}

@@ -1275,7 +1273,7 @@
// 1. Simple reorg. Just replacing the uncommitted tip.
// 2. Create a longer chain and reorg to it.
// 3. Make sure no reorg when current view is greater than block's view
-// 4. Super happy path of simply extending current uncommitted tip.
func TestTryApplyNewTip(t *testing.T) { setBalanceModelBlockHeights(t) bc, _, _ := NewTestBlockchain(t) @@ -1319,9 +1317,9 @@ func TestTryApplyNewTip(t *testing.T) { bc.addTipBlockToBestChain(bn1) bc.addTipBlockToBestChain(bn2) bc.addTipBlockToBestChain(bn3) - bc.blockIndexByHash.Set(*hash1, bn1) - bc.blockIndexByHash.Set(*hash2, bn2) - bc.blockIndexByHash.Set(*hash3, bn3) + bc.blockIndex.blockIndexByHash.Put(*hash1, bn1) + bc.blockIndex.blockIndexByHash.Put(*hash2, bn2) + bc.blockIndex.blockIndexByHash.Put(*hash3, bn3) // Simple reorg. Just replacing the uncommitted tip. newBlock := &MsgDeSoBlock{ @@ -1337,44 +1335,47 @@ func TestTryApplyNewTip(t *testing.T) { ancestors, _, err := bc.getStoredLineageFromCommittedTip(newBlock.Header) require.NoError(t, err) checkBestChainForHash := func(hash *BlockHash) bool { - return collections.Any(bc.bestChain, func(bn *BlockNode) bool { - return bn.Hash.IsEqual(hash) - }) + _, exists, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash, nil, false) + require.NoError(t, err) + return exists } // Try to apply newBlock as tip. This should succeed. newBlockNode := &BlockNode{ Header: newBlock.Header, Hash: newBlockHash, + Height: 4, } appliedNewTip, connectedBlockHashes, disconnectedBlockHashes, err := bc.tryApplyNewTip(newBlockNode, 9, ancestors) require.NoError(t, err) require.True(t, appliedNewTip) // hash 3 should no longer be in the best chain or best chain map - _, hash3ExistsInBestChainMap := bc.bestChainMap[*hash3] + _, hash3ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash3, nil, false) + require.NoError(t, err) require.False(t, hash3ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash3)) require.Len(t, connectedBlockHashes, 1) require.Len(t, disconnectedBlockHashes, 1) // newBlock should be in the best chain and the best chain map and should be the tip. - _, newBlockExistsInBestChainMap := bc.bestChainMap[*newBlockHash] + _, newBlockExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(newBlockHash, nil, false) + require.NoError(t, err) require.True(t, newBlockExistsInBestChainMap) require.True(t, checkBestChainForHash(newBlockHash)) require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) // Make sure block 2 and block 1 are still in the best chain. - _, hash2ExistsInBestChainMap := bc.bestChainMap[*hash2] + _, hash2ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash2, nil, false) + require.NoError(t, err) require.True(t, hash2ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash2)) - _, hash1ExistsInBestChainMap := bc.bestChainMap[*hash1] + _, hash1ExistsInBestChainMap := bc.blockIndex.blockIndexByHash.Get(*hash1) require.True(t, hash1ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash1)) // Remove newBlock from the best chain and block index to reset the state. - bc.bestChain = bc.bestChain[:len(bc.bestChain)-1] - delete(bc.bestChainMap, *newBlockHash) + bc.blockIndex.setTip(bc.blockIndex.GetTip().GetParent(bc.blockIndex)) // Add block 3 back bc.addTipBlockToBestChain(bn3) @@ -1403,8 +1404,8 @@ func TestTryApplyNewTip(t *testing.T) { Height: 6, }, } - bc.blockIndexByHash.Set(*hash4, bn4) - bc.blockIndexByHash.Set(*hash5, bn5) + bc.blockIndex.blockIndexByHash.Put(*hash4, bn4) + bc.blockIndex.blockIndexByHash.Put(*hash5, bn5) // Set new block's parent to hash5 newBlockNode.Header.PrevBlockHash = hash5 @@ -1422,19 +1423,23 @@ func TestTryApplyNewTip(t *testing.T) { // newBlockHash should be tip. 
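The assertions in this test (one connected hash, one disconnected hash, tip swapped) follow the usual reorg bookkeeping. Here is a hedged sketch of that bookkeeping under the simplifying assumption that parents are plain pointers; blk and applyNewTip are invented names modeling the shape of tryApplyNewTip, not its real signature.

package main

import "fmt"

type blk struct {
	name   string
	parent *blk
}

// applyNewTip returns the blocks to disconnect (old tip back to the common
// ancestor) and to connect (ancestor down to the new tip). The real function
// also checks commit status and view numbers, which this sketch omits.
func applyNewTip(oldTip, newTip *blk) (disconnect, connect []*blk) {
	onNewBranch := map[*blk]bool{}
	for b := newTip; b != nil; b = b.parent {
		onNewBranch[b] = true
	}
	ancestor := oldTip
	for ; ancestor != nil && !onNewBranch[ancestor]; ancestor = ancestor.parent {
		disconnect = append(disconnect, ancestor)
	}
	for b := newTip; b != ancestor; b = b.parent {
		connect = append([]*blk{b}, connect...)
	}
	return disconnect, connect
}

func main() {
	genesis := &blk{name: "genesis"}
	b1 := &blk{name: "b1", parent: genesis}
	b3 := &blk{name: "b3", parent: b1}       // old uncommitted tip
	newTip := &blk{name: "new", parent: b1}  // competing block at the same parent
	d, c := applyNewTip(b3, newTip)
	fmt.Println(len(d), len(c)) // 1 1: disconnect b3, connect newTip
}

This matches the simple-reorg case the test asserts: one disconnected hash and one connected hash.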
require.True(t, bc.BlockTip().Hash.IsEqual(newBlockHash)) // hash 3 should no longer be in the best chain or best chain map - _, hash3ExistsInBestChainMap = bc.bestChainMap[*hash3] + _, hash3ExistsInBestChainMap, err = bc.GetBlockFromBestChainByHashAndOptionalHeight(hash3, nil, false) + require.NoError(t, err) require.False(t, hash3ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash3)) // hash 2 should no longer be in the best chain or best chain map - _, hash2ExistsInBestChainMap = bc.bestChainMap[*hash2] + _, hash2ExistsInBestChainMap, err = bc.GetBlockFromBestChainByHashAndOptionalHeight(hash2, nil, false) + require.NoError(t, err) require.False(t, hash2ExistsInBestChainMap) require.False(t, checkBestChainForHash(hash2)) // hash 4 should be in the best chain and the best chain map - _, hash4ExistsInBestChainMap := bc.bestChainMap[*hash4] + _, hash4ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash4, nil, false) + require.NoError(t, err) require.True(t, hash4ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash4)) // hash 5 should be in the best chain and the best chain map - _, hash5ExistsInBestChainMap := bc.bestChainMap[*hash5] + _, hash5ExistsInBestChainMap, err := bc.GetBlockFromBestChainByHashAndOptionalHeight(hash5, nil, false) + require.NoError(t, err) require.True(t, hash5ExistsInBestChainMap) require.True(t, checkBestChainForHash(hash5)) @@ -1442,11 +1447,12 @@ func TestTryApplyNewTip(t *testing.T) { require.Len(t, connectedBlockHashes, 3) require.Len(t, disconnectedBlockHashes, 2) - // Reset the state of the best chain. - delete(bc.bestChainMap, *hash4) - delete(bc.bestChainMap, *hash5) - delete(bc.bestChainMap, *newBlockHash) - bc.bestChain = bc.bestChain[:len(bc.bestChain)-3] + // Reset the state of the best chain by moving the tip back to newBlock's parent. + bc.blockIndex.setTip(newBlockNode.GetParent(bc.blockIndex)) // Add block 2 and 3 back. bc.addTipBlockToBestChain(bn2) @@ -1516,8 +1522,8 @@ func TestCanCommitGrandparent(t *testing.T) { PrevBlockHash: hash1, }, } - bc.bestChainMap[*hash1] = bn1 - bc.bestChainMap[*hash2] = bn2 + bc.blockIndex.addNewBlockNodeToBlockIndex(bn1) + bc.blockIndex.addNewBlockNodeToBlockIndex(bn2) // define incoming block hash3 := NewBlockHash(RandomBytes(32)) @@ -1536,7 +1542,7 @@ func TestCanCommitGrandparent(t *testing.T) { // (meaning they are in consecutive views). So we should be able // to commit bn1. grandparentHash, canCommit := bc.canCommitGrandparent(bn3) - require.True(t, hash1.IsEqual(grandparentHash)) + require.True(t, hash1.IsEqual(grandparentHash.Hash)) require.True(t, canCommit) // Update bn1 to be committed. We no longer can run the commit since bn1 is already @@ -1611,7 +1617,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Add one more block to the best chain, but have the view be further in the future. // this should trigger a commit on block 2. - blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 14, 20, 429) + blockTemplate4 := _generateBlockAndAddToBestChain(testMeta, 15, 20, 429) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1623,7 +1629,7 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { // Okay so add block 5 to the best chain.
This should NOT trigger a commit on block 3 // as block 4 is not a direct child of block 3 based on its view. - blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 15, 21, 654) + blockTemplate5 := _generateBlockAndAddToBestChain(testMeta, 16, 21, 654) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1634,9 +1640,9 @@ func TestRunCommitRuleOnBestChain(t *testing.T) { _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, blockHash4, blockHash5}, blockHash2) // If we now add a block that is a descendent of block 5, we should be able to commit - // blocks 3 and 4 as block 4 and 5 possess a direct parent child relationship and + // blocks 3 and 4 as block 4 and 5 possess a direct parent child relationship, and // we have a descendent of block 5. - blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 16, 22, 912) + blockTemplate6 := _generateBlockAndAddToBestChain(testMeta, 17, 22, 912) require.NoError(t, err) err = testMeta.chain.runCommitRuleOnBestChain(true) require.NoError(t, err) @@ -1659,7 +1665,8 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } for _, committedHash := range committedBlocks { // Okay so let's make sure the block is committed. - blockNode, exists := testMeta.chain.bestChainMap[*committedHash] + blockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHashAndOptionalHeight(committedHash, nil, false) + require.NoError(testMeta.t, err) require.True(testMeta.t, exists) require.True(testMeta.t, blockNode.IsCommitted()) @@ -1684,7 +1691,8 @@ func _verifyCommitRuleHelper(testMeta *TestMeta, committedBlocks []*BlockHash, u } for _, uncommittedBlockHash := range uncommittedBlocks { // Okay so let's make sure the block is uncommitted. - blockNode, exists := testMeta.chain.bestChainMap[*uncommittedBlockHash] + blockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHashAndOptionalHeight(uncommittedBlockHash, nil, false) + require.NoError(testMeta.t, err) require.True(testMeta.t, exists) require.False(testMeta.t, blockNode.IsCommitted()) // TODO: Verify DB results?? Kinda silly to make sure everything is missing. 
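The commit-rule assertions above all reduce to the two-chain rule that canCommitGrandparent implements: a block's grandparent may be committed once its parent and grandparent sit in consecutive views, i.e. possess a direct parent-child relationship. Below is a toy model of just that view arithmetic; the real check also considers whether the grandparent is already committed, and the field names here are invented for illustration.

package main

import "fmt"

type hdr struct {
	view   uint64
	parent *hdr
}

// canCommitGrandparent reports whether b's grandparent is committable:
// consecutive views between parent and grandparent mean no timeout
// separated them, so the grandparent has a two-chain on top of it.
func canCommitGrandparent(b *hdr) (*hdr, bool) {
	if b.parent == nil || b.parent.parent == nil {
		return nil, false
	}
	parent, grandparent := b.parent, b.parent.parent
	return grandparent, parent.view == grandparent.view+1
}

func main() {
	gp := &hdr{view: 10}
	p := &hdr{view: 11, parent: gp}
	b := &hdr{view: 13, parent: p} // the gap above p does not matter
	_, ok := canCommitGrandparent(b)
	fmt.Println(ok) // true: views 10 and 11 are consecutive
}

This is why the timeout block in the tests blocks a commit: it breaks the consecutive-view link between its parent and grandparent.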
@@ -1839,7 +1847,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { futureBlockHash, err = futureBlock.Hash() require.NoError(t, err) - futureBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*futureBlockHash) + futureBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(futureBlockHash, futureBlock.Header.Height) require.True(t, exists) require.False(t, futureBlockNode.IsCommitted()) require.True(t, futureBlockNode.IsStored()) @@ -1848,10 +1856,12 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { } var timeoutBlockHash *BlockHash + var timeoutBlockHeight uint64 { // Okay let's timeout view 15 var timeoutBlock *MsgDeSoBlock timeoutBlock = _generateRealBlock(testMeta, 15, 16, 381, blockHash3, true) + timeoutBlockHeight = timeoutBlock.Header.Height success, _, _, err := testMeta.chain.ProcessBlockPoS(timeoutBlock, 15, true) fmt.Println(err) require.True(t, success) @@ -1874,12 +1884,18 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { // Timeout block will no longer be in best chain, and will still be in an uncommitted state in the block index _verifyCommitRuleHelper(testMeta, []*BlockHash{blockHash1, blockHash2}, []*BlockHash{blockHash3, reorgBlockHash}, blockHash2) _verifyRandomSeedHashHelper(testMeta, reorgBlock) - _, exists := testMeta.chain.bestChainMap[*timeoutBlockHash] + _, exists, err := testMeta.chain.GetBlockFromBestChainByHashAndOptionalHeight(timeoutBlockHash, nil, false) + require.NoError(t, err) require.False(t, exists) - timeoutBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*timeoutBlockHash) + timeoutBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(timeoutBlockHash, timeoutBlockHeight) require.True(t, exists) require.False(t, timeoutBlockNode.IsCommitted()) + + // The reorg block hash should be in the block index now. 
+ reorgBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(reorgBlockHash, reorgBlock.Header.Height) + require.True(t, exists) + require.True(t, reorgBlockNode.IsStored()) } var dummyParentBlockHash, orphanBlockHash *BlockHash { @@ -1905,7 +1921,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.Len(t, missingBlockHashes, 1) require.True(t, missingBlockHashes[0].IsEqual(dummyParentBlockHash)) require.NoError(t, err) - orphanBlockInIndex, orphanBlockExists := testMeta.chain.blockIndexByHash.Get(*orphanBlockHash) + orphanBlockInIndex, orphanBlockExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(orphanBlockHash, orphanBlock.Header.Height) require.True(t, orphanBlockExists) require.NotNil(t, orphanBlockInIndex) require.True(t, orphanBlockInIndex.IsStored()) @@ -1918,7 +1934,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.Len(t, missingBlockHashes, 0) require.NoError(t, err) - orphanBlockInIndex, orphanBlockExists = testMeta.chain.blockIndexByHash.Get(*orphanBlockHash) + orphanBlockInIndex, orphanBlockExists = testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(orphanBlockHash, orphanBlock.Header.Height) require.True(t, orphanBlockExists) require.NotNil(t, orphanBlockInIndex) require.True(t, orphanBlockInIndex.IsStored()) @@ -1944,7 +1960,7 @@ func testProcessBlockPoS(t *testing.T, testMeta *TestMeta) { require.True(t, missingBlockHashes[0].IsEqual(randomHash)) require.NoError(t, err) - malformedOrphanBlockInIndex, malformedOrphanBlockExists := testMeta.chain.blockIndexByHash.Get(*malformedOrphanBlockHash) + malformedOrphanBlockInIndex, malformedOrphanBlockExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(malformedOrphanBlockHash, malformedOrphanBlock.Header.Height) require.True(t, malformedOrphanBlockExists) require.True(t, malformedOrphanBlockInIndex.IsValidateFailed()) require.True(t, malformedOrphanBlockInIndex.IsStored()) @@ -1982,11 +1998,12 @@ func TestGetSafeBlocks(t *testing.T) { testMeta := NewTestPoSBlockchainWithValidators(t) committedHash := testMeta.chain.BlockTip().Hash var block1 *MsgDeSoBlock block1 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight), uint64(testMeta.savedHeight), 1723, committedHash, false) block1Hash, err := block1.Hash() require.NoError(t, err) // Add block 1 w/ stored and validated - bn1, err := testMeta.chain.storeValidatedBlockInBlockIndex(block1) + bn1, err := testMeta.chain.storeValidatedBlockInBlockIndex(block1, nil) require.NoError(t, err) require.True(t, bn1.Hash.IsEqual(block1Hash)) // Create block 2 w/ block 1 as parent and add it to the block index w/ stored & validated var block2 *MsgDeSoBlock block2 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+1), uint64(testMeta.savedHeight+1), 1293, block1Hash, false) block2Hash, err := block2.Hash() require.NoError(t, err) - bn2, err := testMeta.chain.storeValidatedBlockInBlockIndex(block2) + bn2, err := testMeta.chain.storeValidatedBlockInBlockIndex(block2, nil) require.NoError(t, err) require.True(t, bn2.Hash.IsEqual(block2Hash)) // Add block 3 only as stored and validated var block3 *MsgDeSoBlock block3 = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+2), 1372, block2Hash, false) - bn3, err := testMeta.chain.storeValidatedBlockInBlockIndex(block3) + bn3, err := testMeta.chain.storeValidatedBlockInBlockIndex(block3, nil) require.NoError(t,
err) block3Hash, err := block3.Hash() require.NoError(t, err) @@ -2008,7 +2025,7 @@ func TestGetSafeBlocks(t *testing.T) { // Add block 3' only as stored var block3Prime *MsgDeSoBlock block3Prime = _generateRealBlock(testMeta, uint64(testMeta.savedHeight+2), uint64(testMeta.savedHeight+3), 137175, block2Hash, false) - bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime) + bn3Prime, err := testMeta.chain.storeBlockInBlockIndex(block3Prime, nil) require.NoError(t, err) block3PrimeHash, err := block3Prime.Hash() require.NoError(t, err) @@ -2019,7 +2036,7 @@ func TestGetSafeBlocks(t *testing.T) { block5.Header.Height = uint64(testMeta.savedHeight + 5) block5Hash, err := block5.Hash() require.NoError(t, err) - _, err = testMeta.chain.storeValidatedBlockInBlockIndex(block5) + _, err = testMeta.chain.storeValidatedBlockInBlockIndex(block5, nil) require.NoError(t, err) // Okay let's get the safe blocks. safeBlocks, err := testMeta.chain.GetSafeBlocks() @@ -2040,7 +2057,7 @@ func TestGetSafeBlocks(t *testing.T) { require.False(t, _checkSafeBlocksForBlockHash(block5Hash, safeBlocks)) // Update block 3 prime to be validated and it should now be a safe block. - bn3Prime, err = testMeta.chain.storeValidatedBlockInBlockIndex(block3Prime) + bn3Prime, err = testMeta.chain.storeValidatedBlockInBlockIndex(block3Prime, nil) require.NoError(t, err) require.True(t, bn3Prime.IsValidated()) safeBlocks, err = testMeta.chain.GetSafeBlocks() @@ -2067,7 +2084,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2088,7 +2105,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.True(t, blockNode.IsValidateFailed()) @@ -2115,7 +2132,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.False(t, exists) } @@ -2167,7 +2184,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := realBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, realBlock.Header.Height) require.False(t, exists) } { @@ -2185,7 +2202,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. 
blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2210,7 +2227,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.False(t, exists) } { @@ -2261,7 +2278,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // Get the block node from the block index. blockHash, err := nextEpochBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, nextEpochBlock.Header.Height) require.False(t, exists) } { @@ -2290,7 +2307,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // The block shouldn't be in the block index. blockHash, err := twoEpochsInFutureBlock.Hash() require.NoError(t, err) - _, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, twoEpochsInFutureBlock.Header.Height) require.False(t, exists) } { @@ -2307,7 +2324,7 @@ func TestProcessOrphanBlockPoS(t *testing.T) { // The block should be in the block index. blockHash, err := prevEpochBlock.Hash() require.NoError(t, err) - blockNode, exists := testMeta.chain.blockIndexByHash.Get(*blockHash) + blockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockHash, prevEpochBlock.Header.Height) require.True(t, exists) require.True(t, blockNode.IsStored()) require.False(t, blockNode.IsValidateFailed()) @@ -2323,7 +2340,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { utxoView := _newUtxoView(testMeta) snapshotEpochNumber, err := utxoView.GetCurrentSnapshotEpochNumber() require.NoError(t, err) - isValid, err := utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err := utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.True(t, isValid) @@ -2331,7 +2348,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { realVotingPublicKey := realBlock.Header.ProposerVotingPublicKey { realBlock.Header.ProposerVotingPublicKey = _generateRandomBLSPrivateKey(t).PublicKey() - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) // Reset the proposer voting public key @@ -2343,7 +2360,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { incorrectPayload := consensus.GetVoteSignaturePayload(13, testMeta.chain.BlockTip().Hash) realBlock.Header.ProposerVotePartialSignature, err = testMeta.blsPubKeyToBLSKeyMap[realBlock.Header.ProposerVotingPublicKey.ToString()].Sign(incorrectPayload[:]) - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) } @@ 
-2356,7 +2373,7 @@ func TestHasValidProposerPartialSignaturePoS(t *testing.T) { correctPayload := consensus.GetVoteSignaturePayload(12, realBlockHash) wrongPrivateKey := _generateRandomBLSPrivateKey(t) realBlock.Header.ProposerVotePartialSignature, err = wrongPrivateKey.Sign(correctPayload[:]) - isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, snapshotEpochNumber) + isValid, err = utxoView.hasValidProposerPartialSignaturePoS(realBlock, nil, snapshotEpochNumber) require.NoError(t, err) require.False(t, isValid) } @@ -2375,7 +2392,7 @@ func TestHasValidProposerRandomSeedSignaturePoS(t *testing.T) { require.NoError(t, err) realBlockHash, err := realBlock.Hash() require.NoError(t, err) - realBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*realBlockHash) + realBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(realBlockHash, realBlock.Header.Height) require.True(t, exists) require.True(t, realBlockNode.IsStored()) require.False(t, realBlockNode.IsValidateFailed()) @@ -2461,13 +2478,13 @@ func _generateRealBlockWithFailingTxn(testMeta *TestMeta, blockHeight uint64, vi } // TODO: Get real seed signature. - prevBlock, exists := testMeta.chain.blockIndexByHash.Get(*prevBlockHash) + prevBlock, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(prevBlockHash, blockHeight-1) require.True(testMeta.t, exists) // Always update the testMeta latestBlockView - latestBlockViewAndUtxoOps, err := testMeta.chain.GetUtxoViewAndUtxoOpsAtBlockHash(*prevBlockHash) + latestBlockViewAndUtxoOps, err := testMeta.chain.GetUtxoViewAndUtxoOpsAtBlockHash(*prevBlockHash, blockHeight-1) require.NoError(testMeta.t, err) latestBlockView := latestBlockViewAndUtxoOps.UtxoView - latestBlockNode, latestBlockNodeExists := testMeta.chain.blockIndexByHash.Get(*prevBlockHash) + latestBlockNode, latestBlockNodeExists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(prevBlockHash, blockHeight-1) require.True(testMeta.t, latestBlockNodeExists) latestBlockHeight := latestBlockNode.Height testMeta.posMempool.UpdateLatestBlock(latestBlockView, uint64(latestBlockHeight)) @@ -2521,10 +2538,10 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se require.NoError(testMeta.t, err) // Add block to block index. - blockNode, err := testMeta.chain.storeBlockInBlockIndex(msgDesoBlock) + blockNode, err := testMeta.chain.storeBlockInBlockIndex(msgDesoBlock, nil) require.NoError(testMeta.t, err) require.True(testMeta.t, blockNode.IsStored()) - _, exists := testMeta.chain.blockIndexByHash.Get(*newBlockHash) + _, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(newBlockHash, msgDesoBlock.Header.Height) require.True(testMeta.t, exists) // Remove the transactions from this block from the mempool. // This prevents nonce reuse issues when trying to make failing blocks. @@ -2537,17 +2554,20 @@ func _generateDummyBlock(testMeta *TestMeta, blockHeight uint64, view uint64, se // _generateBlockAndAddToBestChain generates a BlockTemplate by calling _generateRealBlock and then adds it to the // best chain. Finally it updates the PosMempool's latest block view. 
func _generateBlockAndAddToBestChain(testMeta *TestMeta, blockHeight uint64, view uint64, seed int64) *MsgDeSoBlock { - blockTemplate := _generateRealBlock(testMeta, blockHeight, view, seed, testMeta.chain.BlockTip().Hash, false) + prevBlockNode, exists, err := testMeta.chain.GetBlockFromBestChainByHeight(blockHeight-1, false) + require.NoError(testMeta.t, err) + require.True(testMeta.t, exists) + blockTemplate := _generateRealBlock(testMeta, blockHeight, view, seed, prevBlockNode.Hash, false) var msgDesoBlock *MsgDeSoBlock msgDesoBlock = blockTemplate newBlockHash, err := msgDesoBlock.Hash() require.NoError(testMeta.t, err) // Add block to block index. - blockNode, err := testMeta.chain.storeValidatedBlockInBlockIndex(msgDesoBlock) + blockNode, err := testMeta.chain.storeValidatedBlockInBlockIndex(msgDesoBlock, nil) require.NoError(testMeta.t, err) require.True(testMeta.t, blockNode.IsStored()) require.True(testMeta.t, blockNode.IsValidated()) - newBlockNode, exists := testMeta.chain.blockIndexByHash.Get(*newBlockHash) + newBlockNode, exists := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(newBlockHash, msgDesoBlock.Header.Height) require.True(testMeta.t, exists) testMeta.chain.addTipBlockToBestChain(newBlockNode) // Update the latest block view @@ -2678,7 +2698,7 @@ func _getFullRealBlockTemplate( // Get leader voting private key. leaderVotingPrivateKey := testMeta.pubKeyToBLSKeyMap[leaderPublicKey] // Get hash of last block - chainTip, _ := testMeta.chain.blockIndexByHash.Get(*blockTemplate.Header.PrevBlockHash) + chainTip, _ := testMeta.chain.blockIndex.GetBlockNodeByHashAndHeight(blockTemplate.Header.PrevBlockHash, blockTemplate.Header.Height-1) chainTipHash := chainTip.Hash // Get the vote signature payload // Hack to get view numbers working properly w/ PoW blocks. 
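A recurring pattern in these hunks is that lookups now carry an expected height next to the hash (GetBlockNodeByHashAndHeight, and GetUtxoViewAndUtxoOpsAtBlockHash gaining a height argument). A plausible reading, sketched with stand-in types below: once the in-memory index is a bounded cache rather than a complete map, a height-qualified key lets a cache miss fall back to a cheap height-prefixed DB seek. This is an assumption about intent, not the PR's actual storage schema.

package main

import "fmt"

type hash [32]byte

// key pairs height with hash, height-first, so a DB keyed the same way
// can be seeked by height prefix on a cache miss.
type key struct {
	height uint64
	h      hash
}

type blockIndex struct {
	cache map[key]string // stand-in for the LRU of block nodes
	db    map[key]string // stand-in for the on-disk index
}

func (bi *blockIndex) getByHashAndHeight(h hash, height uint64) (string, bool) {
	k := key{height: height, h: h}
	if v, ok := bi.cache[k]; ok {
		return v, true
	}
	// On a miss, fall back to the height-keyed store and repopulate the cache.
	v, ok := bi.db[k]
	if ok {
		bi.cache[k] = v
	}
	return v, ok
}

func main() {
	bi := &blockIndex{
		cache: map[key]string{},
		db:    map[key]string{{height: 3, h: hash{7}}: "block3"},
	}
	v, ok := bi.getByHashAndHeight(hash{7}, 3)
	fmt.Println(v, ok) // block3 true
}

It would also explain the hash-only variant (GetBlockNodeByHashOnly) returning an error: without a height, the fallback lookup can fail in ways a pure map read never could.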
@@ -2819,6 +2839,7 @@ func NewTestPoSBlockchainWithValidators(t *testing.T) *TestMeta { for ii := 0; ii < 10; ii++ { _, err := miner.MineAndProcessSingleBlock(0 /*threadIndex*/, oldPool) require.NoError(t, err) } m0PubBytes, _, _ := Base58CheckDecode(m0Pub) diff --git a/lib/pos_consensus.go b/lib/pos_consensus.go index ea731def3..0f412bba2 100644 --- a/lib/pos_consensus.go +++ b/lib/pos_consensus.go @@ -238,6 +238,7 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( event *consensus.FastHotStuffEvent, expectedEventType consensus.FastHotStuffEventType, ) error { + glog.V(3).Infof("FastHotStuffConsensus.handleBlockProposalEvent: %s", event.ToString()) // Validate that the event's type is the expected proposal event type if !isValidBlockProposalEvent(event, expectedEventType) { return errors.Errorf("Unexpected event type: %v vs %v", event.EventType, expectedEventType) } @@ -250,7 +251,10 @@ func (fc *FastHotStuffConsensus) handleBlockProposalEvent( // Fetch the parent block parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) - parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash.Get(*parentBlockHash) + parentBlock, parentBlockExists, err := fc.blockchain.blockIndex.GetBlockNodeByHashOnly(parentBlockHash) + if err != nil { + return errors.Errorf("Error fetching parent block: %v", parentBlockHash) + } if !parentBlockExists { return errors.Errorf("Error fetching parent block: %v", parentBlockHash) } @@ -487,7 +491,8 @@ func (fc *FastHotStuffConsensus) HandleLocalTimeoutEvent(event *consensus.FastHo tipBlockHash := BlockHashFromConsensusInterface(event.TipBlockHash) // Fetch the HighQC from the Blockchain struct - tipBlockNode, tipBlockExists := fc.blockchain.blockIndexByHash.Get(*tipBlockHash) + // TODO: validate that TipHeight is a uint32 + tipBlockNode, tipBlockExists := fc.blockchain.blockIndex.GetBlockNodeByHashAndHeight(tipBlockHash, event.TipBlockHeight) if !tipBlockExists { return errors.Errorf("FastHotStuffConsensus.HandleLocalTimeoutEvent: Error fetching tip block: %v", tipBlockHash) } @@ -558,13 +563,17 @@ func (fc *FastHotStuffConsensus) HandleValidatorTimeout(pp *Peer, msg *MsgDeSoVa // If we don't have the highQC's block on hand, then we need to request it from the peer. We do // that first before storing the timeout message locally in the FastHotStuffEventLoop. This // prevents spamming of timeout messages by peers. - if !fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) { - err := errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) + hasBlockInBlockIndex, err := fc.blockchain.HasBlockInBlockIndex(msg.HighQC.BlockHash) + if err != nil { + return nil, errors.Wrapf(err, "FastHotStuffConsensus.HandleValidatorTimeout: Error fetching block: ") + } + if !hasBlockInBlockIndex { + err = errors.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Missing highQC's block: %v", msg.HighQC.BlockHash) return []*BlockHash{msg.HighQC.BlockHash}, err } // Process the timeout message locally in the FastHotStuffEventLoop - if err := fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { + if err = fc.fastHotStuffEventLoop.ProcessValidatorTimeout(msg); err != nil { // If we can't process the timeout locally, then it must somehow be malformed, stale, // or a duplicate vote/timeout for the same view.
glog.Errorf("FastHotStuffConsensus.HandleValidatorTimeout: Error processing timeout msg: %v", err) @@ -693,7 +702,7 @@ func (fc *FastHotStuffConsensus) tryProcessBlockAsNewTip(block *MsgDeSoBlock) ([ return nil, errors.Errorf("Error hashing tip block: %v", err) } - utxoViewAndUtxoOps, err := fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*tipBlockHash) + utxoViewAndUtxoOps, err := fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*tipBlockHash, tipBlock.Height) if err != nil { return nil, errors.Errorf("Error fetching UtxoView for tip block: %v", err) } @@ -733,13 +742,16 @@ func (fc *FastHotStuffConsensus) produceUnsignedBlockForBlockProposalEvent( parentBlockHash := BlockHashFromConsensusInterface(event.QC.GetBlockHash()) // Fetch the parent block - parentBlock, parentBlockExists := fc.blockchain.blockIndexByHash.Get(*parentBlockHash) + parentBlock, parentBlockExists, err := fc.blockchain.blockIndex.GetBlockNodeByHashOnly(parentBlockHash) + if err != nil { + return nil, errors.Errorf("Error fetching parent block: %v", parentBlockHash) + } if !parentBlockExists { return nil, errors.Errorf("Error fetching parent block: %v", parentBlockHash) } // Build a UtxoView at the parent block - parentUtxoViewAndUtxoOps, err := fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*parentBlockHash) + parentUtxoViewAndUtxoOps, err := fc.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*parentBlockHash, uint64(parentBlock.Height)) if err != nil { // This should never happen as long as the parent block is a descendant of the committed tip. return nil, errors.Errorf("Error fetching UtxoView for parent block: %v", parentBlockHash) diff --git a/lib/pos_consensus_test.go b/lib/pos_consensus_test.go index 130aa901c..8010e5aeb 100644 --- a/lib/pos_consensus_test.go +++ b/lib/pos_consensus_test.go @@ -102,6 +102,11 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { currentView := blockHeader.ValidatorsVoteQC.GetView() + 1 nextView := currentView + 1 + blockIndex := NewBlockIndex(nil, nil, nil) + blockIndex.setBlockIndexFromMap(map[BlockHash]*BlockNode{ + *blockHash: {Header: blockHeader, Height: uint32(blockHeader.Height), Hash: blockHash}, + }) + // Create a mock consensus fastHotStuffConsensus := FastHotStuffConsensus{ lock: sync.RWMutex{}, @@ -111,11 +116,9 @@ func TestFastHotStuffConsensusHandleLocalTimeoutEvent(t *testing.T) { }, params: &DeSoTestnetParams, blockchain: &Blockchain{ - ChainLock: deadlock.RWMutex{}, - blockIndexByHash: collections.NewConcurrentMapFromMap(map[BlockHash]*BlockNode{ - *blockHash: {Header: blockHeader}, - }), - params: &DeSoTestnetParams, + ChainLock: deadlock.RWMutex{}, + blockIndex: blockIndex, + params: &DeSoTestnetParams, }, fastHotStuffEventLoop: &consensus.MockFastHotStuffEventLoop{ OnIsInitialized: alwaysReturnTrue, diff --git a/lib/pos_mempool.go b/lib/pos_mempool.go index 6895c7e30..b2217c121 100644 --- a/lib/pos_mempool.go +++ b/lib/pos_mempool.go @@ -2,14 +2,14 @@ package lib import ( "bytes" + "encoding/hex" "fmt" + "github.com/deso-protocol/core/collections" "path/filepath" "sync" "sync/atomic" "time" - "github.com/decred/dcrd/container/lru" - "github.com/dgraph-io/badger/v3" "github.com/golang/glog" "github.com/pkg/errors" @@ -183,11 +183,11 @@ type PosMempool struct { // recentBlockTxnCache is an LRU KV cache used to track the transaction that have been included in blocks. // This cache is used to power logic that waits for a transaction to either be validated in the mempool // or be included in a block. 
- recentBlockTxnCache lru.Set[BlockHash] + recentBlockTxnCache *collections.LruSet[BlockHash] // recentRejectedTxnCache is a cache to store the txns that were recently rejected so that we can return better // errors for them. - recentRejectedTxnCache lru.Map[BlockHash, error] + recentRejectedTxnCache *collections.LruCache[BlockHash, error] } func NewPosMempool() *PosMempool { @@ -234,8 +234,8 @@ func (mp *PosMempool) Init( mp.mempoolBackupIntervalMillis = mempoolBackupIntervalMillis mp.maxValidationViewConnects = maxValidationViewConnects mp.transactionValidationRefreshIntervalMillis = transactionValidationRefreshIntervalMillis - mp.recentBlockTxnCache = *lru.NewSet[BlockHash](100000) // cache 100K latest txns from blocks. - mp.recentRejectedTxnCache = *lru.NewMap[BlockHash, error](100000) // cache 100K rejected txns. + mp.recentBlockTxnCache, _ = collections.NewLruSet[BlockHash](100000) // cache 100K latest txns from blocks. + mp.recentRejectedTxnCache, _ = collections.NewLruCache[BlockHash, error](100000) // cache 100K rejected txns. // Recreate and initialize the transaction register and the nonce tracker. mp.txnRegister = NewTransactionRegister() @@ -684,6 +684,7 @@ func (mp *PosMempool) loadPersistedTransactions() error { if err != nil { return errors.Wrapf(err, "PosMempool.Start: Problem retrieving transactions from persister") } + glog.V(0).Infof("PosMempool.loadPersistedTransactions: Retrieved %d transactions from persister", len(txns)) // We set the persistToDb flag to false so that persister doesn't try to save the transactions. for _, txn := range txns { if err := mp.addTransactionNoLock(txn, false); err != nil { @@ -848,7 +849,12 @@ func (mp *PosMempool) validateTransactions() error { // try to resubmit it. txn.SetValidated(false) mp.recentRejectedTxnCache.Put(*txn.Hash, err) - + txnBytes, toBytesErr := txn.Tx.ToBytes(false) + if toBytesErr != nil { + glog.Errorf("PosMempool.validateTransactions: Problem converting txn to bytes: %v", toBytesErr) + } + glog.V(0).Infof("PosMempool.validateTransactions: Removing txn %v from mempool: %v\nTxn Hex: %v", + txn.Hash, err, hex.EncodeToString(txnBytes)) // Try to remove the transaction with a lock. mp.removeTransaction(txn, true) diff --git a/lib/pos_mempool_persister.go b/lib/pos_mempool_persister.go index 05a1ef617..43ee7c449 100644 --- a/lib/pos_mempool_persister.go +++ b/lib/pos_mempool_persister.go @@ -168,6 +168,8 @@ func (mp *MempoolPersister) persistBatchNoLock() error { return nil } + glog.V(1).Infof("MempoolPersister: Persisting batch of %d mempool events", len(mp.updateBatch)) + // If there are no transactions to persist, return. 
if len(mp.updateBatch) == 0 { return nil @@ -176,6 +178,7 @@ func (mp *MempoolPersister) persistBatchNoLock() error { wb := mp.db.NewWriteBatch() defer wb.Cancel() + addEvents, removeEvents := 0, 0 for _, event := range mp.updateBatch { if event.Txn == nil || event.Txn.Hash == nil { continue @@ -194,10 +197,12 @@ func (mp *MempoolPersister) persistBatchNoLock() error { if err := wb.Set(key, value); err != nil { glog.Errorf("MempoolPersister: Error setting key: %v", err) } + addEvents++ case MempoolEventRemove: if err := wb.Delete(key); err != nil { glog.Errorf("MempoolPersister: Error deleting key: %v", err) } + removeEvents++ } } err := wb.Flush() @@ -207,6 +212,8 @@ func (mp *MempoolPersister) persistBatchNoLock() error { mp.updateBatch = nil + glog.V(1).Infof("MempoolPersister: Persisted %d add events and %d remove events", addEvents, removeEvents) + return nil } diff --git a/lib/pos_snapshot_entries.go b/lib/pos_snapshot_entries.go index b727c384a..02cb16ceb 100644 --- a/lib/pos_snapshot_entries.go +++ b/lib/pos_snapshot_entries.go @@ -1335,7 +1335,7 @@ func DBSeekSnapshotLeaderSchedule( snapshotAtEpochNumber uint64, ) (map[uint16]*PKID, error) { seekKey := DBSSeekKeyForSnapshotLeaderSchedule(snapshotAtEpochNumber) - keysFound, valsFound := EnumerateKeysForPrefix(handle, seekKey, false) + keysFound, valsFound := EnumerateKeysForPrefix(handle, seekKey, false, false) leaderIdxToPKID := make(map[uint16]*PKID) for idx, keyFound := range keysFound { // TODO: Make sure this decode is correct diff --git a/lib/postgres.go b/lib/postgres.go index a80000674..f4da6642c 100644 --- a/lib/postgres.go +++ b/lib/postgres.go @@ -1296,8 +1296,11 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { } // The genesis block has a nil parent - if blockNode.Parent != nil { - block.ParentHash = blockNode.Parent.Hash + if blockNode.Header.PrevBlockHash != nil {
+ block.ParentHash = blockNode.Header.PrevBlockHash } _, err := tx.Model(block).WherePK().OnConflict("(hash) DO UPDATE").Insert() @@ -1305,16 +1308,16 @@ func (postgres *Postgres) UpsertBlockTx(tx *pg.Tx, blockNode *BlockNode) error { } // GetBlockIndex gets all the PGBlocks and creates a map of BlockHash to BlockNode as needed by blockchain.go -func (postgres *Postgres) GetBlockIndex() (*collections.ConcurrentMap[BlockHash, *BlockNode], error) { +func (postgres *Postgres) GetBlockIndex() (*collections.LruCache[BlockHash, *BlockNode], error) { var blocks []PGBlock err := postgres.db.Model(&blocks).Select() if err != nil { return nil, err } - blockMap := collections.NewConcurrentMap[BlockHash, *BlockNode]() + blockMap, _ := collections.NewLruCache[BlockHash, *BlockNode](MaxBlockIndexNodes) for _, block := range blocks { - blockMap.Set(*block.Hash, &BlockNode{ + blockMap.Put(*block.Hash, &BlockNode{ Hash: block.Hash, Height: uint32(block.Height), DifficultyTarget: block.DifficultyTarget, @@ -1332,19 +1335,6 @@ func (postgres *Postgres) GetBlockIndex() (*collections.ConcurrentMap[BlockHash, }) } - // Setup parent pointers - blockMap.Iterate(func(key BlockHash, blockNode *BlockNode) { - // Genesis block has nil parent - parentHash := blockNode.Header.PrevBlockHash - if parentHash != nil { - parent, exists := blockMap.Get(*parentHash) - if !exists { - glog.Fatal("Parent block not found in block map") - } - blockNode.Parent = parent - } - }) - return blockMap, nil } @@ -4093,7 +4083,6 @@ func (postgres *Postgres) InitGenesisBlock(params *DeSoParams, db *badger.DB) er diffTarget := MustDecodeHexBlockHash(params.MinDifficultyTargetHex) blockHash := MustDecodeHexBlockHash(params.GenesisBlockHashHex) genesisNode := NewBlockNode( - nil, blockHash, 0, diffTarget, diff --git a/lib/remote_node.go b/lib/remote_node.go index 1fc832991..b81ce41f8 100644 --- a/lib/remote_node.go +++ b/lib/remote_node.go @@ -409,7 +409,7 @@ func (rn *RemoteNode) Disconnect(disconnectReason string) { func (rn *RemoteNode) SendMessage(desoMsg DeSoMessage) error { rn.mtx.RLock() - rn.mtx.RUnlock() + defer rn.mtx.RUnlock() if rn.connectionStatus != RemoteNodeStatus_HandshakeCompleted { return fmt.Errorf("SendMessage: Remote node is not connected") diff --git a/lib/server.go b/lib/server.go index 0fc3c0050..2d272b7b8 100644 --- a/lib/server.go +++ b/lib/server.go @@ -4,26 +4,23 @@ import ( "bytes" "encoding/hex" "fmt" - "github.com/deso-protocol/go-deadlock" + "github.com/dgraph-io/badger/v3" "net" + "path/filepath" "reflect" "runtime" "strings" "sync/atomic" "time" - "github.com/btcsuite/btcd/wire" - "github.com/deso-protocol/core/collections" - "github.com/deso-protocol/core/consensus" - - "github.com/decred/dcrd/container/lru" - "github.com/DataDog/datadog-go/v5/statsd" - "github.com/btcsuite/btcd/addrmgr" chainlib "github.com/btcsuite/btcd/blockchain" + "github.com/btcsuite/btcd/wire" "github.com/davecgh/go-spew/spew" - "github.com/dgraph-io/badger/v3" + "github.com/deso-protocol/core/collections" + "github.com/deso-protocol/core/consensus" + "github.com/deso-protocol/go-deadlock" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -90,7 +87,8 @@ type Server struct { // adding it to this map and checking this map before replying will make it // so that we only send a reply to the first peer that sent us the inv, which // is more efficient. 
- inventoryBeingProcessed lru.Set[InvVect] + inventoryBeingProcessed *collections.LruSet[InvVect] + // hasRequestedSync indicates whether we've bootstrapped our mempool // by requesting all mempool transactions from a // peer. It's initially false @@ -363,6 +361,27 @@ func ValidateHyperSyncFlags(isHypersync bool, syncType NodeSyncType) { } } +// RunBlockIndexMigrationOnce runs the block index migration once and saves a file to +// indicate that it has been run. +func RunBlockIndexMigrationOnce(db *badger.DB, params *DeSoParams) error { + blockIndexMigrationFileName := filepath.Join(db.Opts().Dir, BlockIndexMigrationFileName) + glog.V(2).Info("FileName: ", blockIndexMigrationFileName) + hasRunMigration, err := ReadBoolFromFile(blockIndexMigrationFileName) + if err == nil && hasRunMigration { + glog.V(2).Info("Block index migration has already been run") + return nil + } + glog.V(0).Info("Running block index migration") + if err = RunBlockIndexMigration(db, nil, nil, params); err != nil { + return errors.Wrapf(err, "Problem running block index migration") + } + if err = SaveBoolToFile(blockIndexMigrationFileName, true); err != nil { + return errors.Wrapf(err, "Problem saving block index migration file") + } + glog.V(2).Info("Block index migration complete") + return nil +} + // NewServer initializes all of the internal data structures. Right now this basically // looks as follows: // - ConnectionManager starts and keeps track of peers. @@ -687,7 +706,8 @@ func NewServer( srv.blockProducer = _blockProducer srv.incomingMessages = _incomingMessages // Make this hold a multiple of what we hold for individual peers. - srv.inventoryBeingProcessed = *lru.NewSet[InvVect](maxKnownInventory) + srv.inventoryBeingProcessed, _ = collections.NewLruSet[InvVect](maxKnownInventory) + srv.requestTimeoutSeconds = 10 srv.statsdClient = statsd @@ -753,7 +773,11 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { if pp.NegotiatedProtocolVersion >= ProtocolVersion2 { maxHeadersPerMsg = MaxHeadersPerMsgPos } - headers := srv.blockchain.LocateBestBlockChainHeaders(msg.BlockLocator, msg.StopHash, maxHeadersPerMsg) + + headers, err := srv.GetHeadersForLocatorAndStopHash(msg.BlockLocator, msg.StopHash, maxHeadersPerMsg) + if err != nil { + glog.Errorf("Server._handleGetHeadersMessage: Error getting headers: %v", err) + } // Send found headers to the requesting peer. blockTip := srv.blockchain.blockTip() @@ -767,6 +791,88 @@ func (srv *Server) _handleGetHeaders(pp *Peer, msg *MsgDeSoGetHeaders) { headers, blockTip.Hash, blockTip.Height, pp) } +// GetHeadersForLocatorAndStopHash returns a list of headers given a list of locator block hashes +// and a stop hash. Note that this may be slow if the block nodes requested are not in the cache. +func (srv *Server) GetHeadersForLocatorAndStopHash( + locator []*BlockHash, + stopHash *BlockHash, + maxHeadersPerMsg uint32, +) ([]*MsgDeSoHeader, error) { + var headers []*MsgDeSoHeader + + stopNode, stopNodeExists, stopNodeError := srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight(stopHash, nil, true) + // Special case when there is no block locator provided but only a stop hash. 
+ if len(locator) == 0 { + if stopNodeError != nil || !stopNodeExists || stopNode == nil { + return nil, fmt.Errorf("GetHeadersForLocatorAndStopHash: Stop hash provided but no stop node found") + } + return []*MsgDeSoHeader{stopNode.Header}, nil + } + var startNode *BlockNode + var startNodeExists bool + var startNodeError error + for _, blockNodeHash := range locator { + startNode, startNodeExists, startNodeError = srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight(blockNodeHash, nil, true) + if startNodeError != nil || !startNodeExists || startNode == nil { + glog.Errorf("GetHeadersForLocatorAndStopHash: locator provided but no block node found at %v", blockNodeHash) + } + if startNodeExists && startNode != nil { + break + } + } + if startNode == nil { + return nil, fmt.Errorf("GetHeadersForLocatorAndStopHash: No start node found after looping through locators") + } + + var backtrackingNode *BlockNode + var backtrackingNodeExists bool + var backtrackingNodeError error + // If the stop node isn't provided and the max header msgs would put us past the header tip, + // we use the header tip to start back tracking. + if stopNode == nil && srv.blockchain.HeaderTip().Height < startNode.Height+maxHeadersPerMsg { + backtrackingNode = srv.blockchain.HeaderTip() + backtrackingNodeExists = true + } else if stopNode == nil || stopNode.Height > startNode.Height+maxHeadersPerMsg { + // If the stop node isn't provided or the stop node is more than maxHeadersPerMsg away + // from the start node, we compute the height of the last header expected and start + // back tracking from there. + backtrackingNode, backtrackingNodeExists, backtrackingNodeError = srv.blockchain.GetBlockFromBestChainByHeight( + uint64(startNode.Height+maxHeadersPerMsg), true) + if backtrackingNodeError != nil { + return nil, fmt.Errorf("GetHeadersForLocatorAndStopHash: Error getting backtracking node by height: %v", backtrackingNodeError) + } + if !backtrackingNodeExists || backtrackingNode == nil { + return nil, errors.New("GetHeadersForLocatorAndStopHash: Backtracking node not found") + } + } else { + // Otherwise, the stop node is provided and we start back tracking from the stop node. + backtrackingNode = stopNode + } + for ii := uint32(0); ii < maxHeadersPerMsg; ii++ { + // If we've back tracked all the way to the start node, exit. + if backtrackingNode.Hash.IsEqual(startNode.Hash) { + break + } + headers = append(headers, backtrackingNode.Header) + // Avoid underflow. + if backtrackingNode.Height < 1 { + break + } + prevNodeHeight := backtrackingNode.Header.Height - 1 + backtrackingNode, backtrackingNodeExists, backtrackingNodeError = srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight( + backtrackingNode.Header.PrevBlockHash, + &prevNodeHeight, true) + if backtrackingNodeError != nil { + glog.Errorf("Server._handleGetHeadersMessage: Error getting prev node by height: %v", backtrackingNodeError) + break + } + if !backtrackingNodeExists || backtrackingNode == nil { + break + } + } + return collections.Reverse(headers), nil +} + // GetSnapshot is used for sending MsgDeSoGetSnapshot messages to peers. We will // check if the passed peer has been assigned to an in-progress prefix and if so, // we will request a snapshot data chunk from them. Otherwise, we will assign a @@ -862,7 +968,17 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { } // Go through the block nodes in the blockchain and download the blocks if they're not stored. 
- for _, blockNode := range srv.blockchain.bestChain { + for ii := uint32(srv.blockchain.lowestBlockNotStored); ii <= srv.blockchain.blockTip().Height; ii++ { + // TODO: this may be really slow. + blockNode, exists, err := srv.blockchain.GetBlockFromBestChainByHeight(uint64(ii), true) + if err != nil { + glog.Errorf("GetBlocksToStore: Error getting block from best chain by height: %v", err) + return + } + if !exists { + glog.Errorf("GetBlocksToStore: Block at height %v not found in best chain", ii) + return + } // We find the first block that's not stored and get ready to download blocks starting from this block onwards. if blockNode.Status&StatusBlockStored == 0 { maxBlocksInFlight := MaxBlocksInFlight @@ -872,28 +988,37 @@ func (srv *Server) GetBlocksToStore(pp *Peer) { maxBlocksInFlight = MaxBlocksInFlightPoS } + srv.blockchain.lowestBlockNotStored = uint64(blockNode.Height) numBlocksToFetch := maxBlocksInFlight - len(pp.requestedBlocks) - currentHeight := int(blockNode.Height) + currentHeight := uint64(blockNode.Height) blockNodesToFetch := []*BlockNode{} // In case there are blocks at tip that are already stored (which shouldn't really happen), we'll not download them. - var heightLimit int - for heightLimit = len(srv.blockchain.bestChain) - 1; heightLimit >= 0; heightLimit-- { - if !srv.blockchain.bestChain[heightLimit].Status.IsFullyProcessed() { + // We filter those out in the loop below by checking IsFullyProcessed. + // Find the blocks that we should download. + for len(blockNodesToFetch) < numBlocksToFetch { + if currentHeight > uint64(srv.blockchain.blockTip().Height) { break } - } - - // Find the blocks that we should download. - for currentHeight <= heightLimit && - len(blockNodesToFetch) < numBlocksToFetch { - // Get the current hash and increment the height. Genesis has height 0, so currentHeight corresponds to // the array index. - currentNode := srv.blockchain.bestChain[currentHeight] + // TODO: this may be really slow. + currentNode, currNodeExists, err := srv.blockchain.GetBlockFromBestChainByHeight(currentHeight, true) + if err != nil { + glog.Errorf("GetBlocksToStore: Error getting block from best chain by height: %v", err) + return + } + if !currNodeExists { + glog.Errorf("GetBlocksToStore: Block at height %v not found in best chain", currentHeight) + return + } currentHeight++ + // If this node is already fully processed, then we don't need to download it. + if currentNode.Status.IsFullyProcessed() { + break + } // If we've already requested this block then we don't request it again. - if _, exists := pp.requestedBlocks[*currentNode.Hash]; exists { + if _, exists = pp.requestedBlocks[*currentNode.Hash]; exists { continue } @@ -928,6 +1053,8 @@ func (srv *Server) RequestBlocksUpToHeight(pp *Peer, maxHeight int) { numBlocksToFetch, maxHeight, pp.requestedBlocks, ) if len(blockNodesToFetch) == 0 { + glog.V(1).Infof("RequestBlocksUpToHeight: No blocks to fetch from peer %v: maxBlocksInFlight: %d, peer requested blocks: %d", + pp, srv.getMaxBlocksInFlight(pp), len(pp.requestedBlocks)) // This can happen if, for example, we're already requesting the maximum // number of blocks we can. Just return in this case. 
return @@ -942,7 +1069,7 @@ func (srv *Server) RequestBlocksUpToHeight(pp *Peer, maxHeight int) { pp.AddDeSoMessage(&MsgDeSoGetBlocks{HashList: hashList}, false) - glog.V(1).Infof("GetBlocks: Downloading %d blocks from header %v to header %v from peer %v", + glog.V(1).Infof("RequestBlocksUpToHeight: Downloading %d blocks from header %v to header %v from peer %v", len(blockNodesToFetch), blockNodesToFetch[0].Header, blockNodesToFetch[len(blockNodesToFetch)-1].Header, @@ -1013,14 +1140,17 @@ func (srv *Server) shouldVerifySignatures(header *MsgDeSoHeader, isHeaderChain b if checkpointBlockInfo == nil { return true, false } - var hasSeenCheckpointBlockHash bool - var checkpointBlockNode *BlockNode + // If the current header has a height below the checkpoint block height, we should skip signature verification + // even if we've seen the checkpoint block hash. + if header.Height < checkpointBlockInfo.Height { + return false, false + } srv.blockchain.ChainLock.RLock() defer srv.blockchain.ChainLock.RUnlock() - if isHeaderChain { - checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] - } else { - checkpointBlockNode, hasSeenCheckpointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + checkpointBlockNode, hasSeenCheckpointBlockHash, err := srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight( + checkpointBlockInfo.Hash, &checkpointBlockInfo.Height, isHeaderChain) + if err != nil { + glog.Fatalf("shouldVerifySignatures: Problem getting checkpoint block node from best chain: %v", err) } // If we haven't seen the checkpoint block hash yet, we skip signature verification. if !hasSeenCheckpointBlockHash { @@ -1031,11 +1161,6 @@ func (srv *Server) shouldVerifySignatures(header *MsgDeSoHeader, isHeaderChain b } return false, false } - // If the current header has a height below the checkpoint block height, we should skip signature verification - // even if we've seen the checkpoint block hash. - if header.Height < checkpointBlockInfo.Height { - return false, false - } // Make sure that the header in the best chain map has the correct height, otherwise we need to disconnect this peer. 
if uint64(checkpointBlockNode.Height) != checkpointBlockInfo.Height { return true, true } @@ -1048,13 +1173,11 @@ func (srv *Server) getCheckpointSyncingStatus(isHeaders bool) string { if checkpointBlockInfo == nil { return "" } - hasSeenCheckPointBlockHash := false - srv.blockchain.ChainLock.RLock() - defer srv.blockchain.ChainLock.RUnlock() - if isHeaders { - _, hasSeenCheckPointBlockHash = srv.blockchain.bestHeaderChainMap[*checkpointBlockInfo.Hash] - } else { - _, hasSeenCheckPointBlockHash = srv.blockchain.bestChainMap[*checkpointBlockInfo.Hash] + _, hasSeenCheckPointBlockHash, err := srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight( + checkpointBlockInfo.Hash, &checkpointBlockInfo.Height, isHeaders) + + if err != nil { + glog.Fatalf("getCheckpointSyncingStatus: Problem getting checkpoint block node from best chain: %v", err) } if !hasSeenCheckPointBlockHash { return fmt.Sprintf("", checkpointBlockInfo.String()) } @@ -1072,6 +1195,15 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { len(msg.Headers), srv.blockchain.chainState(), pp, srv.blockchain.headerTip().Header.Height, printHeight, srv.getCheckpointSyncingStatus(true)))) + if glog.V(2) { + headerStrings := collections.Transform(msg.Headers, func(header *MsgDeSoHeader) string { return header.ShortString() }) + if len(msg.Headers) < 50 { + glog.V(2).Infof("Received headers :\n %v", strings.Join(headerStrings, "\n")) + } else { + glog.V(2).Infof("Received headers :\n %v", strings.Join( + append(headerStrings[:10], headerStrings[len(headerStrings)-10:]...), "\n")) + } + } // If we get here, it means that the node is not currently running a Fast-HotStuff // validator or that the node is syncing. In either case, we sync headers according // to the blocksync rules. @@ -1079,6 +1211,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // Start by processing all the headers given to us. They should start // right after the tip of our header chain ideally. While going through them // tally up the number that we actually process. + var blockNodeBatch []*BlockNode for ii, headerReceived := range msg.Headers { // If we've set a maximum height for node sync and we've reached it, // then we will not process any more headers. @@ -1091,25 +1224,26 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // have this issue. Hitting duplicates after we're done syncing is // fine and can happen in certain cases. headerHash, _ := headerReceived.Hash() - if srv.blockchain.HasHeader(headerHash) { - if srv.blockchain.isSyncing() { - - glog.Warningf("Server._handleHeaderBundle: Duplicate header %v received from peer %v "+ "in state %s. Local header tip height %d "+ "hash %s with duplicate %v", - headerHash, - pp, srv.blockchain.chainState(), srv.blockchain.headerTip().Height, - hex.EncodeToString(srv.blockchain.headerTip().Hash[:]), headerHash) - - // TODO: This logic should really be commented back in, but there was a bug that - // arises when a program is killed forcefully whereby a partial write leads to this - // logic causing the sync to stall. As such, it's more trouble than it's worth - // at the moment but we should consider being more strict about it in the future. - /* - pp.Disconnect() - return - */ - } + hasHeader := srv.blockchain.HasHeaderByHashAndHeight(headerHash, headerReceived.Height) + if hasHeader { + // Always log a warning if we get a duplicate header. This is useful for debugging.
+ glog.Warningf("Server._handleHeaderBundle: Duplicate header %v received from peer %v "+ + "in state %s. Local header tip height %d "+ + "hash %s with duplicate %v", + headerHash, + pp, srv.blockchain.chainState(), srv.blockchain.headerTip().Height, + hex.EncodeToString(srv.blockchain.headerTip().Hash[:]), headerHash) + + // TODO: This logic should really be commented back in, but there was a bug that + // arises when a program is killed forcefully whereby a partial write leads to this + // logic causing the sync to stall. As such, it's more trouble than it's worth + // at the moment but we should consider being more strict about it in the future. + /* + pp.Disconnect() + return + */ + //} // Don't process duplicate headers. continue @@ -1129,7 +1263,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // Process the header, as we haven't seen it before, set verifySignatures to false // if we're in the process of syncing. - _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, verifySignatures) + blockNode, _, isOrphan, err := srv.blockchain.ProcessHeader(headerReceived, headerHash, verifySignatures) numLogHeaders := 2000 if ii%numLogHeaders == 0 { @@ -1149,8 +1283,32 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { pp, srv.blockchain.chainState(), err, isOrphan) pp.Disconnect("Error processing header") + // Just to be safe, we flush all the headers we just got even tho we have a header. + currTime := time.Now() + if err = PutHeightHashToNodeInfoBatch( + srv.blockchain.db, srv.snapshot, blockNodeBatch, false /*bitcoinNodes*/, srv.eventManager); err != nil { + glog.Errorf("Server._handleHeaderBundle: Problem writing block nodes to db, error: (%v)", err) + return + } + glog.V(0).Info("Server._handleHeaderBundle: PutHeightHashToNodeInfoBatch took: ", time.Since(currTime)) return } + + // Append the block node to the block node batch. + if blockNode != nil { + blockNodeBatch = append(blockNodeBatch, blockNode) + } + } + currTime := time.Now() + if err := PutHeightHashToNodeInfoBatch( + srv.blockchain.db, srv.snapshot, blockNodeBatch, false /*bitcoinNodes*/, srv.eventManager); err != nil { + glog.Errorf("Server._handleHeaderBundle: Problem writing block nodes to db, error: (%v)", err) + return + } + if len(blockNodeBatch) > 0 { + glog.V(0).Info("Server._handleHeaderBundle: PutHeightHashToNodeInfoBatch took: ", time.Since(currTime)) + } else { + glog.V(0).Info("Server._handleHeaderBundle: No block nodes to write to db") } // After processing all the headers this will check to see if we are fully current @@ -1233,11 +1391,21 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) { // expected height at which the snapshot should be taking place. We do this to make sure that the // snapshot we receive from the peer is up-to-date. // TODO: error handle if the hash doesn't exist for some reason. 
@@ -1233,11 +1391,21 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) {
 		// expected height at which the snapshot should be taking place. We do this to make sure that the
 		// snapshot we receive from the peer is up-to-date.
 		// TODO: error handle if the hash doesn't exist for some reason.
+		expectedSnapshotHeightBlock, expectedSnapshotHeightBlockExists, err :=
+			srv.blockchain.GetBlockFromBestChainByHeight(expectedSnapshotHeight, true)
+		if err != nil {
+			glog.Errorf("Server._handleHeaderBundle: Problem getting expected snapshot height block, error (%v)", err)
+			return
+		}
+		if !expectedSnapshotHeightBlockExists || expectedSnapshotHeightBlock == nil {
+			glog.Errorf("Server._handleHeaderBundle: Expected snapshot height block doesn't exist.")
+			return
+		}
 		srv.HyperSyncProgress.SnapshotMetadata = &SnapshotEpochMetadata{
 			SnapshotBlockHeight:       expectedSnapshotHeight,
 			FirstSnapshotBlockHeight:  expectedSnapshotHeight,
 			CurrentEpochChecksumBytes: []byte{},
-			CurrentEpochBlockHash:     srv.blockchain.bestHeaderChain[expectedSnapshotHeight].Hash,
+			CurrentEpochBlockHash:     expectedSnapshotHeightBlock.Hash,
 		}
 		srv.HyperSyncProgress.PrefixProgress = []*SyncPrefixProgress{}
 		srv.HyperSyncProgress.Completed = false
@@ -1314,8 +1482,9 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) {
 	// we're either not aware of or that we don't think is the best chain.
 	// Doing things this way makes it so that when we request blocks we
 	// are 100% positive the peer has them.
-	if !srv.blockchain.HasHeader(msg.TipHash) {
-		glog.V(1).Infof("Server._handleHeaderBundle: Peer's tip is not in our "+
+	hasHeader := srv.blockchain.HasHeaderByHashAndHeight(msg.TipHash, uint64(msg.TipHeight))
+	if !hasHeader {
+		glog.V(0).Infof("Server._handleHeaderBundle: Peer's tip is not in our "+
 			"blockchain so not requesting anything else from them. Our block "+
 			"tip %v, their tip %v:%d, peer: %v",
 			srv.blockchain.blockTip().Header, msg.TipHash, msg.TipHeight, pp)
@@ -1327,7 +1496,7 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) {
 	// them should be available as long as they don't exceed the peer's
 	// tip height.
 	blockTip := srv.blockchain.blockTip()
-	glog.V(1).Infof("Server._handleHeaderBundle: *Downloading* blocks starting at "+
+	glog.V(0).Infof("Server._handleHeaderBundle: *Downloading* blocks starting at "+
 		"block tip %v out of %d from peer %v",
 		blockTip.Header, msg.TipHeight, pp)
 	srv.RequestBlocksUpToHeight(pp, int(msg.TipHeight))
@@ -1352,7 +1521,10 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) {
 	// even if the peer has a long fork with more work than our current header
 	// chain.
 	lastHash, _ := msg.Headers[len(msg.Headers)-1].Hash()
-	locator, err := srv.blockchain.HeaderLocatorWithNodeHash(lastHash)
+	lastHeight := msg.Headers[len(msg.Headers)-1].Height
+	headerTip := srv.blockchain.headerTip()
+	currentBlockTip := srv.blockchain.blockTip()
+	locator, locatorHeights, err := srv.blockchain.HeaderLocatorWithNodeHashAndHeight(lastHash, lastHeight)
 	if err != nil {
 		glog.Warningf("Server._handleHeaderBundle: Disconnecting peer %v because "+
 			"she indicated that she has more headers but the last hash %v in "+
@@ -1361,14 +1533,20 @@ func (srv *Server) _handleHeaderBundle(pp *Peer, msg *MsgDeSoHeaderBundle) {
 		pp.Disconnect("Last hash in header bundle not in our index")
 		return
 	}
+	glog.V(2).Infof("Server._handleHeaderBundle: Sending GET_HEADERS message to peer %v\n"+
+		"Block Locator Hashes & Heights: (%v, %v)\n"+
+		"Header Tip: (%v, %v)\nBlock Tip: (%v, %v)",
+		pp, locator, locatorHeights, headerTip.Hash, headerTip.Height,
+		currentBlockTip.Hash, currentBlockTip.Height)
 	pp.AddDeSoMessage(&MsgDeSoGetHeaders{
 		StopHash:     &BlockHash{},
 		BlockLocator: locator,
 	}, false)
 
-	headerTip := srv.blockchain.headerTip()
 	glog.V(1).Infof("Server._handleHeaderBundle: *Syncing* headers for blocks starting at "+
 		"header tip %v out of %d from peer %v",
 		headerTip.Header, msg.TipHeight, pp)
+	glog.V(0).Infof("Server._handleHeaderBundle: Header tip in header chain: %v",
+		srv.blockchain.blockIndex.GetHeaderTip())
 }
 
 func (srv *Server) _handleGetBlocks(pp *Peer, msg *MsgDeSoGetBlocks) {
@@ -1651,10 +1829,18 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) {
 		srv.snapshot.PrintChecksum("Finished hyper sync. Checksum is:")
 		glog.Infof(CLog(Magenta, fmt.Sprintf("Metadata checksum: (%v)",
 			srv.HyperSyncProgress.SnapshotMetadata.CurrentEpochChecksumBytes)))
-
-		glog.Infof(CLog(Yellow, fmt.Sprintf("Best header chain %v best block chain %v",
-			srv.blockchain.bestHeaderChain[msg.SnapshotMetadata.SnapshotBlockHeight], srv.blockchain.bestChain)))
-
+		blockNode, exists, err := srv.blockchain.GetBlockFromBestChainByHeight(msg.SnapshotMetadata.SnapshotBlockHeight, true)
+		if err != nil {
+			glog.Errorf("Server._handleSnapshot: Problem getting block node by height, error (%v)", err)
+			return
+		}
+		if !exists {
+			glog.Errorf("Server._handleSnapshot: Problem getting block node by height, block node does not exist: (%v)",
+				msg.SnapshotMetadata.SnapshotBlockHeight)
+			return
+		}
+		glog.Infof(CLog(Yellow, fmt.Sprintf("Best header chain %v best block chain %v",
+			blockNode, srv.blockchain.blockIndex.GetTip())))
 	// Verify that the state checksum matches the one in HyperSyncProgress snapshot metadata.
 	// If the checksums don't match, it means that we've been interacting with a peer that was misbehaving.
 	checksumBytes, err := srv.snapshot.Checksum.ToBytes()
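Several hunks in this change attach heights to the block locator that rides in GET_HEADERS. As background, a block locator is a list of block hashes that is dense near the tip and thins out exponentially toward genesis, letting the peer find the fork point with O(log n) entries. A sketch of the usual construction, heights only for brevity; the real locator carries hashes alongside these heights, and DeSo's exact spacing may differ:

```go
package main

import "fmt"

// locatorHeights returns tip, the ~10 blocks below it, then exponentially
// spaced heights down to genesis.
func locatorHeights(tipHeight int) []int {
	heights := []int{}
	step := 1
	for h := tipHeight; h > 0; h -= step {
		heights = append(heights, h)
		// After the most recent entries, double the step each time.
		if len(heights) > 10 {
			step *= 2
		}
	}
	return append(heights, 0) // always include genesis
}

func main() {
	fmt.Println(locatorHeights(1000))
}
```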
@@ -1696,19 +1882,46 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) {
 	// being too large and possibly causing an error in badger.
 	glog.V(0).Infof("Server._handleSnapshot: Updating snapshot block nodes in the database")
 	var blockNodeBatch []*BlockNode
+	flushBlockNodeStartTime := time.Now()
+	// Disable deadlock detection, as the process of flushing entries to file can take a long time and
+	// if it takes longer than the deadlock detection timeout interval, it will cause an error to be thrown.
+	deadlock.Opts.Disable = true
+	defer func() {
+		deadlock.Opts.Disable = false
+	}()
 	// acquire the chain lock while we update the best chain and best chain map.
 	srv.blockchain.ChainLock.Lock()
-	for ii := uint64(1); ii <= srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight; ii++ {
-		currentNode := srv.blockchain.bestHeaderChain[ii]
+	currentNode := blockNode
+	currentNodeExists := true
+	// Set the block tip to the snapshot height block node.
+	srv.blockchain.blockIndex.setTip(currentNode)
+	for currentNode.Height > 0 {
 		// Do not set the StatusBlockStored flag, because we still need to download the past blocks.
 		currentNode.Status |= StatusBlockProcessed
 		currentNode.Status |= StatusBlockValidated
 		currentNode.Status |= StatusBlockCommitted
 		srv.blockchain.addNewBlockNodeToBlockIndex(currentNode)
-		srv.blockchain.bestChainMap[*currentNode.Hash] = currentNode
-		srv.blockchain.bestChain = append(srv.blockchain.bestChain, currentNode)
 		blockNodeBatch = append(blockNodeBatch, currentNode)
-		if len(blockNodeBatch) < 10000 {
+		if (srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight-uint64(currentNode.Height))%100000 == 0 {
+			glog.V(0).Infof("Time to process %v of %v block nodes in %v",
+				srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight-uint64(currentNode.Height),
+				srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight,
+				time.Since(flushBlockNodeStartTime),
+			)
+		}
+
+		prevNodeHeight := uint64(currentNode.Height) - 1
+		currentNode, currentNodeExists, err = srv.blockchain.GetBlockFromBestChainByHashAndOptionalHeight(currentNode.Header.PrevBlockHash, &prevNodeHeight, true)
+		if err != nil {
+			glog.Errorf("Server._handleSnapshot: Problem getting block node by height, error: (%v)", err)
+			break
+		}
+		if !currentNodeExists {
+			glog.Errorf("Server._handleSnapshot: Problem getting block node by height, block node does not exist")
+			break
+		}
+		// TODO: should we adjust this value for batch sizes?
+		if len(blockNodeBatch) < 25000 {
 			continue
 		}
 		err = PutHeightHashToNodeInfoBatch(srv.blockchain.db, srv.snapshot, blockNodeBatch, false /*bitcoinNodes*/, srv.eventManager)
@@ -1724,6 +1937,8 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) {
 			glog.Errorf("Server._handleSnapshot: Problem updating snapshot block nodes, error: (%v)", err)
 		}
 	}
+	glog.V(0).Infof("Time to store %v block nodes in the database: %v",
+		srv.HyperSyncProgress.SnapshotMetadata.SnapshotBlockHeight, time.Since(flushBlockNodeStartTime))
 
 	err = PutBestHash(srv.blockchain.db, srv.snapshot, msg.SnapshotMetadata.CurrentEpochBlockHash, ChainTypeDeSoBlock, srv.eventManager)
 	if err != nil {
@@ -1731,7 +1946,7 @@ func (srv *Server) _handleSnapshot(pp *Peer, msg *MsgDeSoSnapshotData) {
 	}
 	// We also reset the in-memory snapshot cache, because it is populated with stale records after
 	// we've initialized the chain with seed transactions.
-	srv.snapshot.DatabaseCache = *lru.NewMap[string, []byte](DatabaseCacheSize)
+	srv.snapshot.DatabaseCache, _ = collections.NewLruCache[string, []byte](int(DatabaseCacheSize))
 
 	// If we got here then we finished the snapshot sync so set appropriate flags.
 	srv.blockchain.syncingState = false
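The rewritten _handleSnapshot loop above walks backward from the snapshot-height node via PrevBlockHash instead of forward by height, flushing the accumulated batch every 25,000 nodes. A sketch of that reverse-walk-with-periodic-flush shape, with `flush` standing in for PutHeightHashToNodeInfoBatch and parent pointers standing in for the best-chain lookup:

```go
package main

import "fmt"

type blockNode struct {
	height int
	parent *blockNode
}

// flush stands in for the batched DB write.
func flush(batch []*blockNode) {
	fmt.Printf("flushed %d nodes\n", len(batch))
}

func walkBack(tip *blockNode, batchSize int) {
	var batch []*blockNode
	// Follow parent links from the tip toward genesis, batching as we go.
	for node := tip; node != nil && node.height > 0; node = node.parent {
		batch = append(batch, node)
		if len(batch) < batchSize {
			continue
		}
		flush(batch)
		batch = nil
	}
	if len(batch) > 0 {
		flush(batch) // final partial batch
	}
}

func main() {
	// Build a tiny chain: genesis <- 1 <- 2 <- ... <- 10.
	var tip *blockNode
	for h := 0; h <= 10; h++ {
		tip = &blockNode{height: h, parent: tip}
	}
	walkBack(tip, 4) // flushes 4, 4, then the final 2
}
```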
-	locator := srv.blockchain.LatestHeaderLocator()
+	locator, locatorHeights := bestPeer.srv.blockchain.LatestHeaderLocator()
+	headerTip := bestPeer.srv.blockchain.headerTip()
+	currentBlockTip := bestPeer.srv.blockchain.blockTip()
+	glog.V(2).Infof("Server._startSync: Sending GET_HEADERS message to peer %v\n"+
+		"Block Locator Hashes & Heights: (%v, %v)\n"+
+		"Header Tip: (%v, %v)\nBlock Tip: (%v, %v)",
+		bestPeer, locator, locatorHeights, headerTip.Hash, headerTip.Height,
+		currentBlockTip.Hash, currentBlockTip.Height)
 	bestPeer.AddDeSoMessage(&MsgDeSoGetHeaders{
 		StopHash:     &BlockHash{},
 		BlockLocator: locator,
@@ -1996,7 +2218,7 @@ func (srv *Server) _relayTransactions() {
 	for _, pp := range allPeers {
 		if !pp.canReceiveInvMessages {
-			glog.V(1).Infof("Skipping invs for peer %v because not ready "+
+			glog.V(3).Infof("Skipping invs for peer %v because not ready "+
 				"yet: %v", pp, pp.canReceiveInvMessages)
 			continue
 		}
@@ -2020,8 +2242,8 @@ func (srv *Server) _relayTransactions() {
 
 			// Add the transaction to the peer's known inventory. We do
 			// it here when we enqueue the message to the peers outgoing
-			// message queue so that we don't have remember to do it later.
-			pp.knownInventory.Add(*invVect, struct{}{})
+			// message queue so that we don't have to remember to do it later.
+			pp.knownInventory.Put(*invVect)
 			invMsg.InvList = append(invMsg.InvList, invVect)
 		}
 		if len(invMsg.InvList) > 0 {
@@ -2427,7 +2649,14 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) {
 		glog.Warningf("Server._handleBlock: Received block while syncing headers: %v", blk)
 		glog.Infof("Requesting headers: %v", pp)
-		locator := srv.blockchain.LatestHeaderLocator()
+		locator, locatorHeights := pp.srv.blockchain.LatestHeaderLocator()
+		headerTip := pp.srv.blockchain.headerTip()
+		currentBlockTip := pp.srv.blockchain.blockTip()
+		glog.V(2).Infof("Server._handleBlock (chainState = SYNCING_HEADERS): Sending GET_HEADERS message to peer %v\n"+
+			"Block Locator Hashes & Heights: (%v, %v)\n"+
+			"Header Tip: (%v, %v)\nBlock Tip: (%v, %v)",
+			pp, locator, locatorHeights, headerTip.Hash, headerTip.Height,
+			currentBlockTip.Hash, currentBlockTip.Height)
 		pp.AddDeSoMessage(&MsgDeSoGetHeaders{
 			StopHash:     &BlockHash{},
 			BlockLocator: locator,
@@ -2471,7 +2700,14 @@ func (srv *Server) _handleBlock(pp *Peer, blk *MsgDeSoBlock, isLastBlock bool) {
 	// and worst case the peer will return an empty header bundle that will
 	// result in us not sending anything back because there won’t be any new
 	// blocks to request.
-	locator := srv.blockchain.LatestHeaderLocator()
+	locator, locatorHeights := srv.blockchain.LatestHeaderLocator()
+	headerTip := srv.blockchain.headerTip()
+	currentBlockTip := srv.blockchain.blockTip()
+	glog.V(2).Infof("Server._handleBlock (chain state = NEEDS_BLOCKS): Sending GET_HEADERS message to peer %v\n"+
+		"Block Locator Hashes & Heights: (%v, %v)\n"+
+		"Header Tip: (%v, %v)\nBlock Tip: (%v, %v)",
+		pp, locator, locatorHeights, headerTip.Hash, headerTip.Height,
+		currentBlockTip.Hash, currentBlockTip.Height)
 	pp.AddDeSoMessage(&MsgDeSoGetHeaders{
 		StopHash:     &BlockHash{},
 		BlockLocator: locator,
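The relay loop above records every inv it enqueues in the peer's bounded knownInventory set so the same item is never offered to that peer twice. A sketch of that dedup pattern using hashicorp/golang-lru directly; the real code goes through the project's own LRU wrapper rather than the raw library:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

// invVect is a simplified stand-in for the real inventory vector type.
type invVect struct {
	typ  int
	hash [32]byte
}

func main() {
	// Bounded set: old entries are evicted, so memory stays capped even for
	// long-lived peers.
	known, _ := lru.New[invVect, struct{}](1000)

	toRelay := []invVect{{typ: 1}, {typ: 1}, {typ: 2}}
	for _, inv := range toRelay {
		if known.Contains(inv) {
			continue // peer already knows about this item
		}
		known.Add(inv, struct{}{})
		fmt.Printf("relaying inv %+v\n", inv) // prints twice, not three times
	}
}
```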
 	// of the number of blocks we want. Or we could make the existing GetBlocks
-	// take a start hash and the other node can just return as many blcoks as it
+	// take a start hash and the other node can just return as many blocks as it
 	// can.
 
 	// Process each block in the bundle. Record our blocks per second.
diff --git a/lib/snapshot.go b/lib/snapshot.go
index 83f59fc1d..8276f03d3 100644
--- a/lib/snapshot.go
+++ b/lib/snapshot.go
@@ -5,7 +5,7 @@ import (
 	"context"
 	"encoding/hex"
 	"fmt"
-	"github.com/deso-protocol/go-deadlock"
+	"github.com/deso-protocol/core/collections"
 	"math"
 	"reflect"
 	"runtime"
@@ -14,7 +14,7 @@ import (
 	"time"
 
 	"github.com/cloudflare/circl/group"
-	"github.com/decred/dcrd/container/lru"
+	"github.com/deso-protocol/go-deadlock"
 	"github.com/dgraph-io/badger/v3"
 	"github.com/fatih/color"
 	"github.com/golang/glog"
@@ -313,7 +313,7 @@ type Snapshot struct {
 	// DatabaseCache is used to store most recent DB records that we've read/written.
 	// This is a low-level optimization for ancestral records that
 	// saves us read time when we're writing to the DB during UtxoView flush.
-	DatabaseCache lru.Map[string, []byte]
+	DatabaseCache *collections.LruCache[string, []byte]
 
 	// AncestralFlushCounter is used to offset ancestral records flush to occur only after x blocks.
 	AncestralFlushCounter uint64
@@ -483,11 +483,14 @@ func NewSnapshot(
 			"This may lead to unexpected behavior.")
 	}
 
+	databaseCache, _ := collections.NewLruCache[string, []byte](int(DatabaseCacheSize))
+
 	// Set the snapshot.
 	snap := &Snapshot{
-		mainDb:          mainDb,
-		SnapshotDbMutex: &snapshotDbMutex,
-		DatabaseCache:   *lru.NewMap[string, []byte](DatabaseCacheSize),
+		mainDb:          mainDb,
+		SnapshotDbMutex: &snapshotDbMutex,
+		DatabaseCache:   databaseCache,
+
+		AncestralFlushCounter:     uint64(0),
 		snapshotBlockHeightPeriod: snapshotBlockHeightPeriod,
 		OperationChannel:          operationChannel,
@@ -1406,7 +1409,7 @@ type StateChecksum struct {
 	ctx context.Context
 
 	// hashToCurveCache is a cache of computed hashToCurve mappings
-	hashToCurveCache lru.Map[string, group.Element]
+	hashToCurveCache *collections.LruCache[string, group.Element]
 
 	// When we want to add a database record to the state checksum, we will first have to
 	// map the record to the Ristretto255 curve using the hash_to_curve. We will then add the
@@ -1434,7 +1437,7 @@ func (sc *StateChecksum) Initialize(mainDb *badger.DB, snapshotDbMutex *sync.Mut
 	sc.maxWorkers = int64(runtime.GOMAXPROCS(0))
 
 	// Set the hashToCurveCache
-	sc.hashToCurveCache = *lru.NewMap[string, group.Element](HashToCurveCache)
+	sc.hashToCurveCache, _ = collections.NewLruCache[string, group.Element](int(HashToCurveCache))
 
 	// Set the worker pool semaphore and context.
 	sc.semaphore = semaphore.NewWeighted(sc.maxWorkers)
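The hashToCurveCache above memoizes an expensive byte-string-to-curve-element mapping so repeated records skip the computation entirely. A sketch of that memoization shape with a plain bounded map standing in for the LRU; this sketch evicts an arbitrary entry when full, while a real LRU evicts the least recently used one:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// element is a stand-in for a group element; sha256 is a placeholder for the
// real (much more expensive) hash-to-curve mapping.
type element [32]byte

func expensiveHashToCurve(b []byte) element {
	return sha256.Sum256(b)
}

type boundedCache struct {
	max  int
	data map[string]element
}

func (c *boundedCache) get(key []byte) element {
	k := string(key)
	if v, ok := c.data[k]; ok {
		return v // cache hit: skip the expensive computation
	}
	v := expensiveHashToCurve(key)
	if len(c.data) >= c.max {
		for evict := range c.data { // evict an arbitrary entry
			delete(c.data, evict)
			break
		}
	}
	c.data[k] = v
	return v
}

func main() {
	c := &boundedCache{max: 2, data: map[string]element{}}
	fmt.Println(c.get([]byte("record-1")) == c.get([]byte("record-1"))) // true
}
```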
diff --git a/lib/state_change_syncer.go b/lib/state_change_syncer.go
index 81b359fa9..5f9cc068d 100644
--- a/lib/state_change_syncer.go
+++ b/lib/state_change_syncer.go
@@ -794,7 +794,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser
 		return true, nil
 	}
 
-	blockHeight := uint64(server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Height)
+	blockHeight := uint64(server.blockchain.blockIndex.GetTip().Height)
 
 	stateChangeSyncer.MempoolFlushId = originalCommittedFlushId
 
@@ -821,7 +821,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser
 	mempoolUtxoView.Snapshot = nil
 
 	server.blockchain.ChainLock.RLock()
-	mempoolUtxoView.TipHash = server.blockchain.bestChain[len(server.blockchain.bestChain)-1].Hash
+	mempoolUtxoView.TipHash = server.blockchain.blockIndex.GetTip().Hash
 	server.blockchain.ChainLock.RUnlock()
 
 	// A new transaction is created so that we can simulate writes to the db without actually writing to the db.
@@ -861,7 +861,7 @@ func (stateChangeSyncer *StateChangeSyncer) SyncMempoolToStateSyncer(server *Ser
 	// TODO: Have Z look at if we need to do some caching in the uncommitted blocks logic.
 	// First connect the uncommitted blocks to the mempool view.
 	for _, uncommittedBlock := range uncommittedBlocks {
-		utxoViewAndOpsAtBlockHash, err := server.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash)
+		utxoViewAndOpsAtBlockHash, err := server.blockchain.GetUtxoViewAndUtxoOpsAtBlockHash(*uncommittedBlock.Hash, uint64(uncommittedBlock.Height))
 		if err != nil {
 			mempoolUtxoView.EventManager.stateSyncerFlushed(&StateSyncerFlushedEvent{
 				FlushId: originalCommittedFlushId,
diff --git a/lib/txindex.go b/lib/txindex.go
index 029f44487..3fadfae8a 100644
--- a/lib/txindex.go
+++ b/lib/txindex.go
@@ -150,8 +150,8 @@ func NewTXIndex(coreChain *Blockchain, params *DeSoParams, dataDirectory string)
 }
 
 func (txi *TXIndex) FinishedSyncing() bool {
-	committedTip, idx := txi.CoreChain.GetCommittedTip()
-	if idx == -1 {
+	committedTip, exists := txi.CoreChain.GetCommittedTip()
+	if !exists {
 		return false
 	}
 	return txi.TXIndexChain.BlockTip().Height == committedTip.Height
@@ -171,7 +171,9 @@ func (txi *TXIndex) Start() {
 			txi.updateWaitGroup.Done()
 			return
 		default:
-			if txi.CoreChain.ChainState() == SyncStateFullyCurrent {
+			chainState := txi.CoreChain.ChainState()
+			if chainState == SyncStateFullyCurrent || (chainState == SyncStateNeedBlocksss &&
+				txi.CoreChain.headerTip().Height-txi.CoreChain.blockTip().Height < 25) {
 				if !txi.CoreChain.IsFullyStored() {
 					glog.V(1).Infof("TXIndex: Waiting, blockchain is not fully stored")
 					break
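The relaxed start condition above lets the txindex begin updating not only when the chain is fully current, but also when it still needs blocks yet sits within 25 blocks of the header tip. A sketch of that predicate with simplified stand-in state names:

```go
package main

import "fmt"

type syncState int

const (
	fullyCurrent syncState = iota
	needBlocks
	syncingHeaders
)

// shouldUpdateIndex mirrors the shape of the condition in the diff: run when
// fully current, or when almost caught up on blocks.
func shouldUpdateIndex(state syncState, headerTipHeight, blockTipHeight uint32) bool {
	if state == fullyCurrent {
		return true
	}
	return state == needBlocks && headerTipHeight-blockTipHeight < 25
}

func main() {
	fmt.Println(shouldUpdateIndex(needBlocks, 1000, 990))      // true: within 25 blocks
	fmt.Println(shouldUpdateIndex(needBlocks, 1000, 900))      // false: too far behind
	fmt.Println(shouldUpdateIndex(syncingHeaders, 1000, 999)) // false: still syncing headers
}
```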
@@ -207,8 +209,7 @@
 
 // GetTxindexUpdateBlockNodes ...
 func (txi *TXIndex) GetTxindexUpdateBlockNodes() (
-	_txindexTipNode *BlockNode, _blockTipNode *BlockNode, _commonAncestor *BlockNode,
-	_detachBlocks []*BlockNode, _attachBlocks []*BlockNode) {
+	_txindexTipNode *BlockNode, _blockTipNode *BlockNode, _commonAncestor *BlockNode) {
 
 	// Get the current txindex tip.
 	txindexTipHash := txi.TXIndexChain.BlockTip()
@@ -218,33 +219,18 @@ func (txi *TXIndex) GetTxindexUpdateBlockNodes() (
 		// case.
 		glog.Error("Error: TXIndexChain had nil tip; this should never " +
 			"happen and it means the transaction index is broken.")
-		return nil, nil, nil, nil, nil
+		return nil, nil, nil
 	}
 	// If the tip of the txindex is no longer stored in the block index, it
 	// means the txindex hit a fork that we are no longer keeping track of.
 	// The only thing we can really do in this case is rebuild the entire index
 	// from scratch. To do that, we return all the blocks in the index to detach
 	// and all the blocks in the real chain to attach.
-	txindexTipNode, _ := txi.TXIndexChain.blockIndexByHash.Get(*txindexTipHash.Hash)
+	txindexTipNode, _ := txi.TXIndexChain.blockIndex.GetBlockNodeByHashAndHeight(txindexTipHash.Hash, uint64(txindexTipHash.Height))
 
 	// Get the committed tip.
 	committedTip, _ := txi.CoreChain.GetCommittedTip()
 
-	if txindexTipNode == nil {
-		glog.Info("GetTxindexUpdateBlockNodes: Txindex tip was not found; building txindex starting at genesis block")
-
-		newTxIndexBestChain, _ := txi.TXIndexChain.CopyBestChain()
-		newBlockchainBestChain, _ := txi.CoreChain.CopyBestChain()
-
-		return txindexTipNode, committedTip, nil, newTxIndexBestChain, newBlockchainBestChain
-	}
-
-	derefedTxindexTipNode := *txindexTipNode
-
-	// At this point, we know our txindex tip is in our block index so
-	// there must be a common ancestor between the tip and the block tip.
-	commonAncestor, detachBlocks, attachBlocks := GetReorgBlocks(&derefedTxindexTipNode, committedTip)
-
-	return txindexTipNode, committedTip, commonAncestor, detachBlocks, attachBlocks
+	return txindexTipNode, committedTip, txindexTipNode
 }
 
 // Update syncs the transaction index with the blockchain.
@@ -264,7 +250,7 @@ func (txi *TXIndex) Update() error {
 	// done with the rest of the function.
 	txi.TXIndexLock.Lock()
 	defer txi.TXIndexLock.Unlock()
-	txindexTipNode, blockTipNode, commonAncestor, detachBlocks, attachBlocks := txi.GetTxindexUpdateBlockNodes()
+	txindexTipNode, blockTipNode, commonAncestor := txi.GetTxindexUpdateBlockNodes()
 
 	// Note that the blockchain's ChainLock does not need to be held at this
 	// point because we're just reading blocks from the db, which never get
@@ -293,97 +279,16 @@ func (txi *TXIndex) Update() error {
 
 	// For each of the blocks we're removing, delete the transactions from
 	// the transaction index.
-	for _, blockToDetach := range detachBlocks {
-		if txi.killed {
-			glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while detaching blocks"))
-			break
-		}
-		// Go through each txn in the block and delete its mappings from our
-		// txindex.
-		glog.V(1).Infof("Update: Detaching block (height: %d, hash: %v)",
-			blockToDetach.Height, blockToDetach.Hash)
-		blockMsg, err := GetBlock(blockToDetach.Hash, txi.TXIndexChain.DB(), nil)
-		if err != nil {
-			return fmt.Errorf("Update: Problem fetching detach block "+
-				"with hash %v: %v", blockToDetach.Hash, err)
-		}
-		blockHeight := uint64(txi.CoreChain.blockTip().Height)
-		err = txi.TXIndexChain.DB().Update(func(dbTxn *badger.Txn) error {
-			for _, txn := range blockMsg.Txns {
-				if err := DbDeleteTxindexTransactionMappingsWithTxn(dbTxn, nil,
-					blockHeight, txn, txi.Params, txi.CoreChain.eventManager, true); err != nil {
-
-					return fmt.Errorf("Update: Problem deleting "+
-						"transaction mappings for transaction %v: %v", txn.Hash(), err)
-				}
-			}
-			return nil
-		})
-		if err != nil {
-			return err
-		}
-
-		// Now that all the transactions have been deleted from our txindex,
-		// it's safe to disconnect the block from our txindex chain.
-		utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager)
-		utxoOps, err := GetUtxoOperationsForBlock(
-			txi.TXIndexChain.DB(), nil, blockToDetach.Hash)
-		if err != nil {
-			return fmt.Errorf(
-				"Update: Error getting UtxoOps for block %v: %v", blockToDetach, err)
-		}
-		// Compute the hashes for all the transactions.
-		txHashes, err := ComputeTransactionHashes(blockMsg.Txns)
-		if err != nil {
-			return fmt.Errorf(
-				"Update: Error computing tx hashes for block %v: %v",
-				blockToDetach, err)
-		}
-		if err := utxoView.DisconnectBlock(blockMsg, txHashes, utxoOps, blockHeight); err != nil {
-			return fmt.Errorf("Update: Error detaching block "+
-				"%v from UtxoView: %v", blockToDetach, err)
-		}
-		if err := utxoView.FlushToDb(blockHeight); err != nil {
-			return fmt.Errorf("Update: Error flushing view to db for block "+
-				"%v: %v", blockToDetach, err)
-		}
-		// We have to flush a couple of extra things that the view doesn't flush...
-		if err := PutBestHash(txi.TXIndexChain.DB(), nil, utxoView.TipHash, ChainTypeDeSoBlock, txi.CoreChain.eventManager); err != nil {
-			return fmt.Errorf("Update: Error putting best hash for block "+
-				"%v: %v", blockToDetach, err)
-		}
-		err = txi.TXIndexChain.DB().Update(func(txn *badger.Txn) error {
-			if err := DeleteUtxoOperationsForBlockWithTxn(txn, nil, blockToDetach.Hash, txi.TXIndexChain.eventManager, true); err != nil {
-				return fmt.Errorf("Update: Error deleting UtxoOperations 1 for block %v, %v", blockToDetach.Hash, err)
-			}
-			if err := txn.Delete(BlockHashToBlockKey(blockToDetach.Hash)); err != nil {
-				return fmt.Errorf("Update: Error deleting UtxoOperations 2 for block %v %v", blockToDetach.Hash, err)
-			}
-			return nil
-		})
-
-		if err != nil {
-			return fmt.Errorf("Update: Error updating badgger: %v", err)
-		}
-		// Delete this block from the chain db so we don't get duplicate block errors.
-
-		// Remove this block from our bestChain data structures.
-		newBlockIndexByHash, newBlockIndexByHeight := txi.TXIndexChain.CopyBlockIndexes()
-		newBestChain, newBestChainMap := txi.TXIndexChain.CopyBestChain()
-		newBestChain = newBestChain[:len(newBestChain)-1]
-		delete(newBestChainMap, *(blockToDetach.Hash))
-		newBlockIndexByHash.Remove(*(blockToDetach.Hash))
-
-		txi.TXIndexChain.SetBestChainMap(newBestChain, newBestChainMap, newBlockIndexByHash, newBlockIndexByHeight)
-
-		// At this point the entries for the block should have been removed
-		// from both our Txindex chain and our transaction index mappings.
-	}
 
 	// For each of the blocks we're adding, process them on our txindex chain
 	// and add their mappings to our txn index. Compute any metadata that might
 	// be useful.
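The rewritten attach loop that follows no longer consumes a precomputed attach list; it fetches the block one past the txindex tip and then walks the best chain by height until the committed tip's hash is reached. A sketch of that shape, with `blockAtHeight` standing in for GetBlockFromBestChainByHeight:

```go
package main

import "fmt"

type block struct {
	height uint64
	hash   string
}

// attachFrom mirrors the loop below: fetch the block one past the txindex
// tip, then keep attaching and advancing by height until the tip hash is hit.
func attachFrom(txindexTipHeight uint64, tipHash string, blockAtHeight func(uint64) (block, bool)) error {
	blockToAttach, exists := blockAtHeight(txindexTipHeight + 1)
	if !exists {
		return fmt.Errorf("no block at height %d", txindexTipHeight+1)
	}
	for blockToAttach.hash != tipHash {
		fmt.Printf("attaching block %q at height %d\n", blockToAttach.hash, blockToAttach.height)
		nextHeight := blockToAttach.height + 1
		blockToAttach, exists = blockAtHeight(nextHeight)
		if !exists {
			return fmt.Errorf("no block at height %d", nextHeight)
		}
	}
	// Loop exits once blockToAttach is the committed tip, mirroring the
	// diff's loop condition.
	return nil
}

func main() {
	chain := []block{{0, "genesis"}, {1, "a"}, {2, "b"}, {3, "tip"}}
	lookup := func(h uint64) (block, bool) {
		if h < uint64(len(chain)) {
			return chain[h], true
		}
		return block{}, false
	}
	if err := attachFrom(0, "tip", lookup); err != nil {
		fmt.Println("error:", err)
	}
}
```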
+	// Get the next block after the current txindex tip hash. We know we've already processed the txindex tip hash.
+	blockToAttach, exists, err := txi.CoreChain.GetBlockFromBestChainByHeight(uint64(txindexTipNode.Height+1), false)
+	if !exists || err != nil {
+		return fmt.Errorf("Update: Problem getting block at height %d: %v", txindexTipNode.Height+1, err)
+	}
+	for !blockToAttach.Hash.IsEqual(blockTipNode.Hash) {
 		if txi.killed {
 			glog.Infof(CLog(Yellow, "TxIndex: Update: Killed while attaching blocks"))
 			break
@@ -408,7 +313,7 @@ func (txi *TXIndex) Update() error {
 		utxoView := NewUtxoView(txi.TXIndexChain.DB(), txi.Params, nil, nil, txi.CoreChain.eventManager)
 		if blockToAttach.Header.PrevBlockHash != nil && !utxoView.TipHash.IsEqual(blockToAttach.Header.PrevBlockHash) {
 			var utxoViewAndUtxoOps *BlockViewAndUtxoOps
-			utxoViewAndUtxoOps, err = txi.TXIndexChain.GetUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash)
+			utxoViewAndUtxoOps, err = txi.TXIndexChain.GetUtxoViewAndUtxoOpsAtBlockHash(*blockToAttach.Header.PrevBlockHash, blockToAttach.Header.Height-1)
 			if err != nil {
 				return fmt.Errorf("Update: Problem getting UtxoView at block hash %v: %v",
 					blockToAttach.Header.PrevBlockHash, err)
@@ -453,6 +358,11 @@ func (txi *TXIndex) Update() error {
 			return fmt.Errorf("Update: Problem attaching block %v: %v",
 				blockToAttach, err)
 		}
+		// Capture the next height before reassigning blockToAttach so the error
+		// message below can't dereference a nil block node on a failed lookup.
+		nextHeight := uint64(blockToAttach.Height + 1)
+		var exists bool
+		blockToAttach, exists, err = txi.CoreChain.GetBlockFromBestChainByHeight(nextHeight, false)
+		if !exists || err != nil {
+			return fmt.Errorf("Update: Problem getting block at height %d: %v", nextHeight, err)
+		}
 	}
 
 	glog.Infof("Update: Txindex update complete. New tip: (height: %d, hash: %v)",
diff --git a/lib/types.go b/lib/types.go
index dd7bf0518..5c486c99d 100644
--- a/lib/types.go
+++ b/lib/types.go
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"fmt"
 	"io"
-	"reflect"
 	"sort"
 
 	"github.com/deso-protocol/uint256"
@@ -238,7 +237,7 @@ func (bh *BlockHash) IsEqual(target *BlockHash) bool {
 		return false
 	}
 
-	return reflect.DeepEqual(bh[:], target[:])
+	return bytes.Equal(bh[:], target[:])
 }
 
 func (bh *BlockHash) NewBlockHash() *BlockHash {
diff --git a/scripts/mempool/mempool_dumper.go b/scripts/mempool/mempool_dumper.go
index ce4e809d8..457dcef87 100644
--- a/scripts/mempool/mempool_dumper.go
+++ b/scripts/mempool/mempool_dumper.go
@@ -56,7 +56,7 @@ func main() {
 	}
 
 	netAddr2 := net.TCPAddr{
-		IP:   netAddrss.IP,
+		IP:   netAddrss.ToLegacy().IP,
 		Port: int(netAddrss.Port),
 	}
 	conn, err := net.DialTimeout(netAddr2.Network(), netAddr2.String(), lib.DeSoMainnetParams.DialTimeout)
@@ -69,9 +69,7 @@ func main() {
 		10000, 0, &lib.DeSoMainnetParams, messagesFromPeer, nil, nil, lib.NodeSyncTypeAny, nil)
 	time.Sleep(1 * time.Second)
 
-	if err := peer.NegotiateVersion(lib.DeSoMainnetParams.VersionNegotiationTimeout); err != nil {
-		panic(err)
-	}
+	peer.Start()
 
 	// As a test, send a GetHeaders request and see if we get it back
 	if *flagCommand == "get_headers" {
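Returning to the BlockHash.IsEqual change in lib/types.go above: bytes.Equal compares the two 32-byte slices directly, whereas reflect.DeepEqual routes through runtime reflection for the same answer. A quick demonstration that the two agree on byte slices:

```go
package main

import (
	"bytes"
	"fmt"
	"reflect"
)

func main() {
	a := [32]byte{1, 2, 3}
	b := [32]byte{1, 2, 3}
	fmt.Println(bytes.Equal(a[:], b[:]))       // true, direct byte comparison
	fmt.Println(reflect.DeepEqual(a[:], b[:])) // true, but via reflection
	// For fixed-size arrays, a == b also works and avoids slicing entirely.
	fmt.Println(a == b)
}
```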