diff --git a/vendor/github.com/OpenBazaar/multiwallet/.travis.yml b/vendor/github.com/OpenBazaar/multiwallet/.travis.yml index 77f37c8225..cf81ad5677 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/.travis.yml +++ b/vendor/github.com/OpenBazaar/multiwallet/.travis.yml @@ -11,7 +11,7 @@ before_install: - go get github.com/tcnksm/ghr - go get github.com/axw/gocov/gocov - go get github.com/mattn/goveralls - - GL_DEBUG=linters_output GOPACKAGESPRINTGOLISTERRORS=1 curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.15.0 + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | bash -s -- -b $GOPATH/bin v1.15.0 install: - dep ensure script: diff --git a/vendor/github.com/OpenBazaar/multiwallet/Dockerfile.dev b/vendor/github.com/OpenBazaar/multiwallet/Dockerfile.dev deleted file mode 100755 index 2dc03c4878..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/Dockerfile.dev +++ /dev/null @@ -1,10 +0,0 @@ -FROM golang:1.11 -VOLUME /var/lib/openbazaar - -WORKDIR /go/src/github.com/OpenBazaar/multiwallet -RUN curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh && \ - go get -u github.com/derekparker/delve/cmd/dlv - -COPY . . - -ENTRYPOINT ["/bin/bash"] diff --git a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.lock b/vendor/github.com/OpenBazaar/multiwallet/Gopkg.lock deleted file mode 100644 index bb6ed82073..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.lock +++ /dev/null @@ -1,845 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:9f438ced0b98101978ebd63588a0e671406334aa2c711940351507e396b3de8d" - name = "github.com/OpenBazaar/go-ethwallet" - packages = [ - "util", - "wallet", - ] - pruneopts = "UT" - revision = "0d5cc6fff3a2f43ab3013c75ff6af14e2acc27df" - -[[projects]] - branch = "master" - digest = "1:12249c9e5740e17587799c25425a8058cb942da38e8226998cccc99a16dd9fe5" - name = "github.com/OpenBazaar/golang-socketio" - packages = [ - ".", - "protocol", - "transport", - ] - pruneopts = "UT" - revision = "4147b5f0d29491b7cacd6bf58d825f54fe2b24af" - -[[projects]] - digest = "1:37000ab67cf1d629a1611fb6f415d985b56449db3a537e215fec14499439914f" - name = "github.com/OpenBazaar/openbazaar-go" - packages = ["util"] - pruneopts = "UT" - revision = "2c8da24bc3ad1bf8827874e7bcb5151e240955bb" - version = "v0.13.8" - -[[projects]] - branch = "ethereum-master" - digest = "1:266d74b30258b3f6cff7c970b3dc7f9f3d61c8bed3ab16da0232e5860eb0ce45" - name = "github.com/OpenBazaar/spvwallet" - packages = [ - ".", - "exchangerates", - ] - pruneopts = "UT" - revision = "10951cd851492bdf52b4f7b408adcb6752312b23" - -[[projects]] - branch = "ethereum-master" - digest = "1:c411c1051b3ab7c823786bf82887580f2d93898be2504e145a67419afef5ba2d" - name = "github.com/OpenBazaar/wallet-interface" - packages = ["."] - pruneopts = "UT" - revision = "cbbb40466dcfe7d299f685fd0aa9a4fe9ea49147" - -[[projects]] - digest = "1:ef98291cf6c2dd0f53949a1899e9a58d3159ad44d20f74b62467bd2a807b01ce" - name = "github.com/VictoriaMetrics/fastcache" - packages = ["."] - pruneopts = "UT" - revision = "4d94f266cd3cecbcd97eaebee9e3d6d8cf918643" - version = "v1.4.6" - -[[projects]] - branch = "master" - digest = "1:7d191fd0c54ff370eaf6116a14dafe2a328df487baea280699f597aae858d00d" - name = "github.com/aristanetworks/goarista" - packages = ["monotime"] - pruneopts = "UT" - revision = "5d8d36c240c9af0ccde364594dd5fae756790b63" - -[[projects]] - digest = 
"1:0f98f59e9a2f4070d66f0c9c39561f68fcd1dc837b22a852d28d0003aebd1b1e" - name = "github.com/boltdb/bolt" - packages = ["."] - pruneopts = "UT" - revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8" - version = "v1.3.1" - -[[projects]] - digest = "1:38e337477887a8935559e3042ce53f14fcc24fd66635b57f423965c8297ccc90" - name = "github.com/btcsuite/btcd" - packages = [ - "addrmgr", - "blockchain", - "btcec", - "chaincfg", - "chaincfg/chainhash", - "connmgr", - "database", - "peer", - "txscript", - "wire", - ] - pruneopts = "UT" - revision = "f3ec13030e4e828869954472cbc51ac36bee5c1d" - version = "v0.20.1-beta" - -[[projects]] - branch = "master" - digest = "1:30d4a548e09bca4a0c77317c58e7407e2a65c15325e944f9c08a7b7992f8a59e" - name = "github.com/btcsuite/btclog" - packages = ["."] - pruneopts = "UT" - revision = "84c8d2346e9fc8c7b947e243b9c24e6df9fd206a" - -[[projects]] - branch = "master" - digest = "1:0faf30bd4ac78188ebeb913680b3dec83dccc62050f92fb2da3940d55e6a7977" - name = "github.com/btcsuite/btcutil" - packages = [ - ".", - "base58", - "bech32", - "bloom", - "coinset", - "hdkeychain", - "txsort", - ] - pruneopts = "UT" - revision = "e17c9730c422e7c745002430f2782b948b59c1c2" - -[[projects]] - digest = "1:7ffc24d91a12c173c18fe9ada86a05fc476f8943f4acffeddc6ec87a4f32bdef" - name = "github.com/btcsuite/btcwallet" - packages = [ - "wallet/txauthor", - "wallet/txrules", - "wallet/txsizes", - ] - pruneopts = "UT" - revision = "b19df70dddb66b27902f48cc48e69741909ef2e9" - version = "v0.11.0" - -[[projects]] - branch = "master" - digest = "1:1e6b2f7aa98b082c30a1303c29a702c369b2ec6d86b74a599bc8bbe2333db299" - name = "github.com/btcsuite/go-socks" - packages = ["socks"] - pruneopts = "UT" - revision = "4720035b7bfd2a9bb130b1c184f8bbe41b6f0d0f" - -[[projects]] - branch = "master" - digest = "1:49ad1acb33bb5b40c0d197321d3cf9ee9a29eb02f4765ab7c316e08983eb7559" - name = "github.com/btcsuite/golangcrypto" - packages = ["ripemd160"] - pruneopts = "UT" - revision = 
"53f62d9b43e87a6c56975cf862af7edf33a8d0df" - -[[projects]] - digest = "1:91fc3f4d1842584d1342364193106e80d1d532bbd1668fbd7c61627f01d0111f" - name = "github.com/btcsuite/goleveldb" - packages = [ - "leveldb/errors", - "leveldb/storage", - "leveldb/util", - ] - pruneopts = "UT" - revision = "3fd0373267b6461dbefe91cef614278064d05465" - version = "v1.0.0" - -[[projects]] - digest = "1:61406f6571eeb97717bdfaac37fa0bc5260621c4cbf3ce7635e9828dcbb5258a" - name = "github.com/cespare/xxhash" - packages = ["."] - pruneopts = "UT" - revision = "d7df74196a9e781ede915320c11c378c1b2f3a1f" - version = "v2.1.1" - -[[projects]] - branch = "master" - digest = "1:9d61b5ca59d3db0b1f1c1e9f5930b4f7c7fd954f54b70c1d83802b8805db918f" - name = "github.com/cevaris/ordered_map" - packages = ["."] - pruneopts = "UT" - revision = "3adeae072e730f1919a936e13b4923706d3f60fe" - -[[projects]] - branch = "master" - digest = "1:36236e7063db3314f32b60885ef7ddf8abab65cb055f3a21d118f7e19148cfa3" - name = "github.com/cpacia/bchutil" - packages = ["."] - pruneopts = "UT" - revision = "b126f6a35b6c2968c0877cb4d2ac5dcf67682d27" - -[[projects]] - digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "UT" - revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" - version = "v1.1.1" - -[[projects]] - digest = "1:1e9a0ec4f7e852123fefad9aadd7647eed1e9fd3716118e99a4b3dc463705c82" - name = "github.com/dchest/siphash" - packages = ["."] - pruneopts = "UT" - revision = "34f201214d993633bb24f418ba11736ab8b55aa7" - version = "v1.2.1" - -[[projects]] - digest = "1:e47d51dab652d26c3fba6f8cba403f922d02757a82abdc77e90df7948daf296e" - name = "github.com/deckarep/golang-set" - packages = ["."] - pruneopts = "UT" - revision = "cbaa98ba5575e67703b32b4b19f73c91f3c4159e" - version = "v1.7.1" - -[[projects]] - digest = "1:edb569dd02419a41ddd98768cc0e7aec922ef19dae139731e5ca750afcf6f4c5" - name = "github.com/edsrzf/mmap-go" - packages = 
["."] - pruneopts = "UT" - revision = "188cc3b666ba704534fa4f96e9e61f21f1e1ba7c" - version = "v1.0.0" - -[[projects]] - digest = "1:2b2daa41f40acd66f4aa0ae213bfd286096663c999a26deb773127b7864c6bd0" - name = "github.com/elastic/gosigar" - packages = [ - ".", - "sys/windows", - ] - pruneopts = "UT" - revision = "7aef3366157f2bfdf3e068f73ce7193573e88e0c" - version = "v0.10.5" - -[[projects]] - digest = "1:41d4c366cbfdb3623d8f6fd866b142d238bbfc3a812712278f9fcb42e8644964" - name = "github.com/ethereum/go-ethereum" - packages = [ - ".", - "accounts", - "accounts/abi", - "accounts/abi/bind", - "accounts/external", - "accounts/keystore", - "accounts/scwallet", - "accounts/usbwallet", - "accounts/usbwallet/trezor", - "common", - "common/bitutil", - "common/hexutil", - "common/math", - "common/mclock", - "common/prque", - "consensus", - "consensus/clique", - "consensus/ethash", - "consensus/misc", - "core", - "core/bloombits", - "core/rawdb", - "core/state", - "core/types", - "core/vm", - "crypto", - "crypto/blake2b", - "crypto/bn256", - "crypto/bn256/cloudflare", - "crypto/bn256/google", - "crypto/ecies", - "crypto/secp256k1", - "eth/downloader", - "ethclient", - "ethdb", - "ethdb/leveldb", - "ethdb/memorydb", - "event", - "internal/ethapi", - "log", - "metrics", - "p2p", - "p2p/discover", - "p2p/discv5", - "p2p/enode", - "p2p/enr", - "p2p/nat", - "p2p/netutil", - "params", - "rlp", - "rpc", - "signer/core", - "signer/storage", - "trie", - ] - pruneopts = "T" - revision = "017449971e1e9e220efcd97d3313a0e27f47003b" - version = "v1.9.9" - -[[projects]] - branch = "master" - digest = "1:fedce3f87da3944ec15789ff4d1d17e77554148626d079a9ffd2cae6112fdc8b" - name = "github.com/gballet/go-libpcsclite" - packages = ["."] - pruneopts = "UT" - revision = "4678299bea08415f0ca8bd71da9610625cc86e86" - -[[projects]] - digest = "1:967f26d236f25cce1fcc98b88e2ea526a556fe3f9cbf1d6cb404aa72b2b858a9" - name = "github.com/gcash/bchd" - packages = [ - "bchec", - "chaincfg", - "chaincfg/chainhash", 
- "txscript", - "wire", - ] - pruneopts = "UT" - revision = "4e8fe019ad33cead8f4d58642394e990f855d0a3" - version = "v0.15.2" - -[[projects]] - branch = "master" - digest = "1:b1053b781e9090dab5d3e916eb04c8d85b63a7f6911007c2cd1dd82fb22f7f6a" - name = "github.com/gcash/bchlog" - packages = ["."] - pruneopts = "UT" - revision = "b4f036f92fa66c88eec458f4531ff14ff87704d6" - -[[projects]] - branch = "master" - digest = "1:b1c06051d563f82aa7ec6cb3f759d51301936cc0426933c7c567d8f2fd004c59" - name = "github.com/gcash/bchutil" - packages = [ - ".", - "base58", - ] - pruneopts = "UT" - revision = "98e73ec336ba521482403cf7cda69281170e50e0" - -[[projects]] - digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d" - name = "github.com/go-stack/stack" - packages = ["."] - pruneopts = "UT" - revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a" - version = "v1.8.0" - -[[projects]] - branch = "master" - digest = "1:228f39dbc93e88d95a024f45d5beea0a64cd33e89bdfb841a1669abb74f8b1e9" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "protoc-gen-go/descriptor", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "4e55bbcbfaa105a596caba5bbc20d392806beda9" - -[[projects]] - digest = "1:e4f5819333ac698d294fe04dbf640f84719658d5c7ce195b10060cc37292ce79" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "UT" - revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a" - version = "v0.0.1" - -[[projects]] - digest = "1:582b704bebaa06b48c29b0cec224a6058a09c86883aaddabde889cd1a5f73e1b" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "UT" - revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" - version = "v1.1.1" - -[[projects]] - branch = "master" - digest = "1:e62657cca9badaa308d86e7716083e4c5933bb78e30a17743fc67f50be26f6f4" - name = "github.com/gorilla/websocket" - packages = ["."] - pruneopts = "UT" - revision = "c3e18be99d19e6b3e8f1559eea2c161a665c4b6b" - 
-[[projects]] - digest = "1:e631368e174090a276fc00b48283f92ac4ccfbbb1945bcfcee083f5f9210dc00" - name = "github.com/hashicorp/golang-lru" - packages = [ - ".", - "simplelru", - ] - pruneopts = "UT" - revision = "14eae340515388ca95aa8e7b86f0de668e981f54" - version = "v0.5.4" - -[[projects]] - digest = "1:c00cc6d95a674b4b923ac069d364445043bc67836e9bd8aeff8440cfbe6a2cc7" - name = "github.com/huin/goupnp" - packages = [ - ".", - "dcps/internetgateway1", - "dcps/internetgateway2", - "httpu", - "scpd", - "soap", - "ssdp", - ] - pruneopts = "UT" - revision = "656e61dfadd241c7cbdd22a023fa81ecb6860ea8" - version = "v1.0.0" - -[[projects]] - digest = "1:94d189f7124eba234224e1a3d28b943d826d480cf71cc71d53c2eac8132f31ed" - name = "github.com/hunterlong/tokenbalance" - packages = ["."] - pruneopts = "UT" - revision = "1fcaffaac40cf0559ccba1276d90757bdf1284e9" - version = "v1.72" - -[[projects]] - digest = "1:71193da2829127d2cd7d2045175a65ef04d79176de5f1ebb185d331daa53b5c9" - name = "github.com/jackpal/go-nat-pmp" - packages = ["."] - pruneopts = "UT" - revision = "059203efa1edd7130293a583541b8308e7c640c4" - version = "v1.0.2" - -[[projects]] - branch = "master" - digest = "1:459271b8268fe541549b299f65160b1df5abe9ffef0426cc38607f771dbc6bb4" - name = "github.com/jessevdk/go-flags" - packages = ["."] - pruneopts = "UT" - revision = "c0795c8afcf41dd1d786bebce68636c199b3bb45" - -[[projects]] - branch = "master" - digest = "1:f275e994e11f9bec072885d81a8aaa1a95bdd0ebca4cd78f1d37d3d84f88f3b8" - name = "github.com/karalabe/usb" - packages = ["."] - pruneopts = "T" - revision = "911d15fe12a9c411cf5d0dd5635231c759399bed" - -[[projects]] - branch = "master" - digest = "1:5a580af993fe973c9c96ca9ed10f64e979a85eab3b3f56e50003fefb91b10db9" - name = "github.com/ltcsuite/ltcd" - packages = [ - "btcec", - "chaincfg", - "chaincfg/chainhash", - "txscript", - "wire", - ] - pruneopts = "UT" - revision = "92166e4124994fcca26545ee95d5532749593596" - -[[projects]] - branch = "master" - digest = 
"1:7e604729bde3f3f9f01454a2e13b99e475ec725794ae5b9d4f8a62ccd8608493" - name = "github.com/ltcsuite/ltcutil" - packages = [ - ".", - "base58", - "bech32", - ] - pruneopts = "UT" - revision = "6bec450ea6ad382fc379160f355562b64382366c" - -[[projects]] - branch = "master" - digest = "1:a302d142a103687a0dc12e2c1fffc4128011b6ed27dbc969c549799b23f57b8d" - name = "github.com/ltcsuite/ltcwallet" - packages = ["wallet/txrules"] - pruneopts = "UT" - revision = "fc621f0f45c334831b2dda5ae8b85cf0185fe114" - -[[projects]] - digest = "1:eb1bffab7260bf5ddc95fc2c41d4bfee1a4f5fe18194b3946fe8a9e9121a282f" - name = "github.com/mattn/go-runewidth" - packages = ["."] - pruneopts = "UT" - revision = "a4df4ddbff020e131056d91f580a1cdcd806e3ae" - version = "v0.0.8" - -[[projects]] - branch = "master" - digest = "1:130cefe87d7eeefc824978dcb78e35672d4c49a11f25c153fbf0cfd952756fa3" - name = "github.com/minio/blake2b-simd" - packages = ["."] - pruneopts = "UT" - revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4" - -[[projects]] - digest = "1:5d231480e1c64a726869bc4142d270184c419749d34f167646baa21008eb0a79" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "UT" - revision = "af06845cf3004701891bf4fdb884bfe4920b3727" - version = "v1.1.0" - -[[projects]] - digest = "1:00e810a1a88ae7bccca5a31de78ddb0db5ce5cc27dcd5744aa452db3e65f2a9a" - name = "github.com/nanmu42/etherscan-api" - packages = ["."] - pruneopts = "UT" - revision = "586884d258b6b8b22d5bd039e270d33572888f54" - version = "v1.1.0" - -[[projects]] - digest = "1:b8261a46d75566ebf5b4fb6bb762f54f47e6633e0995118393afc80bb1f428f5" - name = "github.com/olekukonko/tablewriter" - packages = ["."] - pruneopts = "UT" - revision = "876dd0e0227ec99c0243b639b92139915b65331a" - version = "v0.0.4" - -[[projects]] - digest = "1:5b3b29ce0e569f62935d9541dff2e16cc09df981ebde48e82259076a73a3d0c7" - name = "github.com/op/go-logging" - packages = ["."] - pruneopts = "UT" - revision = "b2cb9fa56473e98db8caba80237377e83fe44db5" - 
version = "v1" - -[[projects]] - digest = "1:e5d0bd87abc2781d14e274807a470acd180f0499f8bf5bb18606e9ec22ad9de9" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "UT" - revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" - version = "v1.2" - -[[projects]] - digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "UT" - revision = "614d223910a179a466c1767a985424175c39b465" - version = "v0.9.1" - -[[projects]] - digest = "1:19a227084137c73d7a519ff90acc9fa69855c2ba134bb9c7dfe94e9ad2949c64" - name = "github.com/prometheus/tsdb" - packages = ["fileutil"] - pruneopts = "UT" - revision = "7762249358193da791ec62e72b080d908f96e776" - version = "v0.10.0" - -[[projects]] - digest = "1:31d83d1b1c288073c91abadee3caec87de2a1fb5dbe589039264a802e67a26b8" - name = "github.com/rjeczalik/notify" - packages = ["."] - pruneopts = "UT" - revision = "69d839f37b13a8cb7a78366f7633a4071cb43be7" - version = "v0.9.2" - -[[projects]] - digest = "1:c5dfe46811af7e2eff7c11fc84b6c841520338613c056f659f262d5a4fb42fa8" - name = "github.com/rs/cors" - packages = ["."] - pruneopts = "UT" - revision = "db0fe48135e83b5812a5a31be0eea66984b1b521" - version = "v1.7.0" - -[[projects]] - digest = "1:81e02c4edb639c80559c0650f9401d3e2dcc3256d1fa215382bb7c83c1db9126" - name = "github.com/shopspring/decimal" - packages = ["."] - pruneopts = "UT" - revision = "cd690d0c9e2447b1ef2a129a6b7b49077da89b8e" - version = "1.1.0" - -[[projects]] - branch = "develop" - digest = "1:6deccaba5762377091f2e5b26dba70e630e01edb3f95d1a6a59d9b098bd4358f" - name = "github.com/status-im/keycard-go" - packages = ["derivationpath"] - pruneopts = "UT" - revision = "f38e9a19958eb492359ace5d068a7ce42e7824f8" - -[[projects]] - digest = "1:266e2f508feb9a9a765bfeb74d116a88514248b2f8428788dcce574bd026b9c0" - name = "github.com/steakknife/bloomfilter" - packages = ["."] - pruneopts = "UT" - revision = 
"99ee86d9200fcc2ffde62f508329bd6627c0a307" - version = "1.0.4" - -[[projects]] - digest = "1:5ca4bdccd72e66aaba5b52f9c4a21f1021102f0919432fe138ad5d48abf06833" - name = "github.com/steakknife/hamming" - packages = ["."] - pruneopts = "UT" - revision = "003c143a81c25ea5e263d692919c611c7122ae6b" - version = "0.2.5" - -[[projects]] - digest = "1:c345767003e0d53971e0f409a42b875dab3fee7ec269557c863ebbb194341420" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util", - ] - pruneopts = "UT" - revision = "758128399b1df3a87e92df6c26c1d2063da8fabe" - -[[projects]] - digest = "1:91b40a2adb6b4ccd51b1dfb306edfa76139e1599b01666346085472b386f5447" - name = "github.com/tyler-smith/go-bip39" - packages = [ - ".", - "wordlists", - ] - pruneopts = "UT" - revision = "5e3853c3f4e1a44df487c7efeb064ee8b43755de" - version = "1.0.2" - -[[projects]] - branch = "master" - digest = "1:7dca0da64f5937af74f21618cdb812c8f16a7d042316dd5bf2f1dfd086be3fc6" - name = "github.com/wsddn/go-ecdh" - packages = ["."] - pruneopts = "UT" - revision = "48726bab92085232373de4ec5c51ce7b441c63a0" - -[[projects]] - branch = "master" - digest = "1:b90cae04932efcf7747730936b3ed1d52a913a97e7a658869bed2539b7a956df" - name = "golang.org/x/crypto" - packages = [ - "curve25519", - "pbkdf2", - "ripemd160", - "scrypt", - "sha3", - "ssh/terminal", - ] - pruneopts = "UT" - revision = "6d4e4cb37c7d6416dfea8472e751c7b6615267a6" - -[[projects]] - branch = "master" - digest = "1:de276a36cf59627923c9aca0ed1425b0dd49a90a0e4542abb97de61cc8f625e6" - name = "golang.org/x/net" - packages = [ - "context", - "html", - "html/atom", - "html/charset", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/socks", - "internal/timeseries", - "proxy", - "trace", - ] - pruneopts = "UT" - revision = 
"6afb5195e5aab057fda82e27171243402346b0ad" - -[[projects]] - branch = "master" - digest = "1:6c49651edbcfad5c1ed228e827b5dbeefe656f8b71852093d9f4c470c71eb21a" - name = "golang.org/x/sys" - packages = [ - "cpu", - "unix", - "windows", - ] - pruneopts = "UT" - revision = "b77594299b429d05028403d72b68172959c7dad5" - -[[projects]] - digest = "1:28deae5fe892797ff37a317b5bcda96d11d1c90dadd89f1337651df3bc4c586e" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "encoding", - "encoding/charmap", - "encoding/htmlindex", - "encoding/internal", - "encoding/internal/identifier", - "encoding/japanese", - "encoding/korean", - "encoding/simplifiedchinese", - "encoding/traditionalchinese", - "encoding/unicode", - "internal/colltab", - "internal/gen", - "internal/language", - "internal/language/compact", - "internal/tag", - "internal/triegen", - "internal/ucd", - "internal/utf8internal", - "language", - "runes", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475" - version = "v0.3.2" - -[[projects]] - branch = "master" - digest = "1:583a0c80f5e3a9343d33aea4aead1e1afcc0043db66fdf961ddd1fe8cd3a4faf" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "ca5a22157cba8746e7aa978de1b1ac4085150840" - -[[projects]] - branch = "master" - digest = "1:fe5ba00f68cf67a31f8dca3a0942fd76612868e2e915c5a70a6e3926ba2ac258" - name = "google.golang.org/grpc" - packages = [ - ".", - "attributes", - "backoff", - "balancer", - "balancer/base", - "balancer/roundrobin", - "binarylog/grpc_binarylog_v1", - "codes", - "connectivity", - "credentials", - "credentials/internal", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/balancerload", - "internal/binarylog", - "internal/buffer", - "internal/channelz", - "internal/envconfig", - 
"internal/grpcrand", - "internal/grpcsync", - "internal/resolver/dns", - "internal/resolver/passthrough", - "internal/syscall", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "reflection", - "reflection/grpc_reflection_v1alpha", - "resolver", - "serviceconfig", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "3311b9ea68a8c6c1554506af68fcbd9fe3e1c74f" - -[[projects]] - branch = "v1" - digest = "1:b8621abaa059e2a06b87dada1b8439ddc53b71e7b7794346cf3cc92c6fad9f0d" - name = "gopkg.in/jarcoal/httpmock.v1" - packages = ["."] - pruneopts = "UT" - revision = "a728a90ba3c33b3752e8a8ad8c0409cb78a62287" - -[[projects]] - branch = "v2" - digest = "1:3d3f9391ab615be8655ae0d686a1564f3fec413979bb1aaf018bac1ec1bb1cc7" - name = "gopkg.in/natefinch/npipe.v2" - packages = ["."] - pruneopts = "UT" - revision = "c1b8fa8bdccecb0b8db834ee0b92fdbcfa606dd6" - -[[projects]] - digest = "1:b75b3deb2bce8bc079e16bb2aecfe01eb80098f5650f9e93e5643ca8b7b73737" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "UT" - revision = "1f64d6156d11335c3f22d9330b0ad14fc1e789ce" - version = "v2.2.7" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/OpenBazaar/go-ethwallet/wallet", - "github.com/OpenBazaar/golang-socketio", - "github.com/OpenBazaar/golang-socketio/protocol", - "github.com/OpenBazaar/golang-socketio/transport", - "github.com/OpenBazaar/spvwallet", - "github.com/OpenBazaar/spvwallet/exchangerates", - "github.com/OpenBazaar/wallet-interface", - "github.com/btcsuite/btcd/blockchain", - "github.com/btcsuite/btcd/btcec", - "github.com/btcsuite/btcd/chaincfg", - "github.com/btcsuite/btcd/chaincfg/chainhash", - "github.com/btcsuite/btcd/txscript", - "github.com/btcsuite/btcd/wire", - "github.com/btcsuite/btcutil", - "github.com/btcsuite/btcutil/base58", - "github.com/btcsuite/btcutil/bech32", - "github.com/btcsuite/btcutil/coinset", - "github.com/btcsuite/btcutil/hdkeychain", - 
"github.com/btcsuite/btcutil/txsort", - "github.com/btcsuite/btcwallet/wallet/txauthor", - "github.com/btcsuite/btcwallet/wallet/txrules", - "github.com/btcsuite/golangcrypto/ripemd160", - "github.com/cpacia/bchutil", - "github.com/gcash/bchd/chaincfg/chainhash", - "github.com/gcash/bchd/txscript", - "github.com/gcash/bchd/wire", - "github.com/golang/protobuf/proto", - "github.com/golang/protobuf/ptypes/timestamp", - "github.com/gorilla/websocket", - "github.com/jessevdk/go-flags", - "github.com/ltcsuite/ltcd/chaincfg", - "github.com/ltcsuite/ltcd/chaincfg/chainhash", - "github.com/ltcsuite/ltcutil", - "github.com/ltcsuite/ltcutil/base58", - "github.com/ltcsuite/ltcwallet/wallet/txrules", - "github.com/minio/blake2b-simd", - "github.com/op/go-logging", - "github.com/tyler-smith/go-bip39", - "golang.org/x/crypto/ripemd160", - "golang.org/x/net/context", - "golang.org/x/net/proxy", - "google.golang.org/grpc", - "google.golang.org/grpc/reflection", - "gopkg.in/jarcoal/httpmock.v1", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.toml b/vendor/github.com/OpenBazaar/multiwallet/Gopkg.toml deleted file mode 100644 index 4f0c6849ff..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/Gopkg.toml +++ /dev/null @@ -1,126 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - branch = "master" - name = "github.com/OpenBazaar/golang-socketio" - -[[constraint]] - branch = "ethereum-master" - name = "github.com/OpenBazaar/spvwallet" - -[[constraint]] - branch = "ethereum-master" - name = "github.com/OpenBazaar/wallet-interface" - -[[constraint]] - version = "v0.20.1-beta" - name = "github.com/btcsuite/btcd" - -[[constraint]] - branch = "master" - name = "github.com/btcsuite/btcutil" - -[[constraint]] - version = "v0.11.0" - name = "github.com/btcsuite/btcwallet" - -[[constraint]] - branch = "master" - name = "github.com/btcsuite/golangcrypto" - -[[constraint]] - branch = "master" - name = "github.com/cpacia/bchutil" - -[[constraint]] - branch = "master" - name = "github.com/golang/protobuf" - -[[constraint]] - branch = "master" - name = "github.com/gorilla/websocket" - -[[constraint]] - branch = "master" - name = "github.com/jessevdk/go-flags" - -[[constraint]] - branch = "master" - name = "github.com/ltcsuite/ltcd" - -[[constraint]] - branch = "master" - name = "github.com/ltcsuite/ltcutil" - -[[constraint]] - branch = "master" - name = "github.com/ltcsuite/ltcwallet" - -[[constraint]] - branch = "master" - name = "github.com/minio/blake2b-simd" - -[[constraint]] - version = "v1.0" - name = "github.com/op/go-logging" - -[[constraint]] - version = "v1.0.2" - name = "github.com/tyler-smith/go-bip39" - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] 
- branch = "master" - name = "google.golang.org/grpc" - -[[constraint]] - branch = "v1" - name = "gopkg.in/jarcoal/httpmock.v1" - -[[override]] - revision = "758128399b1df3a87e92df6c26c1d2063da8fabe" - name = "github.com/syndtr/goleveldb" - -[prune] - go-tests = true - unused-packages = true - -[[prune.project]] - name = "github.com/ethereum/go-ethereum" - unused-packages = false - -[[prune.project]] - name = "github.com/karalabe/usb" - unused-packages = false diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoin/sign_test.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/sign_test.go new file mode 100644 index 0000000000..b2f0c3bbef --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/sign_test.go @@ -0,0 +1,691 @@ +package bitcoin + +import ( + "bytes" + "encoding/hex" + "github.com/OpenBazaar/multiwallet/util" + "testing" + "time" + + "github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/multiwallet/keys" + "github.com/OpenBazaar/multiwallet/model/mock" + "github.com/OpenBazaar/multiwallet/service" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/hdkeychain" +) + +type FeeResponse struct { + Priority int `json:"priority"` + Normal int `json:"normal"` + Economic int `json:"economic"` +} + +func newMockWallet() (*BitcoinWallet, error) { + mockDb := datastore.NewMockMultiwalletDatastore() + + db, err := mockDb.GetDatastoreForWallet(wallet.Bitcoin) + if err != nil { + return nil, err + } + + params := &chaincfg.MainNetParams + + seed, err := hex.DecodeString("16c034c59522326867593487c03a8f9615fb248406dd0d4ffb3a6b976a248403") + if err != nil { + return nil, err + } + master, err := hdkeychain.NewMaster(seed, params) + if err != nil { + return nil, err + } + km, err := 
keys.NewKeyManager(db.Keys(), params, master, wallet.Bitcoin, keyToAddress) + if err != nil { + return nil, err + } + + fp := util.NewFeeProvider(2000, 300, 200, 100, 50, nil) + + bw := &BitcoinWallet{ + params: params, + km: km, + db: db, + fp: fp, + } + cli := mock.NewMockApiClient(bw.AddressToScript) + ws, err := service.NewWalletService(db, km, cli, params, wallet.Bitcoin, cache.NewMockCacher()) + if err != nil { + return nil, err + } + + bw.client = cli + bw.ws = ws + return bw, nil +} + +func TestWalletService_VerifyWatchScriptFilter(t *testing.T) { + // Verify that AddWatchedAddress should never add a script which already represents a key from its own wallet + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + keys := w.km.GetKeys() + + addr, err := w.km.KeyToAddress(keys[0]) + if err != nil { + t.Fatal(err) + } + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) != 0 { + t.Error("Put watched scripts fails on key manager owned key") + } +} + +func TestWalletService_VerifyWatchScriptPut(t *testing.T) { + // Verify that AddWatchedAddress should add a script which does not represent a key from its own wallet + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + + addr, err := w.DecodeAddress("16E4rWXEDcDRfmuMmJ6tTvL2uwHNgWF4yR") + if err != nil { + t.Fatal(err) + } + + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) == 0 { + t.Error("Put watched scripts fails on non-key manager owned key") + } +} + +func waitForTxnSync(t *testing.T, txnStore wallet.Txns) { + // Look for a known txn, this sucks a bit. 
It would be better to check if the + // number of stored txns matched the expected, but not all the mock + // transactions are relevant, so the numbers don't add up. + // Even better would be for the wallet to signal that the initial sync was + // done. + lastTxn := mock.MockTransactions[len(mock.MockTransactions)-2] + txHash, err := chainhash.NewHashFromStr(lastTxn.Txid) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 100; i++ { + if _, err := txnStore.Get(*txHash); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatal("timeout waiting for wallet to sync transactions") +} + +func TestBitcoinWallet_buildTx(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + time.Sleep(time.Second / 2) + + waitForTxnSync(t, w.db.Txns()) + addr, err := w.DecodeAddress("1AhsMpyyyVyPZ9KDUgwsX3zTDJWWSsRo4f") + if err != nil { + t.Error(err) + } + + // Test build normal tx + tx, err := w.buildTx(1500000, addr, wallet.NORMAL, nil) + if err != nil { + t.Error(err) + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if !validChangeAddress(tx, w.db, w.params) { + t.Error("Built tx does not contain a valid change output") + } + + // Insuffient funds + _, err = w.buildTx(1000000000, addr, wallet.NORMAL, nil) + if err != wallet.ErrorInsuffientFunds { + t.Error("Failed to throw insuffient funds error") + } + + // Dust + _, err = w.buildTx(1, addr, wallet.NORMAL, nil) + if err != wallet.ErrorDustAmount { + t.Error("Failed to throw dust error") + } +} + +func TestBitcoinWallet_buildSpendAllTx(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + time.Sleep(time.Second / 2) + + waitForTxnSync(t, w.db.Txns()) + addr, err := w.DecodeAddress("1AhsMpyyyVyPZ9KDUgwsX3zTDJWWSsRo4f") + if err != nil { + t.Error(err) + } + + // Test build spendAll tx + 
tx, err := w.buildSpendAllTx(addr, wallet.NORMAL) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Fatal(err) + } + spendableUtxos := 0 + for _, u := range utxos { + if !u.WatchOnly { + spendableUtxos++ + } + } + if len(tx.TxIn) != spendableUtxos { + t.Error("Built tx does not spend all available utxos") + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if len(tx.TxOut) != 1 { + t.Error("Built tx should only have one output") + } + + // Verify the signatures on each input using the scripting engine + for i, in := range tx.TxIn { + var prevScript []byte + for _, u := range utxos { + if util.OutPointsEqual(u.Op, in.PreviousOutPoint) { + prevScript = u.ScriptPubkey + break + } + } + vm, err := txscript.NewEngine(prevScript, tx, i, txscript.StandardVerifyFlags, nil, nil, 0) + if err != nil { + t.Fatal(err) + } + if err := vm.Execute(); err != nil { + t.Error(err) + } + } +} + +func containsOutput(tx *wire.MsgTx, addr btcutil.Address) bool { + for _, o := range tx.TxOut { + script, _ := txscript.PayToAddrScript(addr) + if bytes.Equal(script, o.PkScript) { + return true + } + } + return false +} + +func validInputs(tx *wire.MsgTx, db wallet.Datastore) bool { + utxos, _ := db.Utxos().GetAll() + uMap := make(map[wire.OutPoint]bool) + for _, u := range utxos { + uMap[u.Op] = true + } + for _, in := range tx.TxIn { + if !uMap[in.PreviousOutPoint] { + return false + } + } + return true +} + +func validChangeAddress(tx *wire.MsgTx, db wallet.Datastore, params *chaincfg.Params) bool { + for _, out := range tx.TxOut { + _, addrs, _, err := txscript.ExtractPkScriptAddrs(out.PkScript, params) + if err != nil { + continue + } + if len(addrs) == 0 { + continue + } + _, err = db.Keys().GetPathForKey(addrs[0].ScriptAddress()) + if err == nil { + return true + } + } + return false +} + +func 
TestBitcoinWallet_GenerateMultisigScript(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + key3, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + keys := []hdkeychain.ExtendedKey{*key1, *key2, *key3} + + // test without timeout + addr, redeemScript, err := w.generateMultisigScript(keys, 2, 0, nil) + if err != nil { + t.Error(err) + } + if addr.String() != "bc1q7ckk79my7g0jltxtae34yk7e6nzth40dy6j6a67c96mhh6ue0hyqtmf66p" { + t.Error("Returned invalid address") + } + + rs := "52" + // OP_2 + "21" + // OP_PUSHDATA(33) + "03c157f2a7c178430972263232c9306110090c50b44d4e906ecd6d377eec89a53c" + // pubkey1 + "21" + // OP_PUSHDATA(33) + "0205b02b9dbe570f36d1c12e3100e55586b2b9dc61d6778c1d24a8eaca03625e7e" + // pubkey2 + "21" + // OP_PUSHDATA(33) + "030c83b025cd6bdd8c06e93a2b953b821b4a8c29da211335048d7dc3389706d7e8" + // pubkey3 + "53" + // OP_3 + "ae" // OP_CHECKMULTISIG + rsBytes, err := hex.DecodeString(rs) + if err != nil { + t.Error(err) + } + if !bytes.Equal(rsBytes, redeemScript) { + t.Error("Returned invalid redeem script") + } + + // test with timeout + key4, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + addr, redeemScript, err = w.generateMultisigScript(keys, 2, time.Hour*10, key4) + if err != nil { + t.Error(err) + } + if addr.String() != "bc1qlx7djex36u6ttf7kvqk0uzhvyu0ug3t695r4xjqz0s7pl4kkyzmqwxp2mc" { + t.Error("Returned invalid address") + } + + rs = "63" + // OP_IF + "52" + // OP_2 + "21" + // OP_PUSHDATA(33) + "03c157f2a7c178430972263232c9306110090c50b44d4e906ecd6d377eec89a53c" + // pubkey1 + "21" + // OP_PUSHDATA(33) + "0205b02b9dbe570f36d1c12e3100e55586b2b9dc61d6778c1d24a8eaca03625e7e" + // pubkey2 + "21" + // OP_PUSHDATA(33) + 
"030c83b025cd6bdd8c06e93a2b953b821b4a8c29da211335048d7dc3389706d7e8" + // pubkey3 + "53" + // OP_3 + "ae" + // OP_CHECKMULTISIG + "67" + // OP_ELSE + "01" + // OP_PUSHDATA(1) + "3c" + // 60 blocks + "b2" + // OP_CHECKSEQUENCEVERIFY + "75" + // OP_DROP + "21" + // OP_PUSHDATA(33) + "02c2902e25457d7780471890b957fbbc3d80af94e3bba9a6b89fd28f618bf4147e" + // timeout pubkey + "ac" + // OP_CHECKSIG + "68" // OP_ENDIF + rsBytes, err = hex.DecodeString(rs) + if err != nil { + t.Error(err) + } + if !bytes.Equal(rsBytes, redeemScript) { + t.Error("Returned invalid redeem script") + } +} + +func TestBitcoinWallet_newUnsignedTransaction(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + addr, err := w.DecodeAddress("1AhsMpyyyVyPZ9KDUgwsX3zTDJWWSsRo4f") + if err != nil { + t.Error(err) + } + + script, err := txscript.PayToAddrScript(addr) + if err != nil { + t.Error(err) + } + out := wire.NewTxOut(10000, script) + outputs := []*wire.TxOut{out} + + changeSource := func() ([]byte, error) { + addr := w.CurrentAddress(wallet.INTERNAL) + script, err := txscript.PayToAddrScript(addr) + if err != nil { + return []byte{}, err + } + return script, nil + } + + inputSource := func(target btcutil.Amount) (total btcutil.Amount, inputs []*wire.TxIn, inputValues []btcutil.Amount, scripts [][]byte, err error) { + total += btcutil.Amount(utxos[0].Value) + in := wire.NewTxIn(&utxos[0].Op, []byte{}, [][]byte{}) + in.Sequence = 0 // Opt-in RBF so we can bump fees + inputs = append(inputs, in) + return total, inputs, inputValues, scripts, nil + } + + // Regular transaction + authoredTx, err := newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err != nil { + t.Error(err) + } + if len(authoredTx.Tx.TxOut) != 2 { + t.Error("Returned incorrect number of outputs") + } + if len(authoredTx.Tx.TxIn) != 1 { + 
t.Error("Returned incorrect number of inputs") + } + + // Insufficient funds + outputs[0].Value = 1000000000 + _, err = newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err == nil { + t.Error("Failed to return insuffient funds error") + } +} + +func TestBitcoinWallet_CreateMultisigSignature(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs) != 2 { + t.Error(err) + } + for _, sig := range sigs { + if len(sig.Signature) == 0 { + t.Error("Returned empty signature") + } + } +} + +func buildTxData(w *BitcoinWallet) ([]wallet.TransactionInput, []wallet.TransactionOutput, []byte, error) { + redeemScript := "522103c157f2a7c178430972263232c9306110090c50b44d4e906ecd6d377eec89a53c210205b02b9dbe570f36d1c12e3100e55586b2b9dc61d6778c1d24a8eaca03625e7e21030c83b025cd6bdd8c06e93a2b953b821b4a8c29da211335048d7dc3389706d7e853ae" + redeemScriptBytes, err := hex.DecodeString(redeemScript) + if err != nil { + return nil, nil, nil, err + } + h1, err := hex.DecodeString("1a20f4299b4fa1f209428dace31ebf4f23f13abd8ed669cebede118343a6ae05") + if err != nil { + return nil, nil, nil, err + } + in1 := wallet.TransactionInput{ + OutpointHash: h1, + OutpointIndex: 1, + } + h2, err := hex.DecodeString("458d88b4ae9eb4a347f2e7f5592f1da3b9ddf7d40f307f6e5d7bc107a9b3e90e") + if err != nil { + return nil, nil, nil, err + } + in2 := wallet.TransactionInput{ + OutpointHash: h2, + OutpointIndex: 0, + } + addr, err := w.DecodeAddress("1AhsMpyyyVyPZ9KDUgwsX3zTDJWWSsRo4f") + if err != nil { + return nil, nil, nil, err + } + + out := wallet.TransactionOutput{ + Value: 20000, + Address: addr, + } + return []wallet.TransactionInput{in1, in2}, 
[]wallet.TransactionOutput{out}, redeemScriptBytes, nil +} + +func TestBitcoinWallet_Multisign(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs1, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs1) != 2 { + t.Error(err) + } + sigs2, err := w.CreateMultisigSignature(ins, outs, key2, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs2) != 2 { + t.Error(err) + } + txBytes, err := w.Multisign(ins, outs, sigs1, sigs2, redeemScript, 50, false) + if err != nil { + t.Error(err) + } + + tx := wire.NewMsgTx(0) + tx.BtcDecode(bytes.NewReader(txBytes), wire.ProtocolVersion, wire.WitnessEncoding) + if len(tx.TxIn) != 2 { + t.Error("Transactions has incorrect number of inputs") + } + if len(tx.TxOut) != 1 { + t.Error("Transactions has incorrect number of outputs") + } + for _, in := range tx.TxIn { + if len(in.Witness) == 0 { + t.Error("Input witness has zero length") + } + } +} + +func TestBitcoinWallet_bumpFee(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + ch, err := chainhash.NewHashFromStr("ff2b865c3b73439912eebf4cce9a15b12c7d7bcdd14ae1110a90541426c4e7c5") + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + for _, u := range utxos { + if u.Op.Hash.IsEqual(ch) { + u.AtHeight = 0 + w.db.Utxos().Put(u) + } + } + + w.db.Txns().UpdateHeight(*ch, 0, time.Now()) + + // Test unconfirmed + _, err = w.bumpFee(*ch) + if err != nil { + t.Error(err) + } + + err = w.db.Txns().UpdateHeight(*ch, 1289597, time.Now()) + if err != nil { + t.Error(err) + } + + // Test 
confirmed + _, err = w.bumpFee(*ch) + if err == nil { + t.Error("Should not be able to bump fee of confirmed txs") + } +} + +func TestBitcoinWallet_sweepAddress(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + + var in wallet.TransactionInput + var key *hdkeychain.ExtendedKey + for _, ut := range utxos { + if ut.Value > 0 && !ut.WatchOnly { + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + key, err = w.km.GetKeyForScript(addr.ScriptAddress()) + if err != nil { + t.Error(err) + } + h, err := hex.DecodeString(ut.Op.Hash.String()) + if err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + } + } + // P2PKH addr + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key, nil, wallet.NORMAL) + if err != nil { + t.Error(err) + return + } + + // 1 of 2 P2WSH + for _, ut := range utxos { + if ut.Value > 0 && ut.WatchOnly { + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + h, err := hex.DecodeString(ut.Op.Hash.String()) + if err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + } + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + _, redeemScript, err := w.GenerateMultisigScript([]hdkeychain.ExtendedKey{*key1, *key2}, 1, 0, nil) + if err != nil { + t.Error(err) + } + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key1, &redeemScript, wallet.NORMAL) + if err != nil { + t.Error(err) + } +} + +func TestBitcoinWallet_estimateSpendFee(t *testing.T) { + w, err := newMockWallet() + if err != nil 
{ + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + fee, err := w.estimateSpendFee(1000, wallet.NORMAL) + if err != nil { + t.Error(err) + } + if fee == 0 { + t.Error("Returned incorrect fee") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoin/txsizes_test.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/txsizes_test.go new file mode 100644 index 0000000000..7555b7f8e8 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoin/txsizes_test.go @@ -0,0 +1,84 @@ +package bitcoin + +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* Copied here from a btcd internal package*/ + +import ( + "bytes" + "encoding/hex" + "github.com/btcsuite/btcd/wire" + "testing" +) + +const ( + p2pkhScriptSize = P2PKHPkScriptSize + p2shScriptSize = 23 +) + +func makeInts(value int, n int) []int { + v := make([]int, n) + for i := range v { + v[i] = value + } + return v +} + +func TestEstimateSerializeSize(t *testing.T) { + tests := []struct { + InputCount int + OutputScriptLengths []int + AddChangeOutput bool + ExpectedSizeEstimate int + }{ + 0: {1, []int{}, false, 161}, + 1: {1, []int{p2pkhScriptSize}, false, 195}, + 2: {1, []int{}, true, 195}, + 3: {1, []int{p2pkhScriptSize}, true, 229}, + 4: {1, []int{p2shScriptSize}, false, 193}, + 5: {1, []int{p2shScriptSize}, true, 227}, + + 6: {2, []int{}, false, 310}, + 7: {2, []int{p2pkhScriptSize}, false, 344}, + 8: {2, []int{}, true, 344}, + 9: {2, []int{p2pkhScriptSize}, true, 378}, + 10: {2, []int{p2shScriptSize}, false, 342}, + 11: {2, []int{p2shScriptSize}, true, 376}, + + // 0xfd is discriminant for 16-bit compact ints, compact int + // total size increases from 1 byte to 3. 
+ 12: {1, makeInts(p2pkhScriptSize, 0xfc), false, 8729}, + 13: {1, makeInts(p2pkhScriptSize, 0xfd), false, 8729 + P2PKHOutputSize + 2}, + 14: {1, makeInts(p2pkhScriptSize, 0xfc), true, 8729 + P2PKHOutputSize + 2}, + 15: {0xfc, []int{}, false, 37560}, + 16: {0xfd, []int{}, false, 37560 + RedeemP2PKHInputSize + 2}, + } + for i, test := range tests { + outputs := make([]*wire.TxOut, 0, len(test.OutputScriptLengths)) + for _, l := range test.OutputScriptLengths { + outputs = append(outputs, &wire.TxOut{PkScript: make([]byte, l)}) + } + actualEstimate := EstimateSerializeSize(test.InputCount, outputs, test.AddChangeOutput, P2PKH) + if actualEstimate != test.ExpectedSizeEstimate { + t.Errorf("Test %d: Got %v: Expected %v", i, actualEstimate, test.ExpectedSizeEstimate) + } + } +} + +func TestSumOutputSerializeSizes(t *testing.T) { + testTx := "0100000001066b78efa7d66d271cae6d6eb799e1d10953fb1a4a760226cc93186d52b55613010000006a47304402204e6c32cc214c496546c3277191ca734494fe49fed0af1d800db92fed2021e61802206a14d063b67f2f1c8fc18f9e9a5963fe33e18c549e56e3045e88b4fc6219be11012103f72d0a11727219bff66b8838c3c5e1c74a5257a325b0c84247bd10bdb9069e88ffffffff0200c2eb0b000000001976a914426e80ad778792e3e19c20977fb93ec0591e1a3988ac35b7cb59000000001976a914e5b6dc0b297acdd99d1a89937474df77db5743c788ac00000000" + txBytes, err := hex.DecodeString(testTx) + if err != nil { + t.Error(err) + return + } + r := bytes.NewReader(txBytes) + msgTx := wire.NewMsgTx(1) + msgTx.BtcDecode(r, 1, wire.WitnessEncoding) + if SumOutputSerializeSizes(msgTx.TxOut) != 68 { + t.Error("SumOutputSerializeSizes returned incorrect value") + } + +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/exchange_rates_test.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/exchange_rates_test.go new file mode 100644 index 0000000000..67574c07ad --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/exchange_rates_test.go @@ -0,0 +1,219 @@ +package bitcoincash + +import ( + "bytes" + 
"encoding/json" + "io" + "net/http" + "testing" + + "github.com/jarcoal/httpmock" +) + +func setupBitcoinPriceFetcher() (BitcoinCashPriceFetcher, func()) { + var ( + url = "https://ticker.openbazaar.org/api" + mockResponse = `{ + "BCH": { + "last": 20.00000, + "type": "crypto" + }, + "USD": { + "last": 10000.00, + "type": "fiat" + } + }` + exchangeCache = make(map[string]float64) + ) + + httpmock.Activate() + httpmock.RegisterResponder("GET", url, + httpmock.NewStringResponder(200, mockResponse)) + + return BitcoinCashPriceFetcher{ + cache: exchangeCache, + providers: []*ExchangeRateProvider{{url, exchangeCache, &http.Client{}, OpenBazaarDecoder{}}}, + }, httpmock.DeactivateAndReset +} + +func TestFetchCurrentRates(t *testing.T) { + b, teardown := setupBitcoinPriceFetcher() + defer teardown() + + err := b.fetchCurrentRates() + if err != nil { + t.Error("Failed to fetch bitcoin exchange rates") + } +} + +func TestGetLatestRate(t *testing.T) { + b, teardown := setupBitcoinPriceFetcher() + defer teardown() + + price, ok := b.cache["USD"] + if !ok && price == 500.00 { + t.Errorf("incorrect cache value, expected (%f) but got (%f)", 500.00, price) + } + price, err := b.GetLatestRate("USD") + if err != nil && price == 500.00 { + t.Error("Incorrect return at GetLatestRate (price, err)", price, err) + } +} + +func TestGetAllRates(t *testing.T) { + b, teardown := setupBitcoinPriceFetcher() + defer teardown() + + b.cache["USD"] = 650.00 + b.cache["EUR"] = 600.00 + priceMap, err := b.GetAllRates(true) + if err != nil { + t.Error(err) + } + usd, ok := priceMap["USD"] + if !ok || usd != 650.00 { + t.Error("Failed to fetch exchange rates from cache") + } + eur, ok := priceMap["EUR"] + if !ok || eur != 600.00 { + t.Error("Failed to fetch exchange rates from cache") + } +} + +func TestGetExchangeRate(t *testing.T) { + b, teardown := setupBitcoinPriceFetcher() + defer teardown() + + b.cache["USD"] = 650.00 + r, err := b.GetExchangeRate("USD") + if err != nil { + t.Error("Failed to 
fetch exchange rate") + } + if r != 650.00 { + t.Error("Returned exchange rate incorrect") + } + r, err = b.GetExchangeRate("EUR") + if r != 0 || err == nil { + t.Error("Return erroneous exchange rate") + } + + // Test that currency symbols are normalized correctly + r, err = b.GetExchangeRate("usd") + if err != nil { + t.Error("Failed to fetch exchange rate") + } + if r != 650.00 { + t.Error("Returned exchange rate incorrect") + } +} + +type req struct { + io.Reader +} + +func (r *req) Close() error { + return nil +} + +func TestDecodeOpenBazaar(t *testing.T) { + cache := make(map[string]float64) + openbazaarDecoder := OpenBazaarDecoder{} + var dataMap interface{} + + response := `{ + "AED": { + "ask": 2242.19, + "bid": 2236.61, + "last": 2239.99, + "timestamp": "Tue, 02 Aug 2016 00:20:45 -0000", + "volume_btc": 0.0, + "volume_percent": 0.0 + }, + "AFN": { + "ask": 41849.95, + "bid": 41745.86, + "last": 41808.85, + "timestamp": "Tue, 02 Aug 2016 00:20:45 -0000", + "volume_btc": 0.0, + "volume_percent": 0.0 + }, + "ALL": { + "ask": 74758.44, + "bid": 74572.49, + "last": 74685.02, + "timestamp": "Tue, 02 Aug 2016 00:20:45 -0000", + "volume_btc": 0.0, + "volume_percent": 0.0 + }, + "BCH": { + "ask":32.089016, + "bid":32.089016, + "last":32.089016, + "timestamp": "Tue, 02 Aug 2016 00:20:45 -0000" + }, + "timestamp": "Tue, 02 Aug 2016 00:20:45 -0000" + }` + // Test valid response + r := &req{bytes.NewReader([]byte(response))} + decoder := json.NewDecoder(r) + err := decoder.Decode(&dataMap) + if err != nil { + t.Error(err) + } + err = openbazaarDecoder.decode(dataMap, cache) + if err != nil { + t.Error(err) + } + // Make sure it saved to cache + if len(cache) == 0 { + t.Error("Failed to response to cache") + } + resp := `{"ZWL": { + "ask": 196806.48, + "bid": 196316.95, + "timestamp": "Tue, 02 Aug 2016 00:20:45 -0000", + "volume_btc": 0.0, + "volume_percent": 0.0 + }}` + + // Test missing JSON element + r = &req{bytes.NewReader([]byte(resp))} + decoder = 
json.NewDecoder(r) + err = decoder.Decode(&dataMap) + if err != nil { + t.Error(err) + } + err = openbazaarDecoder.decode(dataMap, cache) + if err == nil { + t.Error(err) + } + resp = `{ + "ask": 196806.48, + "bid": 196316.95, + "last": 196613.2, + "timestamp": "Tue, 02 Aug 2016 00:20:45 -0000", + "volume_btc": 0.0, + "volume_percent": 0.0 + }` + + // Test invalid JSON + r = &req{bytes.NewReader([]byte(resp))} + decoder = json.NewDecoder(r) + err = decoder.Decode(&dataMap) + if err != nil { + t.Error(err) + } + err = openbazaarDecoder.decode(dataMap, cache) + if err == nil { + t.Error(err) + } + + // Test decode error + r = &req{bytes.NewReader([]byte(""))} + decoder = json.NewDecoder(r) + decoder.Decode(&dataMap) + err = openbazaarDecoder.decode(dataMap, cache) + if err == nil { + t.Error(err) + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/txsizes_test.go b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/txsizes_test.go new file mode 100644 index 0000000000..5243f13078 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/bitcoincash/txsizes_test.go @@ -0,0 +1,84 @@ +package bitcoincash + +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +/* Copied here from a btcd internal package*/ + +import ( + "bytes" + "encoding/hex" + "github.com/btcsuite/btcd/wire" + "testing" +) + +const ( + p2pkhScriptSize = P2PKHPkScriptSize + p2shScriptSize = 23 +) + +func makeInts(value int, n int) []int { + v := make([]int, n) + for i := range v { + v[i] = value + } + return v +} + +func TestEstimateSerializeSize(t *testing.T) { + tests := []struct { + InputCount int + OutputScriptLengths []int + AddChangeOutput bool + ExpectedSizeEstimate int + }{ + 0: {1, []int{}, false, 161}, + 1: {1, []int{p2pkhScriptSize}, false, 195}, + 2: {1, []int{}, true, 195}, + 3: {1, []int{p2pkhScriptSize}, true, 229}, + 4: {1, []int{p2shScriptSize}, false, 193}, + 5: {1, []int{p2shScriptSize}, true, 227}, + + 6: {2, []int{}, false, 310}, + 7: {2, []int{p2pkhScriptSize}, false, 344}, + 8: {2, []int{}, true, 344}, + 9: {2, []int{p2pkhScriptSize}, true, 378}, + 10: {2, []int{p2shScriptSize}, false, 342}, + 11: {2, []int{p2shScriptSize}, true, 376}, + + // 0xfd is discriminant for 16-bit compact ints, compact int + // total size increases from 1 byte to 3. 
+ 12: {1, makeInts(p2pkhScriptSize, 0xfc), false, 8729}, + 13: {1, makeInts(p2pkhScriptSize, 0xfd), false, 8729 + P2PKHOutputSize + 2}, + 14: {1, makeInts(p2pkhScriptSize, 0xfc), true, 8729 + P2PKHOutputSize + 2}, + 15: {0xfc, []int{}, false, 37560}, + 16: {0xfd, []int{}, false, 37560 + RedeemP2PKHInputSize + 2}, + } + for i, test := range tests { + outputs := make([]*wire.TxOut, 0, len(test.OutputScriptLengths)) + for _, l := range test.OutputScriptLengths { + outputs = append(outputs, &wire.TxOut{PkScript: make([]byte, l)}) + } + actualEstimate := EstimateSerializeSize(test.InputCount, outputs, test.AddChangeOutput, P2PKH) + if actualEstimate != test.ExpectedSizeEstimate { + t.Errorf("Test %d: Got %v: Expected %v", i, actualEstimate, test.ExpectedSizeEstimate) + } + } +} + +func TestSumOutputSerializeSizes(t *testing.T) { + testTx := "0100000001066b78efa7d66d271cae6d6eb799e1d10953fb1a4a760226cc93186d52b55613010000006a47304402204e6c32cc214c496546c3277191ca734494fe49fed0af1d800db92fed2021e61802206a14d063b67f2f1c8fc18f9e9a5963fe33e18c549e56e3045e88b4fc6219be11012103f72d0a11727219bff66b8838c3c5e1c74a5257a325b0c84247bd10bdb9069e88ffffffff0200c2eb0b000000001976a914426e80ad778792e3e19c20977fb93ec0591e1a3988ac35b7cb59000000001976a914e5b6dc0b297acdd99d1a89937474df77db5743c788ac00000000" + txBytes, err := hex.DecodeString(testTx) + if err != nil { + t.Error(err) + return + } + r := bytes.NewReader(txBytes) + msgTx := wire.NewMsgTx(1) + msgTx.BtcDecode(r, 1, wire.WitnessEncoding) + if SumOutputSerializeSizes(msgTx.TxOut) != 68 { + t.Error("SumOutputSerializeSizes returned incorrect value") + } + +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/cache/cacher_example_test.go b/vendor/github.com/OpenBazaar/multiwallet/cache/cacher_example_test.go new file mode 100644 index 0000000000..6de320afd6 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/cache/cacher_example_test.go @@ -0,0 +1,55 @@ +package cache_test + +import ( + "encoding/json" + "testing" + "time" 
+ + "github.com/OpenBazaar/multiwallet/cache" +) + +type testStructSubject struct { + StringType string + IntType int + TimeType time.Time +} + +func TestSettingGettingStructs(t *testing.T) { + var ( + subject = testStructSubject{ + StringType: "teststring", + IntType: 123456, + TimeType: time.Now(), + } + cacher = cache.NewMockCacher() + ) + marshalledSubject, err := json.Marshal(subject) + if err != nil { + t.Fatal(err) + } + err = cacher.Set("thing1", marshalledSubject) + if err != nil { + t.Fatal(err) + } + + marshalledThing, err := cacher.Get("thing1") + if err != nil { + t.Fatal(err) + } + + var actual testStructSubject + err = json.Unmarshal(marshalledThing, &actual) + if err != nil { + t.Fatal(err) + } + + if subject.StringType != actual.StringType { + t.Error("expected StringType to match but did not") + } + if subject.IntType != actual.IntType { + t.Error("expected IntType to match but did not") + } + if !subject.TimeType.Equal(actual.TimeType) { + t.Error("expected TimeType to match but did not") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go b/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go index c4728728b9..f996146549 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go +++ b/vendor/github.com/OpenBazaar/multiwallet/client/blockbook/client.go @@ -333,7 +333,6 @@ func (i *BlockBookClient) GetTransaction(txid string) (*model.Transaction, error } for n, o := range tx.Vout { newOut := model.Output{ - ValueIface: tx.Vout[n].ValueIface, Value: tx.Vout[n].Value, N: o.N, ScriptPubKey: o.ScriptPubKey, @@ -647,7 +646,6 @@ func (i *BlockBookClient) setupListeners() error { i.SocketClient.Emit("subscribe", protocol.ToArgArray("bitcoind/hashblock")) i.SocketClient.On("bitcoind/addresstxid", func(h *gosocketio.Channel, arg interface{}) { - fmt.Println("On addr txid", arg) m, ok := arg.(map[string]interface{}) if !ok { Log.Errorf("error checking type after socket notification: 
%T", arg) @@ -662,7 +660,7 @@ func (i *BlockBookClient) setupListeners() error { tx, err := i.GetTransaction(txid) if err != nil { Log.Errorf("error downloading tx after socket notification: %s", err.Error()) - continue + return } tx.Time = time.Now().Unix() i.txNotifyChan <- *tx diff --git a/vendor/github.com/OpenBazaar/multiwallet/client/errors/fatal_server_error_test.go b/vendor/github.com/OpenBazaar/multiwallet/client/errors/fatal_server_error_test.go new file mode 100644 index 0000000000..bc5fe00f8c --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/client/errors/fatal_server_error_test.go @@ -0,0 +1,22 @@ +package errors_test + +import ( + "errors" + "testing" + + clientErrs "github.com/OpenBazaar/multiwallet/client/errors" +) + +func TestIsFatal(t *testing.T) { + var ( + nonFatal = errors.New("nonfatal error") + fatal = clientErrs.NewFatalError("fatal error") + ) + + if clientErrs.IsFatal(nonFatal) { + t.Error("expected non-fatal error to not indicate fatal, but did") + } + if !clientErrs.IsFatal(fatal) { + t.Error("expected fatal error to indicate fatal, but did not") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/client/errors/retryable_server_error_test.go b/vendor/github.com/OpenBazaar/multiwallet/client/errors/retryable_server_error_test.go new file mode 100644 index 0000000000..203c87603d --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/client/errors/retryable_server_error_test.go @@ -0,0 +1,22 @@ +package errors_test + +import ( + "errors" + "testing" + + clientErrs "github.com/OpenBazaar/multiwallet/client/errors" +) + +func TestIsRetryable(t *testing.T) { + var ( + nonRetryable = errors.New("nonretryable error") + retryable = clientErrs.NewRetryableError("retryable error") + ) + + if clientErrs.IsRetryable(nonRetryable) { + t.Error("expected non-retryable error to not indicate retryable, but did") + } + if !clientErrs.IsRetryable(retryable) { + t.Error("expected retryable error to indicate retryable, but did not") 
+ } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/client/errors/wrapped_error_test.go b/vendor/github.com/OpenBazaar/multiwallet/client/errors/wrapped_error_test.go new file mode 100644 index 0000000000..b7c6d73168 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/client/errors/wrapped_error_test.go @@ -0,0 +1,33 @@ +package errors_test + +import ( + "errors" + "testing" + + clientErr "github.com/OpenBazaar/multiwallet/client/errors" +) + +func TestWrappedErrorsAreComposable(t *testing.T) { + var ( + baseErr = errors.New("base") + fatalErr = clientErr.MakeFatal(baseErr) + retryableErr = clientErr.MakeRetryable(baseErr) + + fatalRetryableErr = clientErr.MakeFatal(retryableErr) + retryableFatalErr = clientErr.MakeRetryable(fatalErr) + ) + + if !clientErr.IsRetryable(fatalRetryableErr) { + t.Errorf("expected fatal(retryable(err)) to be retryable but was not") + } + if !clientErr.IsFatal(fatalRetryableErr) { + t.Errorf("expected fatal(retryable(err)) to be fatal but was not") + } + + if !clientErr.IsRetryable(retryableFatalErr) { + t.Errorf("expected retryable(fatal(err)) to be retryable but was not") + } + if !clientErr.IsFatal(retryableFatalErr) { + t.Errorf("expected retryable(fatal(err)) to be fatal but was not") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/docker-compose.yml b/vendor/github.com/OpenBazaar/multiwallet/docker-compose.yml deleted file mode 100755 index 0f35d7c82e..0000000000 --- a/vendor/github.com/OpenBazaar/multiwallet/docker-compose.yml +++ /dev/null @@ -1,10 +0,0 @@ -version: '3' -services: - dev: - build: - context: . 
- dockerfile: Dockerfile.dev - volumes: - - .:/go/src/github.com/OpenBazaar/multiwallet - security_opt: - - seccomp:unconfined #req: delve for golang diff --git a/vendor/github.com/OpenBazaar/multiwallet/filecoin/service.go b/vendor/github.com/OpenBazaar/multiwallet/filecoin/service.go index 83904e14a2..360ad41288 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/filecoin/service.go +++ b/vendor/github.com/OpenBazaar/multiwallet/filecoin/service.go @@ -9,7 +9,6 @@ import ( "github.com/OpenBazaar/wallet-interface" "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcutil" - "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" "github.com/op/go-logging" "math/big" @@ -193,13 +192,6 @@ func (fs *FilecoinService) processIncomingBlock(block model.Block) { fs.saveSingleTxToDB(*ret, int32(block.Height)) return } - // Incoming txs do not have a signature attached and we can't rebroadcast. - if txn.Value != "" { - bigVal, _ := new(big.Int).SetString(txn.Value, 10) - if bigVal.Cmp(big.NewInt(0)) > 0 { - return - } - } // Rebroadcast unconfirmed transactions _, err = fs.client.Broadcast(tx.Bytes) if err != nil { @@ -211,8 +203,7 @@ func (fs *FilecoinService) processIncomingBlock(block model.Block) { } func (fs *FilecoinService) saveSingleTxToDB(u model.Transaction, chainHeight int32) { - hits := 0 - value := big.NewInt(0) + value := new(big.Int) height := int32(0) if u.Confirmations > 0 { @@ -220,15 +211,12 @@ func (fs *FilecoinService) saveSingleTxToDB(u model.Transaction, chainHeight int } txHash, err := cid.Decode(u.Txid) - if err != nil { Log.Errorf("error converting to txHash for %s: %s", fs.coinType.String(), err.Error()) return } var relevant bool - sender := false cb := wallet.TransactionCallback{Txid: txHash.String(), Height: height, Timestamp: time.Unix(u.Time, 0)} - for _, in := range u.Inputs { faddr, err := NewFilecoinAddress(in.Addr) if err != nil { @@ -236,15 +224,6 @@ func (fs *FilecoinService) saveSingleTxToDB(u 
model.Transaction, chainHeight int continue } - if in.ValueIface == nil { - if in.Addr == fs.addr.String() { - relevant = true - sender = true - hits++ - } - continue - } - v, _ := new(big.Int).SetString(in.ValueIface.(string), 10) cbin := wallet.TransactionInput{ LinkedAddress: faddr, @@ -254,8 +233,7 @@ func (fs *FilecoinService) saveSingleTxToDB(u model.Transaction, chainHeight int if in.Addr == fs.addr.String() { relevant = true - sender = true - hits++ + value.Sub(value, v) } } for i, out := range u.Outputs { @@ -275,50 +253,23 @@ func (fs *FilecoinService) saveSingleTxToDB(u model.Transaction, chainHeight int if out.ScriptPubKey.Addresses[0] == fs.addr.String() { relevant = true - hits++ - } - - if sender { - value.Sub(value, v) - } else { value.Add(value, v) } } - if value.String() == "0" { - relevant = false - } - if !relevant { Log.Warningf("abort saving irrelevant txid (%s) to db", u.Txid) return } cb.Value = *value - saved, err := fs.db.Txns().Get(txHash.String()) if err != nil { - - // Check to see if this is a incoming block tx - allTxs, err := fs.db.Txns().GetAll(true) - for _, iTx := range allTxs { - sm, checkError := types.DecodeSignedMessage(iTx.Bytes) - if checkError != nil { - continue - } - if sm.Message.Cid().String() == u.Txid { - fmt.Println("found match") - txHash = sm.Cid() - u.RawBytes = iTx.Bytes - break - } - } - ts := time.Now() if u.Confirmations > 0 { ts = time.Unix(u.BlockTime, 0) } - err = fs.db.Txns().Put(u.RawBytes, txHash.String(), value.String(), int(height), ts, hits == 0) + err = fs.db.Txns().Put(u.RawBytes, txHash.String(), value.String(), int(height), ts, false) if err != nil { Log.Errorf("putting txid (%s): %s", txHash.String(), err.Error()) return diff --git a/vendor/github.com/OpenBazaar/multiwallet/filecoin/wallet.go b/vendor/github.com/OpenBazaar/multiwallet/filecoin/wallet.go index b210a62dd2..74e729afcf 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/filecoin/wallet.go +++ 
b/vendor/github.com/OpenBazaar/multiwallet/filecoin/wallet.go @@ -4,10 +4,10 @@ import ( "fmt" "github.com/OpenBazaar/multiwallet/keys" "github.com/btcsuite/btcd/btcec" + "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/lib/sigs" _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/specs-actors/actors/crypto" "io" "math/big" "time" @@ -136,26 +136,6 @@ func (w *FilecoinWallet) MasterPublicKey() *hd.ExtendedKey { return w.mPubKey } -func (wallet *FilecoinWallet) BumpFee(txid string) (string, error) { - return txid, nil -} - -func (wallet *FilecoinWallet) CreateMultisigSignature(ins []wi.TransactionInput, outs []wi.TransactionOutput, key *hd.ExtendedKey, redeemScript []byte, feePerByte big.Int) ([]wi.Signature, error) { - return nil, nil -} - -func (w *FilecoinWallet) GenerateMultisigScript(keys []hd.ExtendedKey, threshold int, timeout time.Duration, timeoutKey *hd.ExtendedKey) (addr btcutil.Address, redeemScript []byte, err error) { - return nil, nil, nil -} - -func (wallet *FilecoinWallet) Multisign(ins []wi.TransactionInput, outs []wi.TransactionOutput, sigs1 []wi.Signature, sigs2 []wi.Signature, redeemScript []byte, feePerByte big.Int, broadcast bool) ([]byte, error) { - return nil, nil -} - -func (wallet *FilecoinWallet) SweepAddress(utxos []wi.TransactionInput, address *btcutil.Address, key *hd.ExtendedKey, redeemScript *[]byte, feeLevel wi.FeeLevel) (string, error) { - return "", nil -} - func (w *FilecoinWallet) ChildKey(keyBytes []byte, chaincode []byte, isPrivateKey bool) (*hd.ExtendedKey, error) { parentFP := []byte{0x00, 0x00, 0x00, 0x00} var id []byte @@ -208,10 +188,18 @@ func (w *FilecoinWallet) Balance() (wi.CurrencyValue, wi.CurrencyValue) { confirmed, unconfirmed := big.NewInt(0), big.NewInt(0) for _, tx := range txns { val, _ := new(big.Int).SetString(tx.Value, 10) - if tx.Height > 0 { - confirmed.Add(confirmed, val) - } else { - 
unconfirmed.Sub(unconfirmed, val) + if val.Cmp(big.NewInt(0)) > 0 { + if tx.Height > 0 { + confirmed.Add(confirmed, val) + } else { + unconfirmed.Add(confirmed, val) + } + } else if val.Cmp(big.NewInt(0)) < 0 { + if tx.Height > 0 { + confirmed.Sub(confirmed, val) + } else { + unconfirmed.Sub(confirmed, val) + } } } return wi.CurrencyValue{Value: *confirmed, Currency: FilecoinCurrencyDefinition}, @@ -254,42 +242,6 @@ func (w *FilecoinWallet) Transactions() ([]wi.Txn, error) { func (w *FilecoinWallet) GetTransaction(txid string) (wi.Txn, error) { txn, err := w.db.Txns().Get(txid) - - var from string - var to string - - sm, smError := types.DecodeSignedMessage(txn.Bytes) - if smError != nil { - m, mError := types.DecodeMessage(txn.Bytes) - if mError != nil { - return wi.Txn{}, err - } - from = m.From.String() - to = m.To.String() - } else { - from = sm.Message.From.String() - to = sm.Message.To.String() - } - - txn.ToAddress = to - txn.FromAddress = from - - v, ok := new (big.Int).SetString(txn.Value, 10) - if !ok { - return wi.Txn{}, err - } - - fToAddress, err := w.DecodeAddress(txn.ToAddress) - if err != nil { - return wi.Txn{}, err - } - - output := wi.TransactionOutput{ - Value: *v, - Address: fToAddress, - } - txn.Outputs = []wi.TransactionOutput{output} - return txn, err } @@ -326,17 +278,25 @@ func (w *FilecoinWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi if val.Cmp(big.NewInt(0)) > 0 { continue } + + m, err := types.DecodeMessage(tx.Bytes) + if err != nil { + return "", err + } + if m.Nonce > nonce { + nonce = m.Nonce + } + } + if nonce > 0 { nonce++ } m := types.Message{ To: address, - Version: 0, Value: bigAmt, From: w.addr, GasLimit: 1000, Nonce: nonce, - Params: []byte(referenceID), } id := m.Cid() @@ -356,7 +316,7 @@ func (w *FilecoinWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi return "", err } - myAddr, err := NewFilecoinAddress(w.addr.String()) + myAddr, err := NewFilecoinAddress(w.addr.String()) if err != nil { 
return "", err } @@ -365,19 +325,19 @@ func (w *FilecoinWallet) Spend(amount big.Int, addr btcutil.Address, feeLevel wi Outputs: []wi.TransactionOutput{ { Address: addr, - Value: amount, + Value: amount, OrderID: referenceID, }, }, Inputs: []wi.TransactionInput{ { - Value: amount, - OrderID: referenceID, + Value: amount, + OrderID: referenceID, LinkedAddress: myAddr, }, }, - Value: amount, - Txid: signed.Cid().String(), + Value: *amount.Mul(&amount, big.NewInt(-1)), + Txid: signed.Cid().String(), }) return signed.Cid().String(), nil @@ -491,24 +451,25 @@ func (w *FilecoinWallet) DumpTables(wr io.Writer) { // Build a client.Transaction so we can ingest it into the wallet service then broadcast func (w *FilecoinWallet) Broadcast(msg *types.SignedMessage) error { + id := msg.Cid() ser, err := msg.Serialize() if err != nil { return err } cTxn := model.Transaction{ - Txid: msg.Cid().String(), + Txid: id.String(), Version: int(msg.Message.Version), Confirmations: 0, Time: time.Now().Unix(), RawBytes: ser, Inputs: []model.Input{ { - Addr: w.addr.String(), + Addr: w.addr.String(), ValueIface: msg.Message.Value.String(), }, }, - Outputs: []model.Output { + Outputs: []model.Output{ { ScriptPubKey: model.OutScript{ Addresses: []string{msg.Message.To.String()}, diff --git a/vendor/github.com/OpenBazaar/multiwallet/go.mod b/vendor/github.com/OpenBazaar/multiwallet/go.mod index a97d5013a1..0ddb8790b7 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/go.mod +++ b/vendor/github.com/OpenBazaar/multiwallet/go.mod @@ -16,15 +16,16 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible github.com/cpacia/bchutil v0.0.0-20181003130114-b126f6a35b6c github.com/ethereum/go-ethereum v1.9.15 // indirect - github.com/filecoin-project/go-address v0.0.2-0.20200504173055-8b6f2fb2b3ef - github.com/filecoin-project/lotus v0.4.0 - github.com/filecoin-project/specs-actors v0.7.1 + github.com/filecoin-project/filecoin-ffi v0.30.4-0.20201006125140-a62d00da59d1 // indirect + 
github.com/filecoin-project/go-address v0.0.4 + github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f + github.com/filecoin-project/lotus v0.10.2 github.com/gcash/bchd v0.16.4 github.com/golang/protobuf v1.4.2 github.com/google/go-cmp v0.4.1 // indirect github.com/gorilla/websocket v1.4.2 github.com/hunterlong/tokenbalance v0.0.12-0.20191105170207-4f98e641e619 // indirect - github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 + github.com/ipfs/go-cid v0.0.7 github.com/jarcoal/httpmock v1.0.5 github.com/jessevdk/go-flags v1.4.0 github.com/joho/godotenv v1.3.0 // indirect @@ -38,9 +39,12 @@ require ( github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 github.com/prometheus/tsdb v0.7.1 // indirect github.com/shopspring/decimal v1.2.0 // indirect - github.com/stretchr/testify v1.6.0 // indirect + github.com/sirupsen/logrus v1.7.0 // indirect + github.com/stretchr/objx v0.2.0 // indirect github.com/tyler-smith/go-bip39 v1.0.2 golang.org/x/crypto v0.0.0-20200707235045-ab33eee955e0 + golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect golang.org/x/net v0.0.0-20200707034311-ab3426394381 google.golang.org/grpc v1.30.0 + honnef.co/go/tools v0.0.1-2020.1.3 // indirect ) diff --git a/vendor/github.com/OpenBazaar/multiwallet/go.sum b/vendor/github.com/OpenBazaar/multiwallet/go.sum index ed397e59fd..181c8a25cf 100644 --- a/vendor/github.com/OpenBazaar/multiwallet/go.sum +++ b/vendor/github.com/OpenBazaar/multiwallet/go.sum @@ -8,20 +8,14 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= 
-cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -65,12 +59,11 @@ github.com/OpenBazaar/openbazaar-go v0.14.3 h1:8GLYjZp+vZFrsonyj4a8q/JJucgMemN1S github.com/OpenBazaar/openbazaar-go v0.14.3/go.mod h1:IPD0UJLYifq0e5ZiJ1oXpKyg1raiRvmU0Mq17sKj/S0= github.com/OpenBazaar/spvwallet v0.0.0-20200112224336-39f04e8d6d34 h1:xGvs105pXKoIVhYc5SaxbLOzvriX61KYB9oSinjFGUk= github.com/OpenBazaar/spvwallet v0.0.0-20200112224336-39f04e8d6d34/go.mod h1:SVavvqIp6t5kuJx+PqDsKIQ+avRth92nGg2wVp7mW/s= -github.com/OpenBazaar/wallet-interface v0.0.0-20200511225711-6ec1fd0d9d23 h1:A+/78rkN9VEJNccIcMSdiYfYwZ6AZgenIX41cSN8B08= 
-github.com/OpenBazaar/wallet-interface v0.0.0-20200511225711-6ec1fd0d9d23/go.mod h1:KiLnq+35bzKd6Bq8EP8iGElNBU/++VxbDVg9zCvKMgU= github.com/OpenBazaar/wallet-interface v0.0.0-20200720181501-d30f5eb54286 h1:gHiOKvfKlJwmkX6lfjEuCx13PrMAXdMqjUQzA4eqWSg= github.com/OpenBazaar/wallet-interface v0.0.0-20200720181501-d30f5eb54286/go.mod h1:KiLnq+35bzKd6Bq8EP8iGElNBU/++VxbDVg9zCvKMgU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= @@ -80,14 +73,16 @@ github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= -github.com/alangpierce/go-forceexport v0.0.0-20160317203124-8f1d6941cd75/go.mod h1:uAXEEpARkRhCZfEvy/y0Jcc888f9tHCc1W7/UeEtreE= github.com/alecthomas/template 
v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= @@ -106,15 +101,19 @@ github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6l github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.0.3 h1:vkLuvpK4fmtSCuo60+yC63p7y0BmQ8gm5ZXGuBCJyXg= +github.com/benbjohnson/clock v1.0.3/go.mod 
h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= +github.com/briandowns/spinner v1.11.1/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -145,11 +144,13 @@ github.com/btcsuite/snappy-go v1.0.0 h1:ZxaA6lo2EpxGddsA8JwWOcxlzRybb444sgmeJQMJ github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -166,6 +167,10 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= +github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -192,7 +197,6 @@ github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= github.com/daaku/go.zipexe v1.0.0/go.mod 
h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= -github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -212,8 +216,10 @@ github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhY github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= +github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -224,16 +230,17 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dop251/goja v0.0.0-20200219165308-d1232e640a87/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand 
v0.9.2-0.20200616080806-a94e9c1636a4/go.mod h1:Bu8QYdU0YdB2ZQZezHxabmOIciddiwLRnyV4nuZ2HQE= +github.com/drand/drand v1.1.2-0.20200905144319-79c957281b32/go.mod h1:0sQEVg+ngs1jaDPVIiEgY0lbENWJPaUlWxGHEaSmKVM= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.0/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= +github.com/drand/kyber v1.1.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= github.com/drand/kyber-bls12381 v0.1.0/go.mod h1:N1emiHpm+jj7kMlxEbu3MUyOiooTgNySln564cgD9mk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c h1:JHHhtb9XWJrGNMcrVP6vyzO4dusgi/HnceHTgxSejUM= github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= @@ -252,70 +259,86 @@ github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= +github.com/fatih/color v1.9.0/go.mod 
h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fd/go-nat v1.0.0/go.mod h1:BTBu/CKvMmOMUPkKVef1pngt2WFH/lg7E6yQnulfp6E= -github.com/filecoin-project/chain-validation v0.0.6-0.20200615191232-6be1a8c6ed09/go.mod h1:HEJn6kOXMNhCNBYNTO/lrEI7wSgqCOR6hN5ecfYUnC8= -github.com/filecoin-project/filecoin-ffi v0.0.0-20200326153646-e899cc1dd072/go.mod h1:PtH9YP0rURHUKHrKeEBeWg/BqIBMQOz8wtlXlVGREBE= -github.com/filecoin-project/filecoin-ffi v0.26.1-0.20200508175440-05b30afeb00d h1:smoOJ2TGTYFsmBaH01WIx4crs8axosy1V9Pi+/bdk5Y= -github.com/filecoin-project/filecoin-ffi v0.26.1-0.20200508175440-05b30afeb00d/go.mod h1:vlQ7sDkbrtM70QMJFDvEyTDywY5SvIjadRCUB+76l90= -github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d h1:YVh0Q+1iUvbv7SIfwA/alULOlWjQNOEnV72rgeYweLY= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200716204036-cddc56607e1d/go.mod h1:XE4rWG1P7zWPaC11Pkn1CVR20stqN52MnMkIrF4q6ZU= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20201006125140-a62d00da59d1 h1:tQWTejWA9P2TQLvUlzJeaCenqhswhfXm6zLRiUE9CIw= +github.com/filecoin-project/filecoin-ffi v0.30.4-0.20201006125140-a62d00da59d1/go.mod h1:qby9lZcWbuZJ+9qqu5jUAnSaQ95XsxdT3cavjY9YLO4= github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= -github.com/filecoin-project/go-address v0.0.2-0.20200504173055-8b6f2fb2b3ef h1:Wi5E+P1QfHP8IF27eUiTx5vYfqQZwfPxzq3oFEq8w8U= -github.com/filecoin-project/go-address v0.0.2-0.20200504173055-8b6f2fb2b3ef/go.mod h1:SrA+pWVoUivqKOfC+ckVYbx41hWz++HxJcrlmHNnebU= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= +github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.4 
h1:gSNMv0qWwH16fGQs7ycOUrDjY6YCSsgLUl0I0KLjo8w= +github.com/filecoin-project/go-address v0.0.4/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349 h1:pIuR0dnMD0i+as8wNnjjHyQrnhP5O5bmba/lmgQeRgU= +github.com/filecoin-project/go-amt-ipld/v2 v2.1.1-0.20201006184820-924ee87a1349/go.mod h1:vgmwKBkx+ca5OIeEvstiQgzAZnb7R6QaqE1oEDSqa6g= github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= +github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= +github.com/filecoin-project/go-bitfield v0.2.1 h1:S6Uuqcspqu81sWJ0He4OAfFLm1tSwPdVjtKTkl5m/xQ= +github.com/filecoin-project/go-bitfield v0.2.1/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-crypto 
v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.3.0 h1:BwBrrXu9Unh9JjjX4GAc5FfzUNioor/aATIjfc7JTBg= -github.com/filecoin-project/go-data-transfer v0.3.0/go.mod h1:cONglGP4s/d+IUQw5mWZrQK+FQATQxr3AXzi4dRh0l4= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= -github.com/filecoin-project/go-fil-markets v0.3.0 h1:7iCGiuTSia4f4DmOn3s96NWUwMNSOI0ZHel/XgeApAQ= -github.com/filecoin-project/go-fil-markets v0.3.0/go.mod h1:UXsXi43AyUQ5ieb4yIaLgk4PVt7TAbl1UCccuNw+7ds= -github.com/filecoin-project/go-jsonrpc v0.1.1-0.20200602181149-522144ab4e24/go.mod h1:j6zV//WXIIY5kky873Q3iIKt/ViOE8rcijovmpxrXzM= -github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:92PET+sx1Hb4W/8CgFwGuxaKbttwY+UNspYZTvXY0vs= -github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE= -github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200218225740-47c639bab663/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca h1:OGykrCr6mSn/ckk2IFbIlkc76nsgEs7tSLhZXQt7+z4= -github.com/filecoin-project/go-paramfetch v0.0.2-0.20200605171344-fcac609550ca/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= -github.com/filecoin-project/go-statemachine v0.0.0-20200226041606-2074af6d51d9/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= -github.com/filecoin-project/go-statemachine v0.0.0-20200612181802-4eb3d0c68eba 
h1:GEWb/6KQyNZt4jm8fgVcIFPH0ElAGXfHM59ZSiqPTvY= -github.com/filecoin-project/go-statemachine v0.0.0-20200612181802-4eb3d0c68eba/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-data-transfer v0.9.0 h1:nTT8j7Hu3TM0wRWrGy83/ctawG7sleJGdFWtIsUsKgY= +github.com/filecoin-project/go-data-transfer v0.9.0/go.mod h1:i2CqUy7TMQGKukj9BgqIxiP8nDHDXU2VLd771KVaCaQ= +github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= +github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f h1:GxJzR3oRIMTPtpZ0b7QF8FKPK6/iPAc7trhlL5k/g+s= +github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= +github.com/filecoin-project/go-fil-markets v0.9.1 h1:MgO+UkpreD6x8DV2Zkw2xlBogixfpw9/wf4+nBii7bU= +github.com/filecoin-project/go-fil-markets v0.9.1/go.mod h1:h+bJ/IUnYjnW5HMKyt9JQSnhslqetkpuzwwugc3K8vM= +github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= +github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= +github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+eEvrDCGJoPLxFpDynFjYfBjI= +github.com/filecoin-project/go-jsonrpc v0.1.2-0.20201008195726-68c6a2704e49/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= +github.com/filecoin-project/go-multistore v0.0.3 h1:vaRBY4YiA2UZFPK57RNuewypB8u0DzzQwqsL0XarpnI= +github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= +github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20 h1:+/4aUeUoKr6AKfPE3mBhXA5spIV6UcKdTYDPNU2Tdmg= +github.com/filecoin-project/go-padreader 
v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261 h1:A256QonvzRaknIIAuWhe/M2dpV2otzs3NBhi5TWa/UA= +github.com/filecoin-project/go-paramfetch v0.0.2-0.20200701152213-3e0f0afdc261/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= +github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= +github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201003010437-c33112184a2b h1:bMUfG6Sy6YSMbsjQAO1Q2vEZldbSdsbRy/FX3OlTck0= +github.com/filecoin-project/go-state-types v0.0.0-20201003010437-c33112184a2b/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f h1:TZDTu4MtBKSFLXWGKLy+cvC3nHfMFIrVgWLAz/+GgZQ= +github.com/filecoin-project/go-state-types v0.0.0-20201013222834-41ea465f274f/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod 
h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/lotus v0.4.0 h1:tr9g3hhv/LrezPlbtOj6MCw1hVkbhpOXiq+Y9QFt34Q= -github.com/filecoin-project/lotus v0.4.0/go.mod h1:RGmcSJ6+0D3vXcBgNk6T7fT9Y5UBZ+Aowse3cTi+yZA= -github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM= -github.com/filecoin-project/sector-storage v0.0.0-20200618073200-d9de9b7cb4b4 h1:lQC8Fbyn31/H4QxYAYwVV3PYZ9vS61EmjktZc5CaiYs= -github.com/filecoin-project/sector-storage v0.0.0-20200618073200-d9de9b7cb4b4/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM= -github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA= -github.com/filecoin-project/specs-actors v0.0.0-20200226200336-94c9b92b2775/go.mod h1:0HAWYrvajFHDgRaKbF0rl+IybVLZL5z4gQ8koCMPhoU= -github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= -github.com/filecoin-project/specs-actors v0.6.2-0.20200617175406-de392ca14121/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= -github.com/filecoin-project/specs-actors v0.7.1 h1:/zW++MN4gGIPvG+s0zmSI97k0Z/aaeiREjLC10gQbco= -github.com/filecoin-project/specs-actors v0.7.1/go.mod h1:+z0htZu/wLBDbOLcQTKKUEC2rkUTFzL2KJ/bRAVWkws= -github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94= -github.com/filecoin-project/specs-storage v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= -github.com/filecoin-project/storage-fsm v0.0.0-20200617183754-4380106d3e94/go.mod h1:q1YCutTSMq/yGYvDPHReT37bPfDLHltnwJutzR9kOY0= +github.com/filecoin-project/lotus v0.10.2 h1:1PSqy4juXAPHo33o+5PiDzXCWI/sb4RnH5y9nh/fCc0= +github.com/filecoin-project/lotus v0.10.2/go.mod h1:ahNsMjh5iWm3w6W1kP9Uq/pPfAy8gvhJdEghYB0kEJI= 
+github.com/filecoin-project/specs-actors v0.6.1/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= +github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= +github.com/filecoin-project/specs-actors v0.9.12 h1:iIvk58tuMtmloFNHhAOQHG+4Gci6Lui0n7DYQGi3cJk= +github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= +github.com/filecoin-project/specs-actors/v2 v2.1.0 h1:ocEuGz8DG2cUWw32c/tvF8D6xT+dGVWJTr5yDevU00g= +github.com/filecoin-project/specs-actors/v2 v2.1.0/go.mod h1:E7fAX4CZkDVQvDNRCxfq+hc3nx56KcCKyuZf0hlQJ20= +github.com/filecoin-project/specs-storage v0.1.1-0.20200907031224-ed2e5cd13796/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/test-vectors/schema v0.0.4/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6 h1:u/UEqS66A5ckRmS4yNpjmVH56sVtS/RfclBAYocb4as= +github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/fsnotify/fsnotify v1.4.7 
h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= @@ -330,6 +353,8 @@ github.com/gcash/bchutil v0.0.0-20190625002603-800e62fe9aff/go.mod h1:zXSP0Fg2L5 github.com/gcash/bchutil v0.0.0-20191012211144-98e73ec336ba/go.mod h1:nUIrcbbtEQdCsRwcp+j/CndDKMQE9Fi8p2F8cIZmIqI= github.com/gcash/bchutil v0.0.0-20200229194731-128fc9884722 h1:oeBQvSvKXcDbyoUbyeveB99CHJWgQfxiV9gKcPrXfhs= github.com/gcash/bchutil v0.0.0-20200229194731-128fc9884722/go.mod h1:wB++2ZcHUvGLN1OgO9swBmJK1vmyshJLW9SNS+apXwc= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -337,19 +362,23 @@ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= @@ -368,6 +397,7 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -376,8 +406,7 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3 h1:GV+pQPG/EUUbkh47niozDcADz6go/dUwhVzdUQHIVRw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -385,7 +414,6 @@ github.com/golang/protobuf v1.3.2-0.20190517061210-b285ee9cfc6c/go.mod h1:6lQm79 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= 
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -397,6 +425,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= +github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -411,12 +441,12 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= +github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= +github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof 
v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= @@ -435,7 +465,6 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -456,8 +485,9 @@ github.com/gxed/go-shellwords v1.0.3/go.mod h1:N7paucT91ByIjmVJHhvoarjoQnmsi3Jd3 github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/gxed/pubsub v0.0.0-20180201040156-26ebdf44f824/go.mod h1:OiEWyHgK+CWrmOlVquHaIK1vhpUJydC9m0Je6mhaiNE= 
-github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099 h1:vQqOW42RRM5LoM/1K5dK940VipLqpH8lEVGrMz+mNjU= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k= +github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -488,6 +518,7 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/huin/goupnp v0.0.0-20180415215157-1395d1447324/go.mod h1:MZ2ZmwcBpvOoJ22IJsc7va19ZwoheaBk43rKg12SKag= @@ -510,8 +541,8 @@ github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3 github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= github.com/ipfs/go-bitswap 
v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.2.8 h1:5tQrbyyRS3DkzvcM5n+bVjdSAHLgvH7D+1LopndhUII= -github.com/ipfs/go-bitswap v0.2.8/go.mod h1:2Yjog0GMdH8+AsxkE0DI9D2mANaUTxbVVav0pPoZoug= +github.com/ipfs/go-bitswap v0.2.20 h1:Zfi5jDUoqxDThORUznqdeL77DdGniAzlccNJ4vr+Itc= +github.com/ipfs/go-bitswap v0.2.20/go.mod h1:C7TwBgHnu89Q8sHsTJP7IhUqF9XYLe71P4tT5adgmYo= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= @@ -520,6 +551,8 @@ github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbR github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s= +github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -528,6 +561,10 @@ github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA= github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 
h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cidutil v0.0.2/go.mod h1:ewllrvrxG6AMYStla3GD7Cqn+XYSLqjK0vc+086tB6s= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= @@ -537,8 +574,11 @@ github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRV github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -547,26 +587,29 @@ github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9 github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger2 v0.1.0/go.mod 
h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= +github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= +github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= -github.com/ipfs/go-fs-lock v0.0.1/go.mod h1:DNBekbboPKcxs1aukPSaOtFA3QfSdi5C855v0i9XJ8Y= -github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103 h1:SD+bXod/pOWKJCGj0tG140ht8Us5k+3JBcHw0PVYTho= -github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= +github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= +github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= +github.com/ipfs/go-graphsync v0.3.0 h1:I6Y20kSuCWkUvPoUWo4V3am704/9QjgDVVkf0zIV8+8= +github.com/ipfs/go-graphsync v0.3.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= -github.com/ipfs/go-hamt-ipld v0.0.15-0.20200204200533-99b8553ef242/go.mod h1:kq3Pi+UP3oHhAdKexE+kHHYRKMoFNuGero0R7q3hWGg= -github.com/ipfs/go-hamt-ipld v0.1.1-0.20200501020327-d53d20a7063e/go.mod h1:giiPqWYCnRBYpNTsJ/EX1ojldX5kTXrXYckSJQ7ko9M= 
-github.com/ipfs/go-hamt-ipld v0.1.1-0.20200605182717-0310ad2b0b1f h1:mchhWiYYUSoCuE3wDfRCo8cho5kqSoxkgnOtGcnNMZw= -github.com/ipfs/go-hamt-ipld v0.1.1-0.20200605182717-0310ad2b0b1f/go.mod h1:phOFBB7W73N9dg1glcb1fQ9HtQFDUpeyJgatW8ns0bw= +github.com/ipfs/go-hamt-ipld v0.1.1 h1:0IQdvwnAAUKmDE+PMJa5y1QiwOPHpI9+eAbQEEEYthk= +github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= github.com/ipfs/go-ipfs-blockstore v1.0.0 h1:pmFp5sFYsYVvMOp9X01AK3s85usVcLvkBTRsN6SnfUA= github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= +github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w= +github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -588,7 +631,6 @@ github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAz github.com/ipfs/go-ipfs-files v0.0.2/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files v0.0.8/go.mod 
h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-flags v0.0.1/go.mod h1:RnXBb9WV53GSfTrSDVK61NLTFKvWc60n+K9EgCDh+rA= @@ -603,6 +645,8 @@ github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRD github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= +github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.1/go.mod h1:RXHr8s4k0NE0TKhnrxqZC9M888QfsBN9rhS5NjfKzY8= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= @@ -627,13 +671,16 @@ github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBW github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= github.com/ipfs/go-log/v2 v2.0.8/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.2-0.20200609205458-f8d20c392cb7 h1:LtL/rvdfbKSthZGmAAD9o4KKg6HA6Qn8gXCCdgnj7lw= -github.com/ipfs/go-log/v2 v2.1.2-0.20200609205458-f8d20c392cb7/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4 h1:3bijxqzQ1O9yg7gd7Aqk80oaEvsJ+uXw0zSvi2qR3Jw= +github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-merkledag v0.0.3/go.mod h1:Oc5kIXLHokkE1hWGMBHw+oxehkAaTOqtEb7Zbh6BhLA= github.com/ipfs/go-merkledag 
v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-path v0.0.3/go.mod h1:zIRQUez3LuQIU25zFjC2hpBTHimWx7VK5bjZgRLbbdo= @@ -653,18 +700,22 @@ github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZ github.com/ipfs/interface-go-ipfs-core v0.2.3/go.mod h1:Tihp8zxGpUeE3Tokr94L6zWZZdkRQvG5TL6i9MuNE+s= github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdmg= github.com/ipfs/iptb-plugins v0.2.1/go.mod h1:QXMbtIWZ+jRsW8a4h13qAKU7jcM7qaittO8wOsTP0Rs= -github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339/go.mod h1:eajxljm6I8o3LitnFeVEmucwZmz7+yLSiKce9yYMefg= -github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae h1:OV9dxl8iPMCOD8Vi/hvFwRh3JWPXqmkYSVxWr9JnEzM= -github.com/ipld/go-car v0.1.1-0.20200526133713-1c7508d55aae/go.mod h1:2mvxpu4dKRnuH3mj5u6KW/tmRSCcXvy/KYiJ4nC6h4c= +github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4 h1:6phjU3kXvCEWOZpu+Ob0w6DzgPFZmDLgLPxJhD8RxEY= +github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= +github.com/ipld/go-ipld-prime 
v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.4/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= @@ -688,11 +739,14 @@ github.com/jessevdk/go-flags v0.0.0-20181221193153-c0795c8afcf4/go.mod h1:4FA24M github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod 
h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= @@ -726,7 +780,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 
h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -735,7 +791,7 @@ github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= @@ -766,11 +822,13 @@ github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt0 github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= -github.com/libp2p/go-libp2p v0.8.2/go.mod h1:NQDA/F/qArMHGe0J7sDScaKjW8Jh4y/ozQqBbYJ+BnA= +github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= -github.com/libp2p/go-libp2p v0.9.4 h1:yighwjFvsF/qQaGtHPZfxcF+ph4ydCNnsKvg712lYRo= -github.com/libp2p/go-libp2p v0.9.4/go.mod h1:NzQcC2o19xgwGqCmjx7DN+4h2F13qPCZ9UJmweYzsnU= +github.com/libp2p/go-libp2p v0.10.0 h1:7ooOvK1wi8eLpyTppy8TeH43UHy5uI75GAHGJxenUi0= +github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= 
+github.com/libp2p/go-libp2p v0.11.0 h1:jb5mqdqYEBAybTEhD8io43Cz5LzVKuWxOK7znSN69jE= +github.com/libp2p/go-libp2p v0.11.0/go.mod h1:3/ogJDXsbbepEfqtZKBR/DedzxJXCeK17t2Z9RE9bEE= github.com/libp2p/go-libp2p-autonat v0.0.2/go.mod h1:fs71q5Xk+pdnKU014o2iq1RhMs9/PMaG5zXRFNnIIT4= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= @@ -780,6 +838,8 @@ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRk github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3 h1:w46bKK3KTOUWDe5mDYMRjJu1uryqBp8HCNDp/TWMqKw= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= +github.com/libp2p/go-libp2p-autonat v0.3.2 h1:OhDSwVVaq7liTaRIsFFYvsaPp0pn2yi0WazejZ4DUmo= +github.com/libp2p/go-libp2p-autonat v0.3.2/go.mod h1:0OzOi1/cVc7UcxfOddemYD5vzEqi4fwRbnZcJGLi68U= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= @@ -787,6 +847,8 @@ github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uL github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.1.6 h1:CkPp1/zaCrCnBo0AdsQA0O1VkUYoUOtyHOnoa8gKIcE= github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= +github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= +github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= github.com/libp2p/go-libp2p-circuit v0.0.1/go.mod 
h1:Dqm0s/BiV63j8EEAs8hr1H5HudqvCAeXxDyic59lCwE= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= @@ -796,8 +858,13 @@ github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3 github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= github.com/libp2p/go-libp2p-circuit v0.2.2 h1:87RLabJ9lrhoiSDDZyCJ80ZlI5TLJMwfyoGAaWXzWqA= github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-circuit v0.2.3 h1:3Uw1fPHWrp1tgIhBz0vSOxRUmnKL8L/NGUyEd5WfSGM= +github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= +github.com/libp2p/go-libp2p-circuit v0.3.1 h1:69ENDoGnNN45BNDnBd+8SXSetDuw0eJFcGmOvvtOgBw= +github.com/libp2p/go-libp2p-circuit v0.3.1/go.mod h1:8RMIlivu1+RxhebipJwFDA45DasLx+kkrp4IlJj53F4= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= +github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= @@ -821,6 +888,10 @@ github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqe github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= github.com/libp2p/go-libp2p-core v0.5.7 h1:QK3xRwFxqd0Xd9bSZL+8yZ8ncZZbl6Zngd/+Y+A6sgQ= github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= 
+github.com/libp2p/go-libp2p-core v0.6.0 h1:u03qofNYTBN+yVg08PuAKylZogVf0xcTEeM8skGf+ak= +github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= +github.com/libp2p/go-libp2p-core v0.6.1 h1:XS+Goh+QegCDojUZp00CaPMfiEADCrLjNZskWE7pvqs= +github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= @@ -832,6 +903,8 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= github.com/libp2p/go-libp2p-discovery v0.4.0 h1:dK78UhopBk48mlHtRCzbdLm3q/81g77FahEBTjcqQT8= github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= +github.com/libp2p/go-libp2p-discovery v0.5.0 h1:Qfl+e5+lfDgwdrXdu4YNCWyEo3fWuP+WgN9mN0iWviQ= +github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -839,7 +912,7 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht 
v0.8.1/go.mod h1:u3rbYbp3CSraAHD5s81CJ3hHozKTud/UOXfAgh93Gek= +github.com/libp2p/go-libp2p-kad-dht v0.8.3/go.mod h1:HnYYy8taJWESkqiESd1ngb9XX/XGGsMA5G0Vj2HoSh4= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.4.2/go.mod h1:7sCeZx2GkNK1S6lQnGUW5JYZCFPnXzAZCCBBS70lytY= github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= @@ -852,6 +925,8 @@ github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiY github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3 h1:2zijwaJvpdesST2MXpI5w9wWFRgYtMcpRX7rrw0jmOo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= +github.com/libp2p/go-libp2p-mplex v0.2.4 h1:XFFXaN4jhqnIuJVjYOR3k6bnRj0mFfJOlIuDVww+4Zo= +github.com/libp2p/go-libp2p-mplex v0.2.4/go.mod h1:mI7iOezdWFOisvUwaYd3IDrJ4oVmgoXK8H331ui39CE= github.com/libp2p/go-libp2p-nat v0.0.2/go.mod h1:QrjXQSD5Dj4IJOdEcjHRkWTSomyxRo6HnUkf/TfQpLQ= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= @@ -862,6 +937,9 @@ github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8 github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= +github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= +github.com/libp2p/go-libp2p-noise v0.1.2 h1:IH9GRihQJTx56obm+GnpdPX4KeVIlvpXrP6xnJ0wxWk= +github.com/libp2p/go-libp2p-noise v0.1.2/go.mod h1:9B10b7ueo7TIxZHHcjcDCo5Hd6kfKT2m77by82SFRfE= 
github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= @@ -876,23 +954,27 @@ github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRj github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= github.com/libp2p/go-libp2p-peerstore v0.2.4 h1:jU9S4jYN30kdzTpDAR7SlHUD+meDUjTODh4waLWF1ws= github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.6 h1:2ACefBX23iMdJU9Ke+dcXt3w86MIryes9v7In4+Qq3U= +github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-pubsub v0.3.1/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= +github.com/libp2p/go-libp2p-pubsub v0.3.6 h1:9oO8W7qIWCYQYyz5z8nUsPcb3rrFehBlkbqvbSVjBxY= +github.com/libp2p/go-libp2p-pubsub v0.3.6/go.mod h1:DTMSVmZZfXodB/pvdTGrY2eHPZ9W2ev7hzTH83OKHrI= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= 
-github.com/libp2p/go-libp2p-quic-transport v0.3.7/go.mod h1:Kr4aDtnfHHNeENn5J+sZIVc+t8HpQn9W6BOxhVGHbgI= github.com/libp2p/go-libp2p-quic-transport v0.5.0 h1:BUN1lgYNUrtv4WLLQ5rQmC9MCJ6uEXusezGvYRNoJXE= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= +github.com/libp2p/go-libp2p-quic-transport v0.8.2/go.mod h1:L+e0q15ZNaYm3seHgbsXjWP8kXLEqz+elLWKk9l8DhM= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= github.com/libp2p/go-libp2p-record v0.1.2 h1:M50VKzWnmUrk/M5/Dz99qO9Xh4vs8ijsK+7HkJvRP+0= github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= +github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= +github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw= @@ -910,8 +992,10 @@ github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= -github.com/libp2p/go-libp2p-swarm v0.2.6 h1:UhMXIa+yCOALQyceENEIStMlbTCzOM6aWo6vw8QW17Q= -github.com/libp2p/go-libp2p-swarm v0.2.6/go.mod h1:F9hrkZjO7dDbcEiYii/fAB1QdpLuU6h1pa4P5VNsEgc= +github.com/libp2p/go-libp2p-swarm 
v0.2.7 h1:4lV/sf7f0NuVqunOpt1I11+Z54+xp+m0eeAvxj/LyRc= +github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= +github.com/libp2p/go-libp2p-swarm v0.2.8 h1:cIUUvytBzNQmGSjnXFlI6UpoBGsaud82mJPIJVfkDlg= +github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -919,6 +1003,8 @@ github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MB github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8 h1:v4dvk7YEW8buwCdIVWnhpv0Hp/AAJKRWIxBhmLRZrsk= +github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-tls v0.1.3 h1:twKMhMu44jQO+HgQK9X8NHO5HkeJu2QbhLzLJpa8oNM= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= @@ -955,23 +1041,33 @@ github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+ github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4 h1:agEFehY3zWJFUHK6SEMR7UYmk2z6kC3oeCM7ybLhguA= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= +github.com/libp2p/go-msgio v0.0.6 
h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= +github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-netroute v0.1.2 h1:UHhB35chwgvcRI392znJA3RCBtZ3MpE3ahNCN5MR4Xg= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= +github.com/libp2p/go-netroute v0.1.3 h1:1ngWRx61us/EpaKkdqkMjKk/ufr/JlIFYQAxV2XX8Ig= +github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.5 h1:pQkejVhF0xp08D4CQUcw8t+BFJeXowja6RVcb5p++EA= github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= +github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= +github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FWpFLw= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= +github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= +github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport-transport v0.0.1/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod 
h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3 h1:zzOeXnTooCkRvoH+bSXEfXhn76+LAiwoneM0gnXjF2M= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= +github.com/libp2p/go-reuseport-transport v0.0.4 h1:OZGz0RB620QDGpv300n1zaOcKGGAoGVf8h9txtt/1uM= +github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0 h1:Y4s3/jNoryVRKEBrkJ576F17CPOaMIzUeCsg7dlTDj0= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= @@ -987,6 +1083,8 @@ github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2 github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0 h1:YoThc549fzmNJIh7XjHVtMIFaEDRtIrtWciG5LyYAPo= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= +github.com/libp2p/go-tcp-transport v0.2.1 h1:ExZiVQV+h+qL16fzCWtd1HSzPsqWottJ8KXwWaVi8Ns= +github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.1/go.mod h1:p3bKjDWHEgtuKKj+2OdPYs5dAPIjtpQGHF2tJfGz7Ww= @@ -1016,9 +1114,9 @@ github.com/ltcsuite/ltcutil v1.0.2-beta/go.mod h1:G1JGpaqtMm0mPtheTryXnDd9a4KAFu github.com/ltcsuite/ltcwallet/wallet/txrules v1.0.0 h1:WDrodrBVO5EbaAT5//i2YOg7DH+FnWSm/kjTvMNT/EY= github.com/ltcsuite/ltcwallet/wallet/txrules v1.0.0/go.mod h1:H/FiHbbfd9+TPn9ao1Ier7rBosT5j2ejIbHvZqHSEVU= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= 
-github.com/lucas-clemente/quic-go v0.15.7/go.mod h1:Myi1OyS0FOjL3not4BxT7KN29bRkcMUV5JVVFLKtDp8= github.com/lucas-clemente/quic-go v0.16.0 h1:jJw36wfzGJhmOhAOaOC2lS36WgeqXQszH47A7spo1LI= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= +github.com/lucas-clemente/quic-go v0.18.1/go.mod h1:yXttHsSNxQi8AWijC/vLP+OJczXqzHSOcJrM5ITUlCg= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= @@ -1026,15 +1124,21 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= +github.com/marten-seemann/qpack v0.2.0/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= github.com/marten-seemann/qtls v0.9.1 h1:O0YKQxNVPaiFgMng0suWEOY2Sb4LT2sRn9Qimq3Z1IQ= github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= +github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= +github.com/marten-seemann/qtls-go1-15 v0.1.0/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod 
h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1042,7 +1146,7 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -1066,11 +1170,13 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.4/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.28/go.mod 
h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= @@ -1092,8 +1198,12 @@ github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVq github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod 
h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= @@ -1103,6 +1213,9 @@ github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y9 github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= github.com/multiformats/go-multiaddr v0.2.2 h1:XZLDTszBIJe6m0zF6ITBrEcZR73OPUhCBBS9rYAuUzI= github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= +github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= +github.com/multiformats/go-multiaddr v0.3.1 h1:1bxa+W7j9wZKTZREySx1vPMs2TqrYWjVZ7zE6/XLG1I= +github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= @@ -1120,9 +1233,13 @@ github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.1.5 h1:QoRKvu0xHN1FCFJcMQLbG/yQE2z441L5urvG3+qyz7g= github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= +github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= +github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod 
h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.2 h1:2pAgScmS1g9XjH7EtAfNhTuyrWYEWcxy0G5Wo85hWDA= github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.7/go.mod h1:XuKXPp8VHcTygube3OWZC+aZrA+H1IhmjoCDtJc7PXM= @@ -1131,15 +1248,21 @@ github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-multihash v0.0.14 h1:QoBceQYQQtNUuf6s7wHxnE2c8bhbMqhfGzNI032se/I= +github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= +github.com/multiformats/go-multistream v0.1.2 h1:knyamLYMPFPngQjGQ0lhnlys3jtVR/3xV6TREUJr+fE= +github.com/multiformats/go-multistream v0.1.2/go.mod 
h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/multiformats/go-varint v0.0.6 h1:gk85QWKxh3TazbLxED/NlDVv8+q+ReFJk7Y2W/KhfNY= +github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nanmu42/etherscan-api v1.1.1 h1:Pcx6+iIiERfw7ZeybEOx+ykEQDn1P0Shoxbamk/j620= @@ -1171,19 +1294,25 @@ github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FW github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega 
v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0 h1:R1uwffexN6Pr340GtYRIdZmAiN4J+iw6WG4wog1DUXg= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 h1:lDH9UUVJtmYCjyT0CI4q8xvlXPxeZ0gYCVvWbmPlp88= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= @@ -1195,6 +1324,8 @@ github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKw github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.1/go.mod 
h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= @@ -1202,6 +1333,7 @@ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnh github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= @@ -1212,6 +1344,7 @@ github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssy github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1247,6 +1380,7 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod 
h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1263,7 +1397,11 @@ github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= +github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1278,6 +1416,7 @@ github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNue github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= 
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= @@ -1319,6 +1458,8 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= @@ -1364,15 +1505,21 @@ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify 
v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.0 h1:jlIyCplCJFULU/01vCkhKuTyc3OorI3bJFuw6obfgho= github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/supranational/blst v0.1.1/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d h1:gZZadD8H+fF+n9CmNhYL1Y0dJB+kLOmKd7FbPJLeGHs= github.com/syndtr/goleveldb v1.0.1-0.20190923125748-758128399b1d/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/tyler-smith/go-bip39 v1.0.2 h1:+t3w+KwLXO6154GNJY+qUtIxLTmFjfUmpguQT1OlOT8= @@ -1402,16 +1549,20 @@ github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMU github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc/go.mod h1:r45hJU7yEoA81k6MWNhpMj/kms0n14dkzkxYHoB96UM= github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba h1:X4n8JG2e2biEZZXdBKt9HX7DN3bYGFUqljqqy0DqgnY= github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba/go.mod 
h1:CHQnYnQUEPydYCwuy8lmTHfGmdw9TKrhWV0xLx8l0oM= -github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200501014322-5f9941ef88e0/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200501232601-351665a6e756/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d h1:Y25auOnuZb/GuJvqMflRSDWBz8/HBRME8fiD+H8zLfs= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-ctrlnet v0.0.0-20180313164037-f564fbbdaa95/go.mod h1:SJqKCCPXRfBFCwXjfNT/skfsceF7+MBFLI2OrvuRA7g= @@ -1423,20 +1574,31 @@ github.com/whyrusleeping/go-smux-multiplex v3.0.16+incompatible/go.mod h1:34LEDb github.com/whyrusleeping/go-smux-multistream v2.0.2+incompatible/go.mod h1:dRWHHvc4HDQSHh9gbKEBbUZ+f2Q8iZTPG3UOGYODxSQ= github.com/whyrusleeping/go-smux-yamux v2.0.8+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= github.com/whyrusleeping/go-smux-yamux v2.0.9+incompatible/go.mod h1:6qHUzBXUbB9MXmw3AUdB52L8sEb/hScCqOdW2kj/wuI= +github.com/whyrusleeping/ledger-filecoin-go v0.9.1-0.20201010031517-c3dcc1bddce4/go.mod h1:K+EVq8d5QcQ2At5VECsA+SNZvWefyBXh8TnIsxo1OvQ= github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= github.com/whyrusleeping/pubsub v0.0.0-20131020042734-02de8aa2db3d/go.mod h1:g7ckxrjiFh8mi1AY7ox23PZD0g6QU/TxW3U3unX7I3A= +github.com/whyrusleeping/timecache 
v0.0.0-20160911033111-cfcb2f1abfee h1:lYbXeSvJi5zk5GLKVuid9TVjS9a0OmLIDKTfoZBL6Ow= github.com/whyrusleeping/timecache v0.0.0-20160911033111-cfcb2f1abfee/go.mod h1:m2aV4LZI4Aez7dP5PMyVKEHhUyEJ/RjmPEDOpDvudHg= github.com/whyrusleeping/yamux v1.1.5/go.mod h1:E8LnQQ8HKx5KD29HZFUwM1PxCOdPRzGwur1mcYhXcD8= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f h1:nMhj+x/m7ZQsHBz0L3gpytp0v6ogokdbrQDnhB8Kh7s= +github.com/xlab/c-for-go v0.0.0-20201002084316-c134bfab968f/go.mod h1:h/1PEBwj7Ym/8kOuMWvO2ujZ6Lt+TMbySEXNhjjR87I= +github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= +github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= +github.com/xorcare/golden v0.6.0/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= +github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 h1:oWgZJmC1DorFZDpfMfWg7xk29yEOZiXmo/wZl+utTI8= +github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542/go.mod h1:7T39/ZMvaSEZlBPoYfVFmsBLmUl3uz9IuzWj/U6FtvQ= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.12.1/go.mod h1:KatxXrVDzgWwbssUWsF5+cOJHXPvzQ09YSlzGNuhOEo= 
github.com/zquestz/grab v0.0.0-20190224022517-abcee96e61b1/go.mod h1:bslhAiUxakrA6z6CHmVyvkfpnxx18RJBwVyx2TluJWw= go.dedis.ch/fixbuf v1.0.3/go.mod h1:yzJMt34Wa5xD37V5RTdmp38cz3QhMagdGoem9anUalw= go.dedis.ch/kyber/v3 v3.0.4/go.mod h1:OzvaEnPvKlyrWyp3kGXlFdp7ap1VC6RkZDTaPikqhsQ= @@ -1456,13 +1618,15 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/dig v1.8.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= +go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -1479,8 +1643,8 @@ go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org 
v0.0.0-20190218023631-ce4c26f7be8e/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= -go4.org v0.0.0-20190313082347-94abd6928b1d/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1510,9 +1674,7 @@ golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200311171314-f7b00557c8c4/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200317142112-1b76d66859c6/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200427165652-729f1e841bcc/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1526,9 +1688,9 @@ golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm0 golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= +golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1551,6 +1713,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1587,15 +1751,15 @@ golang.org/x/net 
v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1614,6 +1778,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ 
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1648,23 +1814,22 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025090151-53bf42e6b339/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1672,11 +1837,9 @@ golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200427175716-29b57079015a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3 h1:5B6i6EAiSYyejWfvc5Rc9BbI3rzIsrrXfAQBWnYfn+w= golang.org/x/sys v0.0.0-20200501145240-bc7a7d42d5c3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1726,22 +1889,19 @@ golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200318150045-ba25ddc85566/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4 h1:kDtqNkeBrZb8B+atrj50B5XLHpzXXqcCdZPP/ApQ5NY= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod 
h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= @@ -1755,9 +1915,6 @@ google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1784,13 +1941,9 @@ google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto 
v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84 h1:pSLkPbrjnPyLDYUO2VM9mDLqo2V6CFBY84lFSZAfoi4= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -1812,7 +1965,6 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1822,10 +1974,12 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= 
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1835,6 +1989,7 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1854,10 +2009,13 @@ gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bl gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= @@ -1873,6 +2031,16 @@ honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= +modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.1.1 h1:FeylZSVX8S+58VsyJlkEj2bcpdytmp9MmDKZkKx8OIE= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0 h1:7ccXrupWZIS3twbUGrtKmHS2DXY6xegFua+6O3xgAFU= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= 
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/OpenBazaar/multiwallet/keys/keys_test.go b/vendor/github.com/OpenBazaar/multiwallet/keys/keys_test.go new file mode 100644 index 0000000000..d16fd511ff --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/keys/keys_test.go @@ -0,0 +1,342 @@ +package keys + +import ( + "bytes" + "encoding/hex" + "testing" + + "github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/hdkeychain" +) + +func createKeyManager() (*KeyManager, error) { + masterPrivKey, err := hdkeychain.NewKeyFromString("xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6") + if err != nil { + return nil, err + } + return NewKeyManager(&datastore.MockKeyStore{Keys: make(map[string]*datastore.KeyStoreEntry)}, &chaincfg.MainNetParams, masterPrivKey, wallet.Bitcoin, bitcoinAddress) +} + +func bitcoinAddress(key *hdkeychain.ExtendedKey, params *chaincfg.Params) (btcutil.Address, error) { + return key.Address(params) +} + +func TestNewKeyManager(t *testing.T) { + km, err := createKeyManager() + if err != nil { + t.Error(err) + } + keys, err := km.datastore.GetAll() + if err != nil { + t.Error(err) + } + if len(keys) != LOOKAHEADWINDOW*2 { + t.Error("Failed to generate lookahead windows when creating a new KeyManager") + } +} + +func TestBip44Derivation(t *testing.T) { + masterPrivKey, err := hdkeychain.NewKeyFromString("xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6") + if err != nil { + t.Error(err) + } + internal, external, err := Bip44Derivation(masterPrivKey, wallet.Bitcoin) + if err != nil { + t.Error(err) + } + externalKey, err := external.Child(0) + if err != nil { 
+ t.Error(err) + } + externalAddr, err := externalKey.Address(&chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if externalAddr.String() != "17rxURoF96VhmkcEGCj5LNQkmN9HVhWb7F" { + t.Error("Incorrect Bip44 key derivation") + } + + internalKey, err := internal.Child(0) + if err != nil { + t.Error(err) + } + internalAddr, err := internalKey.Address(&chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if internalAddr.String() != "16wbbYdecq9QzXdxa58q2dYXJRc8sfkE4J" { + t.Error("Incorrect Bip44 key derivation") + } +} + +func TestKeys_generateChildKey(t *testing.T) { + km, err := createKeyManager() + if err != nil { + t.Error(err) + } + internalKey, err := km.GenerateChildKey(wallet.INTERNAL, 0) + internalAddr, err := internalKey.Address(&chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if internalAddr.String() != "16wbbYdecq9QzXdxa58q2dYXJRc8sfkE4J" { + t.Error("generateChildKey returned incorrect key") + } + externalKey, err := km.GenerateChildKey(wallet.EXTERNAL, 0) + externalAddr, err := externalKey.Address(&chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if externalAddr.String() != "17rxURoF96VhmkcEGCj5LNQkmN9HVhWb7F" { + t.Error("generateChildKey returned incorrect key") + } +} + +func TestKeyManager_lookahead(t *testing.T) { + masterPrivKey, err := hdkeychain.NewKeyFromString("xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6") + if err != nil { + t.Error(err) + } + mock := &datastore.MockKeyStore{Keys: make(map[string]*datastore.KeyStoreEntry)} + km, err := NewKeyManager(mock, &chaincfg.MainNetParams, masterPrivKey, wallet.Bitcoin, bitcoinAddress) + if err != nil { + t.Error(err) + } + for _, key := range mock.Keys { + key.Used = true + } + n := len(mock.Keys) + err = km.lookahead() + if err != nil { + t.Error(err) + } + if len(mock.Keys) != n+(LOOKAHEADWINDOW*2) { + t.Error("Failed to generated a correct lookahead window") + } + unused := 0 + for 
_, k := range mock.Keys { + if !k.Used { + unused++ + } + } + if unused != LOOKAHEADWINDOW*2 { + t.Error("Failed to generated unused keys in lookahead window") + } +} + +func TestKeyManager_MarkKeyAsUsed(t *testing.T) { + km, err := createKeyManager() + if err != nil { + t.Error(err) + } + i, err := km.datastore.GetUnused(wallet.EXTERNAL) + if err != nil { + t.Error(err) + } + if len(i) == 0 { + t.Error("No unused keys in database") + } + key, err := km.GenerateChildKey(wallet.EXTERNAL, uint32(i[0])) + if err != nil { + t.Error(err) + } + addr, err := key.Address(&chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + err = km.MarkKeyAsUsed(addr.ScriptAddress()) + if err != nil { + t.Error(err) + } + if len(km.GetKeys()) != (LOOKAHEADWINDOW*2)+1 { + t.Error("Failed to extend lookahead window when marking as read") + } + unused, err := km.datastore.GetUnused(wallet.EXTERNAL) + if err != nil { + t.Error(err) + } + for _, i := range unused { + if i == 0 { + t.Error("Failed to mark key as used") + } + } +} + +func TestKeyManager_GetCurrentKey(t *testing.T) { + masterPrivKey, err := hdkeychain.NewKeyFromString("xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6") + if err != nil { + t.Error(err) + } + mock := &datastore.MockKeyStore{Keys: make(map[string]*datastore.KeyStoreEntry)} + km, err := NewKeyManager(mock, &chaincfg.MainNetParams, masterPrivKey, wallet.Bitcoin, bitcoinAddress) + if err != nil { + t.Error(err) + } + var scriptAddress string + for script, key := range mock.Keys { + if key.Path.Purpose == wallet.EXTERNAL && key.Path.Index == 0 { + scriptAddress = script + break + } + } + key, err := km.GetCurrentKey(wallet.EXTERNAL) + if err != nil { + t.Error(err) + } + addr, err := key.Address(&chaincfg.Params{}) + if err != nil { + t.Error(err) + } + if hex.EncodeToString(addr.ScriptAddress()) != scriptAddress { + t.Error("CurrentKey returned wrong key") + } +} + +func 
TestKeyManager_GetFreshKey(t *testing.T) { + km, err := createKeyManager() + if err != nil { + t.Error(err) + } + key, err := km.GetFreshKey(wallet.EXTERNAL) + if err != nil { + t.Error(err) + } + if len(km.GetKeys()) != LOOKAHEADWINDOW*2+1 { + t.Error("Failed to create additional key") + } + key2, err := km.GenerateChildKey(wallet.EXTERNAL, 20) + if err != nil { + t.Error(err) + } + if key.String() != key2.String() { + t.Error("GetFreshKey returned incorrect key") + } +} + +func TestKeyManager_GetNextUnused(t *testing.T) { + km, err := createKeyManager() + if err != nil { + t.Fatal(err) + } + + // Since the lookahead window has already been generated, GetNextUnused + // should return the key with index 1. + key, err := km.GetNextUnused(wallet.EXTERNAL) + if err != nil { + t.Fatal(err) + } + + nextUnused, err := km.GenerateChildKey(wallet.EXTERNAL, uint32(1)) + if err != nil { + t.Fatal(err) + } + + if key.String() != nextUnused.String() { + t.Errorf("Derived incorrect key. Expected %s got %s", nextUnused.String(), key.String()) + } + + // Next let's mark all the keys as used and make sure GetNextUnused still + // generates a lookahead window and returns the next unused key. + allKeys := km.GetKeys() + for _, key := range allKeys { + addr, err := key.Address(&chaincfg.MainNetParams) + if err != nil { + t.Fatal(err) + } + if err := km.datastore.MarkKeyAsUsed(addr.ScriptAddress()); err != nil { + t.Fatal(err) + } + } + + key, err = km.GetNextUnused(wallet.EXTERNAL) + if err != nil { + t.Fatal(err) + } + + nextUnused, err = km.GenerateChildKey(wallet.EXTERNAL, uint32(21)) + if err != nil { + t.Fatal(err) + } + + if key.String() != nextUnused.String() { + t.Errorf("Derived incorrect key. 
Expected %s got %s", nextUnused.String(), key.String()) + } +} + +func TestKeyManager_GetKeys(t *testing.T) { + km, err := createKeyManager() + if err != nil { + t.Error(err) + } + keys := km.GetKeys() + if len(keys) != LOOKAHEADWINDOW*2 { + t.Error("Returned incorrect number of keys") + } + for _, key := range keys { + if key == nil { + t.Error("Incorrectly returned nil key") + } + } +} + +func TestKeyManager_GetKeyForScript(t *testing.T) { + masterPrivKey, err := hdkeychain.NewKeyFromString("xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6") + if err != nil { + t.Error(err) + } + mock := &datastore.MockKeyStore{Keys: make(map[string]*datastore.KeyStoreEntry)} + km, err := NewKeyManager(mock, &chaincfg.MainNetParams, masterPrivKey, wallet.Bitcoin, bitcoinAddress) + if err != nil { + t.Error(err) + } + addr, err := btcutil.DecodeAddress("17rxURoF96VhmkcEGCj5LNQkmN9HVhWb7F", &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + key, err := km.GetKeyForScript(addr.ScriptAddress()) + if err != nil { + t.Error(err) + } + if key == nil { + t.Error("Returned key is nil") + } + testAddr, err := key.Address(&chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if testAddr.String() != addr.String() { + t.Error("Returned incorrect key") + } + importKey, err := btcec.NewPrivateKey(btcec.S256()) + if err != nil { + t.Error(err) + } + importAddr, err := key.Address(&chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + importScript, err := txscript.PayToAddrScript(importAddr) + if err != nil { + t.Error(err) + } + err = km.datastore.ImportKey(importScript, importKey) + if err != nil { + t.Error(err) + } + retKey, err := km.GetKeyForScript(importScript) + if err != nil { + t.Error(err) + } + retECKey, err := retKey.ECPrivKey() + if err != nil { + t.Error(err) + } + if !bytes.Equal(retECKey.Serialize(), importKey.Serialize()) { + t.Error("Failed to return imported key") + } +} diff --git 
a/vendor/github.com/OpenBazaar/multiwallet/litecoin/address/address_test.go b/vendor/github.com/OpenBazaar/multiwallet/litecoin/address/address_test.go new file mode 100644 index 0000000000..c392021bd4 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/litecoin/address/address_test.go @@ -0,0 +1,104 @@ +package address + +import ( + "github.com/btcsuite/btcd/chaincfg" + "testing" +) + +func TestDecodeLitecoinAddress(t *testing.T) { + // Mainnet + addr, err := DecodeAddress("ltc1qj065d66h5943s357vfd9kltn6k4atn3qwqy8frycnfcf4ycwhrtqr6496q", &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr.String() != "ltc1qj065d66h5943s357vfd9kltn6k4atn3qwqy8frycnfcf4ycwhrtqr6496q" { + t.Error("Address decoding error") + } + addr1, err := DecodeAddress("LKxmT8iooGt2d9xQn1y8PU6KwW3J8EDQ9a", &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr1.String() != "LKxmT8iooGt2d9xQn1y8PU6KwW3J8EDQ9a" { + t.Error("Address decoding error") + } + // Testnet + addr, err = DecodeAddress("mjFBdzsYNBCeabLNwyYYCt8epG7GhzYeTw", &chaincfg.TestNet3Params) + if err != nil { + t.Error(err) + } + if addr.String() != "mjFBdzsYNBCeabLNwyYYCt8epG7GhzYeTw" { + t.Error("Address decoding error") + } + + // Testnet witness + addr, err = DecodeAddress("tltc1qxjqda2dlef5250yqgdhyscj2n2sv98yt6f9ewzvrmt0v86xuefxs9xya9u", &chaincfg.TestNet3Params) + if err != nil { + t.Error(err) + } + if addr.String() != "tltc1qxjqda2dlef5250yqgdhyscj2n2sv98yt6f9ewzvrmt0v86xuefxs9xya9u" { + t.Error("Address decoding error") + } + +} + +var dataElement = []byte{203, 72, 18, 50, 41, 156, 213, 116, 49, 81, 172, 75, 45, 99, 174, 25, 142, 123, 176, 169} + +// Second address of https://github.com/Bitcoin-UAHF/spec/blob/master/cashaddr.md#examples-of-address-translation +func TestAddressPubKeyHash_EncodeAddress(t *testing.T) { + // Mainnet + addr, err := NewAddressPubKeyHash(dataElement, &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr.String() != 
"LdkomjvYVsoY5DdZx3LJVd1dXRhpKc18Xa" { + t.Error("Address decoding error") + } + // Testnet + addr, err = NewAddressPubKeyHash(dataElement, &chaincfg.TestNet3Params) + if err != nil { + t.Error(err) + } + if addr.String() != "mz3ooahhEEzjbXR2VUKP3XACBCwF5zhQBy" { + t.Error("Address decoding error") + } +} + +var dataElement2 = []byte{118, 160, 64, 83, 189, 160, 168, 139, 218, 81, 119, 184, 106, 21, 195, 178, 159, 85, 152, 115, 118, 160, 64, 83, 189, 160, 168, 139, 218, 81, 119, 184} + +// 4th address of https://github.com/Bitcoin-UAHF/spec/blob/master/cashaddr.md#examples-of-address-translation +func TestWitnessScriptHash_EncodeAddress(t *testing.T) { + // Mainnet + addr, err := NewAddressWitnessScriptHash(dataElement2, &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr.String() != "ltc1qw6syq5aa5z5ghkj3w7ux59wrk204txrnw6syq5aa5z5ghkj3w7uqdjs2cd" { + t.Error("Address decoding error") + } + // Testnet + addr, err = NewAddressWitnessScriptHash(dataElement2, &chaincfg.TestNet3Params) + if err != nil { + t.Error(err) + } + if addr.String() != "tltc1qw6syq5aa5z5ghkj3w7ux59wrk204txrnw6syq5aa5z5ghkj3w7uqxa558c" { + t.Error("Address decoding error") + } +} + +func TestScriptParsing(t *testing.T) { + addr, err := DecodeAddress("ltc1qj065d66h5943s357vfd9kltn6k4atn3qwqy8frycnfcf4ycwhrtqr6496q", &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + script, err := PayToAddrScript(addr) + if err != nil { + t.Error(err) + } + addr2, err := ExtractPkScriptAddrs(script, &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr.String() != addr2.String() { + t.Error("Failed to convert script back into address") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/litecoin/sign_test.go b/vendor/github.com/OpenBazaar/multiwallet/litecoin/sign_test.go new file mode 100644 index 0000000000..8a95c6e623 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/litecoin/sign_test.go @@ -0,0 +1,703 @@ +package litecoin + +import ( + 
"bytes" + "encoding/hex" + "testing" + "time" + + "github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/multiwallet/keys" + laddr "github.com/OpenBazaar/multiwallet/litecoin/address" + "github.com/OpenBazaar/multiwallet/model/mock" + "github.com/OpenBazaar/multiwallet/service" + "github.com/OpenBazaar/multiwallet/util" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/hdkeychain" +) + +type FeeResponse struct { + Priority int `json:"priority"` + Normal int `json:"normal"` + Economic int `json:"economic"` +} + +func newMockWallet() (*LitecoinWallet, error) { + mockDb := datastore.NewMockMultiwalletDatastore() + + db, err := mockDb.GetDatastoreForWallet(wallet.Litecoin) + if err != nil { + return nil, err + } + + params := &chaincfg.MainNetParams + + seed, err := hex.DecodeString("16c034c59522326867593487c03a8f9615fb248406dd0d4ffb3a6b976a248403") + if err != nil { + return nil, err + } + master, err := hdkeychain.NewMaster(seed, params) + if err != nil { + return nil, err + } + km, err := keys.NewKeyManager(db.Keys(), params, master, wallet.Litecoin, litecoinAddress) + if err != nil { + return nil, err + } + + fp := util.NewFeeProvider(2000, 300, 200, 100, nil) + + bw := &LitecoinWallet{ + params: params, + km: km, + db: db, + fp: fp, + } + cli := mock.NewMockApiClient(bw.AddressToScript) + ws, err := service.NewWalletService(db, km, cli, params, wallet.Litecoin, cache.NewMockCacher()) + if err != nil { + return nil, err + } + bw.client = cli + bw.ws = ws + return bw, nil +} + +func TestWalletService_VerifyWatchScriptFilter(t *testing.T) { + // Verify that AddWatchedAddress should never add a script which already represents a key from its own wallet + w, err := newMockWallet() + if err != nil { + 
t.Fatal(err) + } + keys := w.km.GetKeys() + + addr, err := w.km.KeyToAddress(keys[0]) + if err != nil { + t.Fatal(err) + } + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) != 0 { + t.Error("Put watched scripts fails on key manager owned key") + } +} + +func TestWalletService_VerifyWatchScriptPut(t *testing.T) { + // Verify that AddWatchedAddress should add a script which does not represent a key from its own wallet + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + + addr, err := w.DecodeAddress("LhyLNfBkoKshT7R8Pce6vkB9T2cP2o84hx") + if err != nil { + t.Fatal(err) + } + + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) == 0 { + t.Error("Put watched scripts fails on non-key manager owned key") + } +} + +func waitForTxnSync(t *testing.T, txnStore wallet.Txns) { + // Look for a known txn, this sucks a bit. It would be better to check if the + // number of stored txns matched the expected, but not all the mock + // transactions are relevant, so the numbers don't add up. + // Even better would be for the wallet to signal that the initial sync was + // done. 
+ lastTxn := mock.MockTransactions[len(mock.MockTransactions)-2] + txHash, err := chainhash.NewHashFromStr(lastTxn.Txid) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 100; i++ { + if _, err := txnStore.Get(*txHash); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatal("timeout waiting for wallet to sync transactions") +} + +func TestLitecoinWallet_buildTx(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + addr, err := w.DecodeAddress("Lep9b95MtHofxS72Hjdg4Wfmr43sHetrZT") + if err != nil { + t.Error(err) + } + + // Test build normal tx + tx, err := w.buildTx(1500000, addr, wallet.NORMAL, nil) + if err != nil { + t.Error(err) + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if !validChangeAddress(tx, w.db, w.params) { + t.Error("Built tx does not contain a valid change output") + } + + // Insuffient funds + _, err = w.buildTx(1000000000, addr, wallet.NORMAL, nil) + if err != wallet.ErrorInsuffientFunds { + t.Error("Failed to throw insuffient funds error") + } + + // Dust + _, err = w.buildTx(1, addr, wallet.NORMAL, nil) + if err != wallet.ErrorDustAmount { + t.Error("Failed to throw dust error") + } +} + +func TestLitecoinWallet_buildSpendAllTx(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + time.Sleep(time.Second / 2) + + waitForTxnSync(t, w.db.Txns()) + addr, err := w.DecodeAddress("Lep9b95MtHofxS72Hjdg4Wfmr43sHetrZT") + if err != nil { + t.Error(err) + } + + // Test build spendAll tx + tx, err := w.buildSpendAllTx(addr, wallet.NORMAL) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Fatal(err) + } + spendableUtxos := 0 + for _, u := range utxos { + if !u.WatchOnly { + spendableUtxos++ + } + } + if len(tx.TxIn) != 
spendableUtxos { + t.Error("Built tx does not spend all available utxos") + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if len(tx.TxOut) != 1 { + t.Error("Built tx should only have one output") + } + + // Verify the signatures on each input using the scripting engine + for i, in := range tx.TxIn { + var prevScript []byte + for _, u := range utxos { + if util.OutPointsEqual(u.Op, in.PreviousOutPoint) { + prevScript = u.ScriptPubkey + break + } + } + vm, err := txscript.NewEngine(prevScript, tx, i, txscript.StandardVerifyFlags, nil, nil, 0) + if err != nil { + t.Fatal(err) + } + if err := vm.Execute(); err != nil { + t.Error(err) + } + } +} + +func containsOutput(tx *wire.MsgTx, addr btcutil.Address) bool { + for _, o := range tx.TxOut { + script, _ := laddr.PayToAddrScript(addr) + if bytes.Equal(script, o.PkScript) { + return true + } + } + return false +} + +func validInputs(tx *wire.MsgTx, db wallet.Datastore) bool { + utxos, _ := db.Utxos().GetAll() + uMap := make(map[wire.OutPoint]bool) + for _, u := range utxos { + uMap[u.Op] = true + } + for _, in := range tx.TxIn { + if !uMap[in.PreviousOutPoint] { + return false + } + } + return true +} + +func validChangeAddress(tx *wire.MsgTx, db wallet.Datastore, params *chaincfg.Params) bool { + for _, out := range tx.TxOut { + _, addrs, _, err := txscript.ExtractPkScriptAddrs(out.PkScript, params) + if err != nil { + continue + } + if len(addrs) == 0 { + continue + } + _, err = db.Keys().GetPathForKey(addrs[0].ScriptAddress()) + if err == nil { + return true + } + } + return false +} + +func TestLitecoinWallet_GenerateMultisigScript(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey1, err := key1.ECPubKey() + if err != nil { + t.Error(err) + } + key2, err 
:= w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey2, err := key2.ECPubKey() + if err != nil { + t.Error(err) + } + key3, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey3, err := key3.ECPubKey() + if err != nil { + t.Error(err) + } + keys := []hdkeychain.ExtendedKey{*key1, *key2, *key3} + + // test without timeout + addr, redeemScript, err := w.generateMultisigScript(keys, 2, 0, nil) + if err != nil { + t.Error(err) + } + if addr.String() != "ltc1qrrsyr5ktgfl3w8aahzzdz5g87yplaze7ump2vht7lj7g5fg34ruspm8n44" { + t.Error("Returned invalid address") + } + + rs := "52" + // OP_2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey1.SerializeCompressed()) + // pubkey1 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey2.SerializeCompressed()) + // pubkey2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey3.SerializeCompressed()) + // pubkey3 + "53" + // OP_3 + "ae" // OP_CHECKMULTISIG + rsBytes, err := hex.DecodeString(rs) + if !bytes.Equal(rsBytes, redeemScript) { + t.Error("Returned invalid redeem script") + } + + // test with timeout + key4, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey4, err := key4.ECPubKey() + if err != nil { + t.Error(err) + } + addr, redeemScript, err = w.generateMultisigScript(keys, 2, time.Hour*10, key4) + if err != nil { + t.Error(err) + } + if addr.String() != "ltc1qf5x040t6thd6mkcjsde2zxpxq2lmzp3grwau055e085vqs00qa3qscdl2k" { + t.Error("Returned invalid address") + } + + rs = "63" + // OP_IF + "52" + // OP_2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey1.SerializeCompressed()) + // pubkey1 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey2.SerializeCompressed()) + // pubkey2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey3.SerializeCompressed()) + // pubkey3 + "53" + // OP_3 + "ae" + // OP_CHECKMULTISIG + "67" + // OP_ELSE + "01" + // OP_PUSHDATA(1) + "3c" + // 60 blocks + "b2" + // 
OP_CHECKSEQUENCEVERIFY + "75" + // OP_DROP + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey4.SerializeCompressed()) + // timeout pubkey + "ac" + // OP_CHECKSIG + "68" // OP_ENDIF + rsBytes, err = hex.DecodeString(rs) + if !bytes.Equal(rsBytes, redeemScript) { + t.Error("Returned invalid redeem script") + } +} + +func TestLitecoinWallet_newUnsignedTransaction(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + addr, err := w.DecodeAddress("Lep9b95MtHofxS72Hjdg4Wfmr43sHetrZT") + if err != nil { + t.Error(err) + } + + script, err := laddr.PayToAddrScript(addr) + if err != nil { + t.Error(err) + } + out := wire.NewTxOut(10000, script) + outputs := []*wire.TxOut{out} + + changeSource := func() ([]byte, error) { + addr := w.CurrentAddress(wallet.INTERNAL) + script, err := laddr.PayToAddrScript(addr) + if err != nil { + return []byte{}, err + } + return script, nil + } + + inputSource := func(target btcutil.Amount) (total btcutil.Amount, inputs []*wire.TxIn, inputValues []btcutil.Amount, scripts [][]byte, err error) { + total += btcutil.Amount(utxos[0].Value) + in := wire.NewTxIn(&utxos[0].Op, []byte{}, [][]byte{}) + in.Sequence = 0 // Opt-in RBF so we can bump fees + inputs = append(inputs, in) + return total, inputs, inputValues, scripts, nil + } + + // Regular transaction + authoredTx, err := newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err != nil { + t.Error(err) + } + if len(authoredTx.Tx.TxOut) != 2 { + t.Error("Returned incorrect number of outputs") + } + if len(authoredTx.Tx.TxIn) != 1 { + t.Error("Returned incorrect number of inputs") + } + + // Insufficient funds + outputs[0].Value = 1000000000 + _, err = newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err == nil { + t.Error("Failed to return insuffient funds error") + } +} + 
+func TestLitecoinWallet_CreateMultisigSignature(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs) != 2 { + t.Error(err) + } + for _, sig := range sigs { + if len(sig.Signature) == 0 { + t.Error("Returned empty signature") + } + } +} + +func buildTxData(w *LitecoinWallet) ([]wallet.TransactionInput, []wallet.TransactionOutput, []byte, error) { + redeemScript := "522103c157f2a7c178430972263232c9306110090c50b44d4e906ecd6d377eec89a53c210205b02b9dbe570f36d1c12e3100e55586b2b9dc61d6778c1d24a8eaca03625e7e21030c83b025cd6bdd8c06e93a2b953b821b4a8c29da211335048d7dc3389706d7e853ae" + redeemScriptBytes, err := hex.DecodeString(redeemScript) + if err != nil { + return nil, nil, nil, err + } + h1, err := hex.DecodeString("1a20f4299b4fa1f209428dace31ebf4f23f13abd8ed669cebede118343a6ae05") + if err != nil { + return nil, nil, nil, err + } + in1 := wallet.TransactionInput{ + OutpointHash: h1, + OutpointIndex: 1, + } + h2, err := hex.DecodeString("458d88b4ae9eb4a347f2e7f5592f1da3b9ddf7d40f307f6e5d7bc107a9b3e90e") + if err != nil { + return nil, nil, nil, err + } + in2 := wallet.TransactionInput{ + OutpointHash: h2, + OutpointIndex: 0, + } + addr, err := w.DecodeAddress("Lep9b95MtHofxS72Hjdg4Wfmr43sHetrZT") + if err != nil { + return nil, nil, nil, err + } + + out := wallet.TransactionOutput{ + Value: 20000, + Address: addr, + } + return []wallet.TransactionInput{in1, in2}, []wallet.TransactionOutput{out}, redeemScriptBytes, nil +} + +func TestLitecoinWallet_Multisign(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := 
w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs1, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs1) != 2 { + t.Error(err) + } + sigs2, err := w.CreateMultisigSignature(ins, outs, key2, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs2) != 2 { + t.Error(err) + } + txBytes, err := w.Multisign(ins, outs, sigs1, sigs2, redeemScript, 50, false) + if err != nil { + t.Error(err) + } + + tx := wire.NewMsgTx(0) + tx.BtcDecode(bytes.NewReader(txBytes), wire.ProtocolVersion, wire.WitnessEncoding) + if len(tx.TxIn) != 2 { + t.Error("Transactions has incorrect number of inputs") + } + if len(tx.TxOut) != 1 { + t.Error("Transactions has incorrect number of outputs") + } + for _, in := range tx.TxIn { + if len(in.Witness) == 0 { + t.Error("Input witness has zero length") + } + } +} + +func TestLitecoinWallet_bumpFee(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + txns, err := w.db.Txns().GetAll(false) + if err != nil { + t.Error(err) + } + ch, err := chainhash.NewHashFromStr(txns[2].Txid) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + for _, u := range utxos { + if u.Op.Hash.IsEqual(ch) { + u.AtHeight = 0 + w.db.Utxos().Put(u) + } + } + + w.db.Txns().UpdateHeight(*ch, 0, time.Now()) + + // Test unconfirmed + _, err = w.bumpFee(*ch) + if err != nil { + t.Error(err) + } + + err = w.db.Txns().UpdateHeight(*ch, 1289597, time.Now()) + if err != nil { + t.Error(err) + } + + // Test confirmed + _, err = w.bumpFee(*ch) + if err == nil { + t.Error("Should not be able to bump fee of confirmed txs") + } +} + +func TestLitecoinWallet_sweepAddress(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + 
w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + + var in wallet.TransactionInput + var key *hdkeychain.ExtendedKey + for _, ut := range utxos { + if ut.Value > 0 && !ut.WatchOnly { + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + key, err = w.km.GetKeyForScript(addr.ScriptAddress()) + if err != nil { + t.Error(err) + } + h, err := hex.DecodeString(ut.Op.Hash.String()) + if err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + } + } + // P2PKH addr + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key, nil, wallet.NORMAL) + if err != nil { + t.Error(err) + return + } + + // 1 of 2 P2WSH + for _, ut := range utxos { + if ut.Value > 0 && ut.WatchOnly { + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + h, err := hex.DecodeString(ut.Op.Hash.String()) + if err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + } + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + _, redeemScript, err := w.GenerateMultisigScript([]hdkeychain.ExtendedKey{*key1, *key2}, 1, 0, nil) + if err != nil { + t.Error(err) + } + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key1, &redeemScript, wallet.NORMAL) + if err != nil { + t.Error(err) + } +} + +func TestLitecoinWallet_estimateSpendFee(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + waitForTxnSync(t, w.db.Txns()) + fee, err := w.estimateSpendFee(1000, wallet.NORMAL) + if err != nil { + t.Error(err) + } + if fee <= 0 { + t.Error("Returned incorrect fee") + } +} diff --git 
a/vendor/github.com/OpenBazaar/multiwallet/litecoin/txsizes_test.go b/vendor/github.com/OpenBazaar/multiwallet/litecoin/txsizes_test.go new file mode 100644 index 0000000000..f0776d6452 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/litecoin/txsizes_test.go @@ -0,0 +1,84 @@ +package litecoin + +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. + +/* Copied here from a btcd internal package*/ + +import ( + "bytes" + "encoding/hex" + "github.com/btcsuite/btcd/wire" + "testing" +) + +const ( + p2pkhScriptSize = P2PKHPkScriptSize + p2shScriptSize = 23 +) + +func makeInts(value int, n int) []int { + v := make([]int, n) + for i := range v { + v[i] = value + } + return v +} + +func TestEstimateSerializeSize(t *testing.T) { + tests := []struct { + InputCount int + OutputScriptLengths []int + AddChangeOutput bool + ExpectedSizeEstimate int + }{ + 0: {1, []int{}, false, 161}, + 1: {1, []int{p2pkhScriptSize}, false, 195}, + 2: {1, []int{}, true, 195}, + 3: {1, []int{p2pkhScriptSize}, true, 229}, + 4: {1, []int{p2shScriptSize}, false, 193}, + 5: {1, []int{p2shScriptSize}, true, 227}, + + 6: {2, []int{}, false, 310}, + 7: {2, []int{p2pkhScriptSize}, false, 344}, + 8: {2, []int{}, true, 344}, + 9: {2, []int{p2pkhScriptSize}, true, 378}, + 10: {2, []int{p2shScriptSize}, false, 342}, + 11: {2, []int{p2shScriptSize}, true, 376}, + + // 0xfd is discriminant for 16-bit compact ints, compact int + // total size increases from 1 byte to 3. 
+ 12: {1, makeInts(p2pkhScriptSize, 0xfc), false, 8729}, + 13: {1, makeInts(p2pkhScriptSize, 0xfd), false, 8729 + P2PKHOutputSize + 2}, + 14: {1, makeInts(p2pkhScriptSize, 0xfc), true, 8729 + P2PKHOutputSize + 2}, + 15: {0xfc, []int{}, false, 37560}, + 16: {0xfd, []int{}, false, 37560 + RedeemP2PKHInputSize + 2}, + } + for i, test := range tests { + outputs := make([]*wire.TxOut, 0, len(test.OutputScriptLengths)) + for _, l := range test.OutputScriptLengths { + outputs = append(outputs, &wire.TxOut{PkScript: make([]byte, l)}) + } + actualEstimate := EstimateSerializeSize(test.InputCount, outputs, test.AddChangeOutput, P2PKH) + if actualEstimate != test.ExpectedSizeEstimate { + t.Errorf("Test %d: Got %v: Expected %v", i, actualEstimate, test.ExpectedSizeEstimate) + } + } +} + +func TestSumOutputSerializeSizes(t *testing.T) { + testTx := "0100000001066b78efa7d66d271cae6d6eb799e1d10953fb1a4a760226cc93186d52b55613010000006a47304402204e6c32cc214c496546c3277191ca734494fe49fed0af1d800db92fed2021e61802206a14d063b67f2f1c8fc18f9e9a5963fe33e18c549e56e3045e88b4fc6219be11012103f72d0a11727219bff66b8838c3c5e1c74a5257a325b0c84247bd10bdb9069e88ffffffff0200c2eb0b000000001976a914426e80ad778792e3e19c20977fb93ec0591e1a3988ac35b7cb59000000001976a914e5b6dc0b297acdd99d1a89937474df77db5743c788ac00000000" + txBytes, err := hex.DecodeString(testTx) + if err != nil { + t.Error(err) + return + } + r := bytes.NewReader(txBytes) + msgTx := wire.NewMsgTx(1) + msgTx.BtcDecode(r, 1, wire.WitnessEncoding) + if SumOutputSerializeSizes(msgTx.TxOut) != 68 { + t.Error("SumOutputSerializeSizes returned incorrect value") + } + +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/litecoin/wallet_test.go b/vendor/github.com/OpenBazaar/multiwallet/litecoin/wallet_test.go new file mode 100644 index 0000000000..429d00ff2e --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/litecoin/wallet_test.go @@ -0,0 +1,71 @@ +package litecoin + +import ( + "crypto/rand" + 
"github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/multiwallet/keys" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcutil/hdkeychain" + "strings" + "testing" +) + +func TestLitecoinWallet_CurrentAddress(t *testing.T) { + w, seed, err := createWalletAndSeed() + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + addr := w.CurrentAddress(wallet.EXTERNAL) + if strings.HasPrefix(strings.ToLower(addr.String()), "ltc1") { + t.Errorf("Address %s hash ltc1 prefix: seed %x", addr, seed) + } + if err := w.db.Keys().MarkKeyAsUsed(addr.ScriptAddress()); err != nil { + t.Fatal(err) + } + } +} + +func TestLitecoinWallet_NewAddress(t *testing.T) { + w, seed, err := createWalletAndSeed() + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + addr := w.NewAddress(wallet.EXTERNAL) + if strings.HasPrefix(strings.ToLower(addr.String()), "ltc1") { + t.Errorf("Address %s hash ltc1 prefix: %x", addr, seed) + } + } +} + +func createWalletAndSeed() (*LitecoinWallet, []byte, error) { + ds := datastore.NewMockMultiwalletDatastore() + db, err := ds.GetDatastoreForWallet(wallet.Litecoin) + if err != nil { + return nil, nil, err + } + + seed := make([]byte, 32) + if _, err := rand.Read(seed); err != nil { + return nil, nil, err + } + + masterPrivKey, err := hdkeychain.NewMaster(seed, &chaincfg.MainNetParams) + if err != nil { + return nil, nil, err + } + km, err := keys.NewKeyManager(db.Keys(), &chaincfg.MainNetParams, masterPrivKey, wallet.Litecoin, litecoinAddress) + if err != nil { + return nil, nil, err + } + + return &LitecoinWallet{ + db: db, + km: km, + params: &chaincfg.MainNetParams, + }, seed, nil +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/model/helper_test.go b/vendor/github.com/OpenBazaar/multiwallet/model/helper_test.go new file mode 100644 index 0000000000..007f693bfd --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/model/helper_test.go @@ -0,0 
+1,24 @@ +package model_test + +import ( + "testing" + + "github.com/OpenBazaar/multiwallet/model" +) + +func Test_toFloat64(t *testing.T) { + f, err := model.ToFloat(12.345) + if err != nil { + t.Error(err) + } + if f != 12.345 { + t.Error("Returned incorrect float") + } + f, err = model.ToFloat("456.789") + if err != nil { + t.Error(err) + } + if f != 456.789 { + t.Error("Returned incorrect float") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/multiwallet_test.go b/vendor/github.com/OpenBazaar/multiwallet/multiwallet_test.go new file mode 100644 index 0000000000..69bda36753 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/multiwallet_test.go @@ -0,0 +1,63 @@ +package multiwallet + +import ( + "fmt" + "github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/config" + "github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/multiwallet/filecoin" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/op/go-logging" + "math/big" + "os" + "testing" + "time" +) + +func TestMultiWallet_Filecoin(t *testing.T) { + mdb := datastore.NewMockMultiwalletDatastore() + db, err := mdb.GetDatastoreForWallet(wallet.Filecoin) + if err != nil { + t.Fatal(err) + } + + logger := logging.NewLogBackend(os.Stdout, "", 0) + + cfg := &config.Config{ + Mnemonic: "abcdefg", + Params: &chaincfg.MainNetParams, + Cache: cache.NewMockCacher(), + Coins: []config.CoinConfig{ + { + CoinType: wallet.Filecoin, + DB: db, + ClientAPIs: []string{"http://localhost:8080/api"}, + }, + }, + Logger: logger, + } + + w, err := NewMultiWallet(cfg) + if err != nil { + t.Fatal(err) + } + + w.Start() + + fmt.Println(w[wallet.Filecoin].CurrentAddress(wallet.EXTERNAL)) + + <-time.After(time.Second * 40) + + addr, err := filecoin.NewFilecoinAddress("t3vjuvunjquznv6nlhs72utwndnr6xlaaqf3xeympz4bj4cclxtldrdlcdqvdx2fragwlo6xddd475uezjeapq") + if err != nil { + t.Fatal(err) + } + + txid, err := 
w[wallet.Filecoin].Spend(*big.NewInt(10000), addr, wallet.NORMAL, "", false) + if err != nil { + t.Fatal(err) + } + fmt.Println(txid) + select {} +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/service/wallet_service_test.go b/vendor/github.com/OpenBazaar/multiwallet/service/wallet_service_test.go new file mode 100644 index 0000000000..84c031158b --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/service/wallet_service_test.go @@ -0,0 +1,494 @@ +package service + +import ( + "encoding/hex" + "strconv" + "testing" + "time" + + "github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/multiwallet/keys" + "github.com/OpenBazaar/multiwallet/model/mock" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/hdkeychain" + "github.com/ltcsuite/ltcd/chaincfg/chainhash" +) + +func mockWalletService() (*WalletService, error) { + datastore := datastore.NewMockMultiwalletDatastore() + + db, err := datastore.GetDatastoreForWallet(wallet.Bitcoin) + if err != nil { + return nil, err + } + params := &chaincfg.MainNetParams + + seed, err := hex.DecodeString("16c034c59522326867593487c03a8f9615fb248406dd0d4ffb3a6b976a248403") + if err != nil { + return nil, err + } + master, err := hdkeychain.NewMaster(seed, params) + if err != nil { + return nil, err + } + km, err := keys.NewKeyManager(db.Keys(), params, master, wallet.Bitcoin, bitcoinAddress) + if err != nil { + return nil, err + } + cli := mock.NewMockApiClient(func(addr btcutil.Address) ([]byte, error) { + return txscript.PayToAddrScript(addr) + }) + return NewWalletService(db, km, cli, params, wallet.Bitcoin, cache.NewMockCacher()) +} + +func bitcoinAddress(key *hdkeychain.ExtendedKey, params *chaincfg.Params) (btcutil.Address, error) { + return key.Address(params) +} + +func TestWalletService_ChainTip(t *testing.T) { + ws, err 
:= mockWalletService() + if err != nil { + t.Fatal(err) + } + ws.UpdateState() + height, hash := ws.ChainTip() + if height != 1289594 { + t.Error("returned incorrect height") + } + if hash.String() != "000000000000004c68a477283a8db18c1d1c2155b03d9bc23d587ac5e1c4d1af" { + t.Error("returned incorrect best hash") + } +} + +func TestWalletService_syncTxs(t *testing.T) { + ws, err := mockWalletService() + if err != nil { + t.Fatal(err) + } + ws.syncTxs(ws.getStoredAddresses()) + + txns, err := ws.db.Txns().GetAll(true) + if err != nil { + t.Error(err) + } + if len(txns) != 3 { + t.Error("failed to update state correctly") + } + txMap := make(map[string]wallet.Txn) + for _, tx := range txns { + txMap[tx.Txid] = tx + } + + tx, ok := txMap["54ebaa07c42216393b9d5816e40dd608593b92c42e2d6525f45bdd36bce8fe4d"] + if !ok { + t.Error("failed to return tx") + } + if tx.Value != 2717080 || tx.WatchOnly { + t.Error("failed to return incorrect value for tx") + } + tx, ok = txMap["ff2b865c3b73439912eebf4cce9a15b12c7d7bcdd14ae1110a90541426c4e7c5"] + if !ok { + t.Error("failed to return tx") + } + if tx.Value != -1717080 || tx.WatchOnly { + t.Error("failed to return incorrect value for tx") + } + tx, ok = txMap["1d4288fa682fa376fbae73dbd74ea04b9ea33011d63315ca9d2d50d081e671d5"] + if !ok { + t.Error("failed to return tx") + } + if tx.Value != 10000000 || tx.WatchOnly { + t.Error("failed to return incorrect value for tx") + } +} + +func TestWalletService_syncUtxos(t *testing.T) { + ws, err := mockWalletService() + if err != nil { + t.Fatal(err) + } + script, err := hex.DecodeString("a91457fc729da2a83dc8cd3c1835351c4a813c2ae8ba87") + if err != nil { + t.Fatal(err) + } + if err := ws.db.WatchedScripts().Put(script); err != nil { + t.Fatal(err) + } + ws.syncUtxos(ws.getStoredAddresses()) + + utxos, err := ws.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + if len(utxos) != 3 { + t.Error("failed to update state correctly") + } + + utxoMap := make(map[string]wallet.Utxo) + for _, u 
:= range utxos { + utxoMap[u.Op.Hash.String()+":"+strconv.Itoa(int(u.Op.Index))] = u + } + + u, ok := utxoMap["ff2b865c3b73439912eebf4cce9a15b12c7d7bcdd14ae1110a90541426c4e7c5:1"] + if !ok { + t.Error("failed to return correct utxo") + } + if u.Value != 1000000 || u.WatchOnly { + t.Error("returned incorrect value") + } + u, ok = utxoMap["1d4288fa682fa376fbae73dbd74ea04b9ea33011d63315ca9d2d50d081e671d5:1"] + if !ok { + t.Error("failed to return correct utxo") + } + if u.Value != 10000000 || u.WatchOnly { + t.Error("returned incorrect value") + } + u, ok = utxoMap["830bf683ab8eec1a75d891689e2989f846508bc7d500cb026ef671c2d1dce20c:1"] + if !ok { + t.Error("failed to return correct utxo") + } + if u.Value != 751918 || !u.WatchOnly { + t.Error("returned incorrect value") + } +} + +func TestWalletService_TestSyncWatchOnly(t *testing.T) { + ws, err := mockWalletService() + if err != nil { + t.Fatal(err) + } + script, err := hex.DecodeString("a91457fc729da2a83dc8cd3c1835351c4a813c2ae8ba87") + if err != nil { + t.Fatal(err) + } + if err := ws.db.WatchedScripts().Put(script); err != nil { + t.Fatal(err) + } + ws.syncTxs(ws.getStoredAddresses()) + ws.syncUtxos(ws.getStoredAddresses()) + + txns, err := ws.db.Txns().GetAll(true) + if err != nil { + t.Error(err) + } + if len(txns) != 4 { + t.Error("failed to update state correctly") + } + txMap := make(map[string]wallet.Txn) + for _, tx := range txns { + txMap[tx.Txid] = tx + } + + tx, ok := txMap["830bf683ab8eec1a75d891689e2989f846508bc7d500cb026ef671c2d1dce20c"] + if !ok { + t.Fatal("Failed to return correct transaction") + } + if !tx.WatchOnly { + t.Error("failed to return correct value for tx") + } + + utxos, err := ws.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + if len(utxos) != 3 { + t.Error("failed to update state correctly") + } + utxoMap := make(map[string]wallet.Utxo) + for _, u := range utxos { + utxoMap[u.Op.String()] = u + } + + utxo, ok := 
utxoMap["830bf683ab8eec1a75d891689e2989f846508bc7d500cb026ef671c2d1dce20c:1"] + if !ok { + t.Fatal("Failed to return correct utxo") + } + if !utxo.WatchOnly { + t.Error("failed to return correct value for utxo") + } +} + +func TestWalletService_ProcessIncomingTransaction(t *testing.T) { + ws, err := mockWalletService() + if err != nil { + t.Fatal(err) + } + + // Process an incoming transaction + ws.ProcessIncomingTransaction(mock.MockTransactions[0]) + txns, err := ws.db.Txns().GetAll(true) + if err != nil { + t.Error(err) + } + if len(txns) != 1 { + t.Error("failed to update state correctly") + } + if txns[0].Txid != mock.MockTransactions[0].Txid { + t.Error("saved incorrect transaction") + } + if txns[0].Value != 2717080 { + t.Error("saved incorrect value") + } + if txns[0].WatchOnly { + t.Error("saved incorrect watch only") + } + + utxos, err := ws.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + if len(utxos) != 1 { + t.Error("failed to update state correctly") + } + if utxos[0].WatchOnly { + t.Error("saved incorrect watch only") + } + if utxos[0].Op.Hash.String() != mock.MockTransactions[0].Txid { + t.Error("saved incorrect transaction ID") + } + if utxos[0].Op.Index != 1 { + t.Error("saved incorrect outpoint index") + } + if utxos[0].Value != 2717080 { + t.Error("saved incorrect value") + } + + // Process an outgoing transaction. 
Make sure it deletes the utxo + ws.ProcessIncomingTransaction(mock.MockTransactions[1]) + txns, err = ws.db.Txns().GetAll(true) + if err != nil { + t.Error(err) + } + if len(txns) != 2 { + t.Error("failed to update state correctly") + } + + utxos, err = ws.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + if len(utxos) != 1 { + t.Error("failed to update state correctly") + } + if utxos[0].Op.Hash.String() != mock.MockTransactions[1].Txid { + t.Error("failed to save correct utxo") + } + if utxos[0].Op.Index != 1 { + t.Error("failed to save correct utxo") + } +} + +func TestWalletService_processIncomingBlock(t *testing.T) { + ws, err := mockWalletService() + if err != nil { + t.Fatal(err) + } + ws.chainHeight = uint32(mock.MockBlocks[0].Height) + ws.bestBlock = mock.MockBlocks[0].Hash + + // Check update height + ws.processIncomingBlock(mock.MockBlocks[1]) + height, hash := ws.ChainTip() + if height != uint32(mock.MockBlocks[1].Height) { + t.Error("failed to update height") + } + if hash.String() != mock.MockBlocks[1].Hash { + t.Error("failed to update hash") + } + + // Check update height of unconfirmed txs and utxos + tx := mock.MockTransactions[0] + tx.Confirmations = 0 + ws.ProcessIncomingTransaction(tx) + + ws.processIncomingBlock(mock.MockBlocks[2]) + time.Sleep(time.Second / 2) + + txns, err := ws.db.Txns().GetAll(true) + if err != nil { + t.Fatal(err) + } + if len(txns) != 1 { + t.Fatal("Returned incorrect number of txs") + } + if txns[0].Height != int32(mock.MockBlocks[2].Height-14) { + t.Error("returned incorrect transaction height") + } + + utxos, err := ws.db.Utxos().GetAll() + if err != nil { + t.Fatal(err) + } + if len(utxos) != 1 { + t.Fatal("Returned incorrect number of utxos") + } + if utxos[0].AtHeight != int32(mock.MockBlocks[2].Height-14) { + t.Error("returned incorrect utxo height") + } + + // Test updateState() is called during reorg + block := mock.MockBlocks[1] + block.Hash = 
"0000000000000000003c4b7f56e45567980f02012ea00d8e384267a2d825fcf9" + ws.processIncomingBlock(block) + + time.Sleep(time.Second / 2) + + txns, err = ws.db.Txns().GetAll(true) + if err != nil { + t.Fatal(err) + } + if len(txns) != 3 { + t.Fatal("Returned incorrect number of txs") + } + + utxos, err = ws.db.Utxos().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(utxos) != 3 { + t.Fatal("Returned incorrect number of utxos") + } +} + +func TestWalletService_listenersFired(t *testing.T) { + nCallbacks := 0 + var response wallet.TransactionCallback + cb := func(callback wallet.TransactionCallback) { + nCallbacks++ + response = callback + } + ws, err := mockWalletService() + if err != nil { + t.Fatal(err) + } + ws.AddTransactionListener(cb) + tx := mock.MockTransactions[0] + tx.Confirmations = 0 + ws.saveSingleTxToDB(tx, int32(mock.MockBlocks[0].Height), ws.getStoredAddresses()) + if nCallbacks != 1 { + t.Errorf("expected 1 callback but had %d", nCallbacks) + } + ch, err := chainhash.NewHashFromStr(response.Txid) + if err != nil { + t.Errorf("failed getting hash from %s: %s", response.Txid, err) + } + if ch.String() != mock.MockTransactions[0].Txid { + t.Errorf("expected hash to be %s, but was %s", mock.MockTransactions[0].Txid, ch.String()) + } + if response.Value != 2717080 { + t.Errorf("expected tx value to be 2717080, but was %d", response.Value) + } + if response.Height != 0 { + t.Error("returned incorrect height") + } + if response.WatchOnly { + t.Error("returned incorrect watch only") + } + + // Test watch only + script, err := hex.DecodeString("a91457fc729da2a83dc8cd3c1835351c4a813c2ae8ba87") + if err != nil { + t.Fatal(err) + } + if err := ws.db.WatchedScripts().Put(script); err != nil { + t.Fatal(err) + } + ws.saveSingleTxToDB(mock.MockTransactions[3], int32(mock.MockBlocks[0].Height), ws.getStoredAddresses()) + if nCallbacks != 2 { + t.Error("failed to fire transaction callback") + } + ch, err = chainhash.NewHashFromStr(response.Txid) + if err != nil { 
+ t.Error(err) + } + if ch.String() != mock.MockTransactions[3].Txid { + t.Error("returned incorrect txid") + } + if response.Height != 1289594-1 { + t.Error("returned incorrect height") + } + if !response.WatchOnly { + t.Error("returned incorrect watch only") + } + + // Test fired when height is updated + tx = mock.MockTransactions[0] + tx.Confirmations = 1 + ws.saveSingleTxToDB(tx, int32(mock.MockBlocks[0].Height), ws.getStoredAddresses()) + if nCallbacks != 3 { + t.Error("failed to fire transaction callback") + } + ch, err = chainhash.NewHashFromStr(response.Txid) + if err != nil { + t.Error(err) + } + if ch.String() != mock.MockTransactions[0].Txid { + t.Error("returned incorrect txid") + } + if response.Value != 2717080 { + t.Error("returned incorrect value") + } + if response.Height != int32(mock.MockBlocks[0].Height) { + t.Error("returned incorrect height") + } + if response.WatchOnly { + t.Error("returned incorrect watch only") + } +} + +func TestWalletService_getStoredAddresses(t *testing.T) { + ws, err := mockWalletService() + if err != nil { + t.Fatal(err) + } + + types := []wallet.CoinType{ + wallet.Bitcoin, + wallet.BitcoinCash, + wallet.Zcash, + wallet.Litecoin, + } + + script, err := hex.DecodeString("a91457fc729da2a83dc8cd3c1835351c4a813c2ae8ba87") + if err != nil { + t.Fatal(err) + } + if err := ws.db.WatchedScripts().Put(script); err != nil { + t.Fatal(err) + } + + for _, ty := range types { + ws.coinType = ty + addrs := ws.getStoredAddresses() + if len(addrs) != 41 { + t.Error("returned incorrect number of addresses") + } + switch ty { + case wallet.Bitcoin: + sa, ok := addrs["39iF8cDMhctrPVoPbi2Vb1NnErg6CEB7BZ"] + if !sa.WatchOnly || !ok { + t.Error("returned incorrect watch only address") + } + case wallet.BitcoinCash: + sa, ok := addrs["pptlcu5a525rmjxd8svr2dguf2qnc2hghgln5xu4l7"] + if !sa.WatchOnly || !ok { + t.Error("returned incorrect watch only address") + } + case wallet.Zcash: + sa, ok := addrs["t3Sar8wdVfwgSz8rHY8qcipUhVWsB2x2xxa"] + if 
!sa.WatchOnly || !ok { + t.Error("returned incorrect watch only address") + } + case wallet.Litecoin: + sa, ok := addrs["39iF8cDMhctrPVoPbi2Vb1NnErg6CEB7BZ"] + if !sa.WatchOnly || !ok { + t.Error("returned incorrect watch only address") + } + } + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/balance_test.go b/vendor/github.com/OpenBazaar/multiwallet/util/balance_test.go new file mode 100644 index 0000000000..9d1b0566ff --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/util/balance_test.go @@ -0,0 +1,112 @@ +package util + +import ( + "bytes" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "testing" +) + +func TestCalcBalance(t *testing.T) { + ch1, err := chainhash.NewHashFromStr("ccfd8d91b38e065a4d0f655fffabbdbf61666d1fdf1b54b7432c5d0ad453b76d") + if err != nil { + t.Error(err) + } + ch2, err := chainhash.NewHashFromStr("37aface44f82f6f319957b501030da2595b35d8bbc953bbe237f378c5f715bdd") + if err != nil { + t.Error(err) + } + ch3, err := chainhash.NewHashFromStr("2d08e0e877ff9d034ca272666d01626e96a0cf9e17004aafb4ae9d5aa109dd20") + if err != nil { + t.Error(err) + } + ch4, err := chainhash.NewHashFromStr("c803c8e21a464f0425fda75fb43f5a40bb6188bab9f3bfe0c597b46899e30045") + if err != nil { + t.Error(err) + } + + var utxos []wallet.Utxo + var txns []wallet.Txn + + // Test confirmed and unconfirmed + utxos = append(utxos, wallet.Utxo{ + AtHeight: 500, + Value: 1000, + Op: *wire.NewOutPoint(ch1, 0), + }) + utxos = append(utxos, wallet.Utxo{ + AtHeight: 0, + Value: 2000, + Op: *wire.NewOutPoint(ch2, 0), + }) + + confirmed, unconfirmed := CalcBalance(utxos, txns) + if confirmed != 1000 || unconfirmed != 2000 { + t.Error("Returned incorrect balance") + } + + // Test confirmed stxo + tx := wire.NewMsgTx(1) + op := wire.NewOutPoint(ch3, 1) + in := wire.NewTxIn(op, []byte{}, [][]byte{}) + out := wire.NewTxOut(500, []byte{0x00}) + tx.TxIn = append(tx.TxIn, in) + tx.TxOut 
= append(tx.TxOut, out) + var buf bytes.Buffer + err = tx.BtcEncode(&buf, wire.ProtocolVersion, wire.WitnessEncoding) + if err != nil { + t.Error(err) + } + txns = append(txns, wallet.Txn{ + Bytes: buf.Bytes(), + Txid: "37aface44f82f6f319957b501030da2595b35d8bbc953bbe237f378c5f715bdd", + }) + tx = wire.NewMsgTx(1) + op = wire.NewOutPoint(ch4, 1) + in = wire.NewTxIn(op, []byte{}, [][]byte{}) + out = wire.NewTxOut(500, []byte{0x00}) + tx.TxIn = append(tx.TxIn, in) + tx.TxOut = append(tx.TxOut, out) + var buf2 bytes.Buffer + err = tx.BtcEncode(&buf, wire.ProtocolVersion, wire.WitnessEncoding) + if err != nil { + t.Error(err) + } + txns = append(txns, wallet.Txn{ + Bytes: buf2.Bytes(), + Txid: "2d08e0e877ff9d034ca272666d01626e96a0cf9e17004aafb4ae9d5aa109dd20", + Height: 1999, + }) + confirmed, unconfirmed = CalcBalance(utxos, txns) + if confirmed != 3000 || unconfirmed != 0 { + t.Error("Returned incorrect balance") + } + + // Test unconfirmed stxo + txns = []wallet.Txn{} + txns = append(txns, wallet.Txn{ + Bytes: buf.Bytes(), + Txid: "37aface44f82f6f319957b501030da2595b35d8bbc953bbe237f378c5f715bdd", + }) + txns = append(txns, wallet.Txn{ + Bytes: buf2.Bytes(), + Txid: "2d08e0e877ff9d034ca272666d01626e96a0cf9e17004aafb4ae9d5aa109dd20", + Height: 0, + }) + confirmed, unconfirmed = CalcBalance(utxos, txns) + if confirmed != 1000 || unconfirmed != 2000 { + t.Error("Returned incorrect balance") + } + + // Test without stxo in db + txns = []wallet.Txn{} + txns = append(txns, wallet.Txn{ + Bytes: buf.Bytes(), + Txid: "37aface44f82f6f319957b501030da2595b35d8bbc953bbe237f378c5f715bdd", + }) + confirmed, unconfirmed = CalcBalance(utxos, txns) + if confirmed != 1000 || unconfirmed != 2000 { + t.Error("Returned incorrect balance") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/coin_test.go b/vendor/github.com/OpenBazaar/multiwallet/util/coin_test.go new file mode 100644 index 0000000000..3d2caf2c5f --- /dev/null +++ 
b/vendor/github.com/OpenBazaar/multiwallet/util/coin_test.go @@ -0,0 +1,251 @@ +package util + +import ( + "bytes" + "encoding/hex" + "errors" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + hd "github.com/btcsuite/btcutil/hdkeychain" + "testing" +) + +func TestNewCoin(t *testing.T) { + txid := "7eae21cc2709a58a8795f9b0239b6b8ed974a3c4ce10f8919deae527995dd744" + ch, err := chainhash.NewHashFromStr(txid) + if err != nil { + t.Error(err) + return + } + scriptPubkey := "76a914ab8c06d1c22f575b30c3afc66bde8b3aa2de99bc88ac" + scriptBytes, err := hex.DecodeString(scriptPubkey) + if err != nil { + t.Error(err) + return + } + c, err := NewCoin(*ch, 0, btcutil.Amount(100000), 10, scriptBytes) + if err != nil { + t.Error(err) + return + } + if c.Hash().String() != ch.String() { + t.Error("Returned incorrect txid") + } + if c.Index() != 0 { + t.Error("Returned incorrect index") + } + if c.Value() != btcutil.Amount(100000) { + t.Error("Returned incorrect value") + } + if !bytes.Equal(c.PkScript(), scriptBytes) { + t.Error("Returned incorrect pk script") + } + if c.NumConfs() != 10 { + t.Error("Returned incorrect num confs") + } + if c.ValueAge() != 1000000 { + t.Error("Returned incorrect value age") + } +} + +func buildTestData() (uint32, []wallet.Utxo, func(script []byte) (btcutil.Address, error), + func(scriptAddress []byte) (*hd.ExtendedKey, error), + map[string]wallet.Utxo, map[string]*hd.ExtendedKey, error) { + + scriptPubkey1 := "76a914ab8c06d1c22f575b30c3afc66bde8b3aa2de99bc88ac" + scriptBytes1, err := hex.DecodeString(scriptPubkey1) + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + scriptPubkey2 := "76a914281032bc033f41a33ded636bc2f7c2d67bb2871f88ac" + scriptBytes2, err := hex.DecodeString(scriptPubkey2) + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + 
scriptPubkey3 := "76a91450033f99ce3ed61dc428a0ac481e9bdab646664c88ac" + scriptBytes3, err := hex.DecodeString(scriptPubkey3) + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + ch1, err := chainhash.NewHashFromStr("8cf466484a741850b63482133b6f7d506297c624290db2bb74214e4f9932f93e") + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + op1 := wire.NewOutPoint(ch1, 0) + ch2, err := chainhash.NewHashFromStr("8fc073d5452cc2765a24baf5d434fedc1d16b7f74f9dabce209a6b416d4fb91f") + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + op2 := wire.NewOutPoint(ch2, 1) + ch3, err := chainhash.NewHashFromStr("d7144e933f4a03ff194e373331d5a4ef8c5e4ce8df666c66b882145e686834b1") + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + op3 := wire.NewOutPoint(ch3, 2) + utxos := []wallet.Utxo{ + { + Value: 100000, + WatchOnly: false, + AtHeight: 300000, + ScriptPubkey: scriptBytes1, + Op: *op1, + }, + { + Value: 50000, + WatchOnly: false, + AtHeight: 350000, + ScriptPubkey: scriptBytes2, + Op: *op2, + }, + { + Value: 99000, + WatchOnly: true, + AtHeight: 250000, + ScriptPubkey: scriptBytes3, + Op: *op3, + }, + } + + utxoMap := make(map[string]wallet.Utxo) + utxoMap[utxos[0].Op.Hash.String()] = utxos[0] + utxoMap[utxos[1].Op.Hash.String()] = utxos[1] + utxoMap[utxos[2].Op.Hash.String()] = utxos[2] + + master, err := hd.NewMaster([]byte("8cf466484a741850b63482133b6f7d506297c624290db2bb74214e4f9932f93e"), &chaincfg.MainNetParams) + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + key0, err := master.Child(0) + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + key1, err := master.Child(1) + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + key2, err := master.Child(2) + if err != nil { + return 0, nil, nil, nil, nil, nil, err + } + + keyMap := make(map[string]*hd.ExtendedKey) + keyMap["ab8c06d1c22f575b30c3afc66bde8b3aa2de99bc"] = key0 + keyMap["281032bc033f41a33ded636bc2f7c2d67bb2871f"] = key1 + 
keyMap["50033f99ce3ed61dc428a0ac481e9bdab646664c"] = key2 + + height := uint32(351000) + + scriptToAddress := func(script []byte) (btcutil.Address, error) { + _, addrs, _, err := txscript.ExtractPkScriptAddrs(script, &chaincfg.MainNetParams) + if err != nil { + return nil, err + } + return addrs[0], nil + } + getKeyForScript := func(scriptAddress []byte) (*hd.ExtendedKey, error) { + key, ok := keyMap[hex.EncodeToString(scriptAddress)] + if !ok { + return nil, errors.New("key not found") + } + return key, nil + } + return height, utxos, scriptToAddress, getKeyForScript, utxoMap, keyMap, nil +} + +func TestGatherCoins(t *testing.T) { + + height, utxos, scriptToAddress, getKeyForScript, utxoMap, keyMap, err := buildTestData() + if err != nil { + t.Fatal(err) + } + + coins := GatherCoins(height, utxos, scriptToAddress, getKeyForScript) + if len(coins) != 2 { + t.Error("Returned incorrect number of coins") + } + for coin, key := range coins { + u := utxoMap[coin.Hash().String()] + addr, err := scriptToAddress(coin.PkScript()) + if err != nil { + t.Error(err) + } + k := keyMap[hex.EncodeToString(addr.ScriptAddress())] + if coin.Value() != btcutil.Amount(u.Value) { + t.Error("Returned incorrect value") + } + if coin.Hash().String() != u.Op.Hash.String() { + t.Error("Returned incorrect outpoint hash") + } + if coin.Index() != u.Op.Index { + t.Error("Returned incorrect outpoint index") + } + if !bytes.Equal(coin.PkScript(), u.ScriptPubkey) { + t.Error("Returned incorrect script pubkey") + } + if key.String() != k.String() { + t.Error("Returned incorrect key") + } + } +} + +func TestLoadAllInputs(t *testing.T) { + height, utxos, scriptToAddress, getKeyForScript, _, keyMap, err := buildTestData() + if err != nil { + t.Fatal(err) + } + coins := GatherCoins(height, utxos, scriptToAddress, getKeyForScript) + + tx := wire.NewMsgTx(1) + totalIn, inputValMap, additionalPrevScripts, additionalKeysByAddress := LoadAllInputs(tx, coins, &chaincfg.MainNetParams) + + if totalIn != 150000 
{ + t.Errorf("Failed to return correct total input value: expected 150000 got %d", totalIn) + } + + for _, u := range utxos { + val, ok := inputValMap[u.Op] + if !u.WatchOnly && !ok { + t.Errorf("Missing outpoint %s in input value map", u.Op) + } + if u.WatchOnly && ok { + t.Error("Watch only output found in input values map") + } + + if !u.WatchOnly && val != u.Value { + t.Errorf("Returned incorrect input value for outpoint %s. Expected %d, got %d", u.Op, u.Value, val) + } + + prevScript, ok := additionalPrevScripts[u.Op] + if !u.WatchOnly && !ok { + t.Errorf("Missing outpoint %s in additionalPrevScripts map", u.Op) + } + if u.WatchOnly && ok { + t.Error("Watch only output found in additionalPrevScripts map") + } + + if !u.WatchOnly && !bytes.Equal(prevScript, u.ScriptPubkey) { + t.Errorf("Returned incorrect script for script %s. Expected %x, got %x", u.Op, u.ScriptPubkey, prevScript) + } + } + + for _, key := range additionalKeysByAddress { + found := false + for _, k := range keyMap { + priv, err := k.ECPrivKey() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(key.PrivKey.Serialize(), priv.Serialize()) { + found = true + break + } + } + if !found { + t.Errorf("Key %s not in additionalKeysByAddress map", key.String()) + } + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/fees_test.go b/vendor/github.com/OpenBazaar/multiwallet/util/fees_test.go new file mode 100644 index 0000000000..7b36bfd416 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/util/fees_test.go @@ -0,0 +1,85 @@ +package util + +import ( + "bytes" + "testing" + + "github.com/OpenBazaar/wallet-interface" +) + +type ClosingBuffer struct { + *bytes.Buffer +} + +func (cb *ClosingBuffer) Close() (err error) { + return +} + +type mockExchangeRate struct { + rate float64 +} + +func (m *mockExchangeRate) GetExchangeRate(currencyCode string) (float64, error) { + return 0, nil +} + +func (m *mockExchangeRate) GetLatestRate(currencyCode string) (float64, error) { + return 
m.rate, nil +} + +func (m *mockExchangeRate) GetAllRates(usecache bool) (map[string]float64, error) { + return make(map[string]float64), nil +} + +func (m *mockExchangeRate) UnitsPerCoin() int { + return 0 +} + +func TestFeeProvider_GetFeePerByte(t *testing.T) { + er := &mockExchangeRate{438} + fp := NewFeeProvider(2000, 360, 320, 280, er) + + // Test using exchange rates + if fp.GetFeePerByte(wallet.PRIOIRTY) != 50 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.NORMAL) != 10 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.ECONOMIC) != 1 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.FEE_BUMP) != 101 { + t.Error("Returned incorrect fee per byte") + } + + // Test exchange rate is limited at max if bad exchange rate is returned + er.rate = 0.1 + if fp.GetFeePerByte(wallet.PRIOIRTY) != 2000 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.NORMAL) != 2000 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.ECONOMIC) != 2000 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.FEE_BUMP) != 2000 { + t.Error("Returned incorrect fee per byte") + } + + // Test no Exchange rate provided + fp.exchangeRates = nil + if fp.GetFeePerByte(wallet.PRIOIRTY) != 360 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.NORMAL) != 320 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.ECONOMIC) != 280 { + t.Error("Returned incorrect fee per byte") + } + if fp.GetFeePerByte(wallet.FEE_BUMP) != 720 { + t.Error("Returned incorrect fee per byte") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/util/outpoints_test.go b/vendor/github.com/OpenBazaar/multiwallet/util/outpoints_test.go new file mode 100644 index 0000000000..043c7c1b4e --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/util/outpoints_test.go @@ -0,0 +1,26 @@ +package util 
+ +import ( + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "testing" +) + +func TestOutPointsEqual(t *testing.T) { + h1, err := chainhash.NewHashFromStr("6f7a58ad92702601fcbaac0e039943a384f5274a205c16bb8bbab54f9ea2fbad") + if err != nil { + t.Error(err) + } + op := wire.NewOutPoint(h1, 0) + h2, err := chainhash.NewHashFromStr("a0d4cbcd8d0694e1132400b5e114b31bc3e0d8a2ac26e054f78727c95485b528") + op2 := wire.NewOutPoint(h2, 0) + if err != nil { + t.Error(err) + } + if !OutPointsEqual(*op, *op) { + t.Error("Failed to detect equal outpoints") + } + if OutPointsEqual(*op, *op2) { + t.Error("Incorrectly returned equal outpoints") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/zcash/address/address_test.go b/vendor/github.com/OpenBazaar/multiwallet/zcash/address/address_test.go new file mode 100644 index 0000000000..1f70a9ff33 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/zcash/address/address_test.go @@ -0,0 +1,87 @@ +package address + +import ( + "github.com/btcsuite/btcd/chaincfg" + "testing" +) + +func TestDecodeZcashAddress(t *testing.T) { + // Mainnet + addr, err := DecodeAddress("t1cQTWs2rPYM5R3zJiLA8MR3nZsXd1p2U6Q", &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr.String() != "t1cQTWs2rPYM5R3zJiLA8MR3nZsXd1p2U6Q" { + t.Error("Address decoding error") + } + // Testnet + addr, err = DecodeAddress("tmUFCqhXFnCraZJBkP4TsD5iYArcSWSmgkT", &chaincfg.TestNet3Params) + if err != nil { + t.Error(err) + } + if addr.String() != "tmUFCqhXFnCraZJBkP4TsD5iYArcSWSmgkT" { + t.Error("Address decoding error") + } +} + +var dataElement = []byte{203, 72, 18, 50, 41, 156, 213, 116, 49, 81, 172, 75, 45, 99, 174, 25, 142, 123, 176, 169} + +// Second address of https://github.com/Bitcoin-UAHF/spec/blob/master/cashaddr.md#examples-of-address-translation +func TestAddressPubKeyHash_EncodeAddress(t *testing.T) { + // Mainnet + addr, err := NewAddressPubKeyHash(dataElement, &chaincfg.MainNetParams) + if 
err != nil { + t.Error(err) + } + if addr.String() != "t1cQTWs2rPYM5R3zJiLA8MR3nZsXd1p2U6Q" { + t.Error("Address decoding error") + } + // Testnet + addr, err = NewAddressPubKeyHash(dataElement, &chaincfg.TestNet3Params) + if err != nil { + t.Error(err) + } + if addr.String() != "tmUFCqhXFnCraZJBkP4TsD5iYArcSWSmgkT" { + t.Error("Address decoding error") + } +} + +var dataElement2 = []byte{118, 160, 64, 83, 189, 160, 168, 139, 218, 81, 119, 184, 106, 21, 195, 178, 159, 85, 152, 115} + +// 4th address of https://github.com/Bitcoin-UAHF/spec/blob/master/cashaddr.md#examples-of-address-translation +func TestCashWitnessScriptHash_EncodeAddress(t *testing.T) { + // Mainnet + addr, err := NewAddressScriptHashFromHash(dataElement2, &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr.String() != "t3VNrdy8EjPaEJv2DnRN414eVwVQR9M8iS3" { + t.Error("Address decoding error") + } + // Testnet + addr, err = NewAddressScriptHashFromHash(dataElement2, &chaincfg.TestNet3Params) + if err != nil { + t.Error(err) + } + if addr.String() != "t2HN3geENbrBbrcbxiAN6Ygq93ydayzuTqB" { + t.Error("Address decoding error") + } +} + +func TestScriptParsing(t *testing.T) { + addr, err := DecodeAddress("t3VNrdy8EjPaEJv2DnRN414eVwVQR9M8iS3", &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + script, err := PayToAddrScript(addr) + if err != nil { + t.Error(err) + } + addr2, err := ExtractPkScriptAddrs(script, &chaincfg.MainNetParams) + if err != nil { + t.Error(err) + } + if addr.String() != addr2.String() { + t.Error("Failed to convert script back into address") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/zcash/sign_test.go b/vendor/github.com/OpenBazaar/multiwallet/zcash/sign_test.go new file mode 100644 index 0000000000..ffc573e1ad --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/zcash/sign_test.go @@ -0,0 +1,717 @@ +package zcash + +import ( + "bytes" + "encoding/hex" + "os" + "testing" + "time" + + 
"github.com/OpenBazaar/multiwallet/cache" + "github.com/OpenBazaar/multiwallet/datastore" + "github.com/OpenBazaar/multiwallet/keys" + "github.com/OpenBazaar/multiwallet/model/mock" + "github.com/OpenBazaar/multiwallet/service" + "github.com/OpenBazaar/multiwallet/util" + zaddr "github.com/OpenBazaar/multiwallet/zcash/address" + "github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/txscript" + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/btcsuite/btcutil/hdkeychain" +) + +type FeeResponse struct { + Priority int `json:"priority"` + Normal int `json:"normal"` + Economic int `json:"economic"` +} + +func newMockWallet() (*ZCashWallet, error) { + mockDb := datastore.NewMockMultiwalletDatastore() + + db, err := mockDb.GetDatastoreForWallet(wallet.Zcash) + if err != nil { + return nil, err + } + params := &chaincfg.MainNetParams + + seed, err := hex.DecodeString("16c034c59522326867593487c03a8f9615fb248406dd0d4ffb3a6b976a248403") + if err != nil { + return nil, err + } + master, err := hdkeychain.NewMaster(seed, params) + if err != nil { + return nil, err + } + km, err := keys.NewKeyManager(db.Keys(), params, master, wallet.Zcash, zcashAddress) + if err != nil { + return nil, err + } + + fp := util.NewFeeProvider(2000, 300, 200, 100, nil) + + bw := &ZCashWallet{ + params: params, + km: km, + db: db, + fp: fp, + } + cli := mock.NewMockApiClient(bw.AddressToScript) + ws, err := service.NewWalletService(db, km, cli, params, wallet.Zcash, cache.NewMockCacher()) + if err != nil { + return nil, err + } + bw.client = cli + bw.ws = ws + return bw, nil +} + +func TestWalletService_VerifyWatchScriptFilter(t *testing.T) { + // Verify that AddWatchedAddress should never add a script which already represents a key from its own wallet + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + keys := w.km.GetKeys() + + addr, err := 
w.km.KeyToAddress(keys[0]) + if err != nil { + t.Fatal(err) + } + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) != 0 { + t.Error("Put watched scripts fails on key manager owned key") + } +} + +func TestWalletService_VerifyWatchScriptPut(t *testing.T) { + // Verify that AddWatchedAddress should add a script which does not represent a key from its own wallet + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + + addr, err := w.DecodeAddress("t1aZvxRLCGVeMPFXvqfnBgHVEbi4c6g8MVa") + if err != nil { + t.Fatal(err) + } + + err = w.AddWatchedAddresses(addr) + if err != nil { + t.Fatal(err) + } + + watchScripts, err := w.db.WatchedScripts().GetAll() + if err != nil { + t.Fatal(err) + } + + if len(watchScripts) == 0 { + t.Error("Put watched scripts fails on non-key manager owned key") + } +} + +func waitForTxnSync(t *testing.T, txnStore wallet.Txns) { + // Look for a known txn, this sucks a bit. It would be better to check if the + // number of stored txns matched the expected, but not all the mock + // transactions are relevant, so the numbers don't add up. + // Even better would be for the wallet to signal that the initial sync was + // done. 
+ lastTxn := mock.MockTransactions[len(mock.MockTransactions)-2] + txHash, err := chainhash.NewHashFromStr(lastTxn.Txid) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 100; i++ { + if _, err := txnStore.Get(*txHash); err == nil { + return + } + time.Sleep(100 * time.Millisecond) + } + t.Fatal("timeout waiting for wallet to sync transactions") +} + +func TestZCashWallet_buildTx(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + addr, err := w.DecodeAddress("t1hASvMj8e6TXWryuB3L5TKXJB7XfNioZP3") + if err != nil { + t.Error(err) + } + // Test build normal tx + tx, err := w.buildTx(1500000, addr, wallet.NORMAL, nil) + if err != nil { + w.DumpTables(os.Stdout) + t.Error(err) + return + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if !validChangeAddress(tx, w.db, w.params) { + t.Error("Built tx does not contain a valid change output") + } + + // Insuffient funds + _, err = w.buildTx(1000000000, addr, wallet.NORMAL, nil) + if err != wallet.ErrorInsuffientFunds { + t.Error("Failed to throw insuffient funds error") + } + + // Dust + _, err = w.buildTx(1, addr, wallet.NORMAL, nil) + if err != wallet.ErrorDustAmount { + t.Error("Failed to throw dust error") + } +} + +func TestZCashWallet_buildSpendAllTx(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + w.ws.Start() + time.Sleep(time.Second / 2) + + waitForTxnSync(t, w.db.Txns()) + addr, err := w.DecodeAddress("t1hASvMj8e6TXWryuB3L5TKXJB7XfNioZP3") + if err != nil { + t.Error(err) + } + + // Test build spendAll tx + tx, err := w.buildSpendAllTx(addr, wallet.NORMAL) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Fatal(err) + } + spendableUtxos := 0 + for _, u := range utxos { + if !u.WatchOnly { + spendableUtxos++ + } + } 
+ if len(tx.TxIn) != spendableUtxos { + t.Error("Built tx does not spend all available utxos") + } + if !containsOutput(tx, addr) { + t.Error("Built tx does not contain the requested output") + } + if !validInputs(tx, w.db) { + t.Error("Built tx does not contain valid inputs") + } + if len(tx.TxOut) != 1 { + t.Error("Built tx should only have one output") + } +} + +func containsOutput(tx *wire.MsgTx, addr btcutil.Address) bool { + for _, o := range tx.TxOut { + script, _ := zaddr.PayToAddrScript(addr) + if bytes.Equal(script, o.PkScript) { + return true + } + } + return false +} + +func validInputs(tx *wire.MsgTx, db wallet.Datastore) bool { + utxos, _ := db.Utxos().GetAll() + uMap := make(map[wire.OutPoint]bool) + for _, u := range utxos { + uMap[u.Op] = true + } + for _, in := range tx.TxIn { + if !uMap[in.PreviousOutPoint] { + return false + } + } + return true +} + +func validChangeAddress(tx *wire.MsgTx, db wallet.Datastore, params *chaincfg.Params) bool { + for _, out := range tx.TxOut { + _, addrs, _, err := txscript.ExtractPkScriptAddrs(out.PkScript, params) + if err != nil { + continue + } + if len(addrs) == 0 { + continue + } + _, err = db.Keys().GetPathForKey(addrs[0].ScriptAddress()) + if err == nil { + return true + } + } + return false +} + +func TestZCashWallet_GenerateMultisigScript(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey1, err := key1.ECPubKey() + if err != nil { + t.Error(err) + } + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey2, err := key2.ECPubKey() + if err != nil { + t.Error(err) + } + key3, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + pubkey3, err := key3.ECPubKey() + if err != nil { + t.Error(err) + } + keys := []hdkeychain.ExtendedKey{*key1, *key2, *key3} + + // test without timeout + addr, redeemScript, err := 
w.generateMultisigScript(keys, 2, 0, nil) + if err != nil { + t.Error(err) + } + if addr.String() != "t3S5yuHPzajqHcaJ6WDTGAwTuK9VDvWYj7r" { + t.Error("Returned invalid address") + } + + rs := "52" + // OP_2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey1.SerializeCompressed()) + // pubkey1 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey2.SerializeCompressed()) + // pubkey2 + "21" + // OP_PUSHDATA(33) + hex.EncodeToString(pubkey3.SerializeCompressed()) + // pubkey3 + "53" + // OP_3 + "ae" // OP_CHECKMULTISIG + rsBytes, err := hex.DecodeString(rs) + if !bytes.Equal(rsBytes, redeemScript) { + t.Error("Returned invalid redeem script") + } +} + +func TestZCashWallet_newUnsignedTransaction(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + addr, err := w.DecodeAddress("t3ZZqETXWTohq3xXHxD9yzfq4UxpcACLkVc") + if err != nil { + t.Error(err) + } + + script, err := zaddr.PayToAddrScript(addr) + if err != nil { + t.Error(err) + } + out := wire.NewTxOut(10000, script) + outputs := []*wire.TxOut{out} + + changeSource := func() ([]byte, error) { + addr := w.CurrentAddress(wallet.INTERNAL) + script, err := zaddr.PayToAddrScript(addr) + if err != nil { + return []byte{}, err + } + return script, nil + } + + inputSource := func(target btcutil.Amount) (total btcutil.Amount, inputs []*wire.TxIn, inputValues []btcutil.Amount, scripts [][]byte, err error) { + total += btcutil.Amount(utxos[0].Value) + in := wire.NewTxIn(&utxos[0].Op, []byte{}, [][]byte{}) + in.Sequence = 0 // Opt-in RBF so we can bump fees + inputs = append(inputs, in) + return total, inputs, inputValues, scripts, nil + } + + // Regular transaction + authoredTx, err := newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err != nil { + t.Error(err) + } + if len(authoredTx.Tx.TxOut) != 2 { + t.Error("Returned incorrect 
number of outputs") + } + if len(authoredTx.Tx.TxIn) != 1 { + t.Error("Returned incorrect number of inputs") + } + + // Insufficient funds + outputs[0].Value = 1000000000 + _, err = newUnsignedTransaction(outputs, btcutil.Amount(1000), inputSource, changeSource) + if err == nil { + t.Error("Failed to return insuffient funds error") + } +} + +func TestZCashWallet_CreateMultisigSignature(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs) != 2 { + t.Error(err) + } + for _, sig := range sigs { + if len(sig.Signature) == 0 { + t.Error("Returned empty signature") + } + } +} + +func buildTxData(w *ZCashWallet) ([]wallet.TransactionInput, []wallet.TransactionOutput, []byte, error) { + redeemScript := "522103c157f2a7c178430972263232c9306110090c50b44d4e906ecd6d377eec89a53c210205b02b9dbe570f36d1c12e3100e55586b2b9dc61d6778c1d24a8eaca03625e7e21030c83b025cd6bdd8c06e93a2b953b821b4a8c29da211335048d7dc3389706d7e853ae" + redeemScriptBytes, err := hex.DecodeString(redeemScript) + if err != nil { + return nil, nil, nil, err + } + h1, err := hex.DecodeString("1a20f4299b4fa1f209428dace31ebf4f23f13abd8ed669cebede118343a6ae05") + if err != nil { + return nil, nil, nil, err + } + in1 := wallet.TransactionInput{ + OutpointHash: h1, + OutpointIndex: 1, + } + h2, err := hex.DecodeString("458d88b4ae9eb4a347f2e7f5592f1da3b9ddf7d40f307f6e5d7bc107a9b3e90e") + if err != nil { + return nil, nil, nil, err + } + in2 := wallet.TransactionInput{ + OutpointHash: h2, + OutpointIndex: 0, + } + addr, err := w.DecodeAddress("t3ZZqETXWTohq3xXHxD9yzfq4UxpcACLkVc") + if err != nil { + return nil, nil, nil, err + } + + out := wallet.TransactionOutput{ + Value: 20000, + Address: addr, 
+ } + return []wallet.TransactionInput{in1, in2}, []wallet.TransactionOutput{out}, redeemScriptBytes, nil +} + +func TestZCashWallet_Multisign(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Error(err) + } + ins, outs, redeemScript, err := buildTxData(w) + if err != nil { + t.Error(err) + } + + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + sigs1, err := w.CreateMultisigSignature(ins, outs, key1, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs1) != 2 { + t.Error(err) + } + sigs2, err := w.CreateMultisigSignature(ins, outs, key2, redeemScript, 50) + if err != nil { + t.Error(err) + } + if len(sigs2) != 2 { + t.Error(err) + } + _, err = w.Multisign(ins, outs, sigs1, sigs2, redeemScript, 50, false) + if err != nil { + t.Error(err) + } +} + +func TestZCashWallet_bumpFee(t *testing.T) { + w, err := newMockWallet() + if err != nil { + t.Fatal(err) + } + + w.ws.Start() + time.Sleep(time.Second / 2) + txns, err := w.db.Txns().GetAll(false) + if err != nil { + t.Fatal(err) + } + + ch, err := chainhash.NewHashFromStr(txns[2].Txid) + if err != nil { + t.Fatal(err) + } + utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Fatal(err) + } + for _, u := range utxos { + if u.Op.Hash.IsEqual(ch) { + u.AtHeight = 0 + if err := w.db.Utxos().Put(u); err != nil { + t.Fatal(err) + } + } + } + + w.db.Txns().UpdateHeight(*ch, 0, time.Now()) + + // Test unconfirmed + _, err = w.bumpFee(*ch) + if err != nil { + t.Error(err) + } + + err = w.db.Txns().UpdateHeight(*ch, 1289597, time.Now()) + if err != nil { + t.Error(err) + } + + // Test confirmed + _, err = w.bumpFee(*ch) + if err == nil { + t.Error("Should not be able to bump fee of confirmed txs") + } +} + +func TestZCashWallet_sweepAddress(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + 
utxos, err := w.db.Utxos().GetAll() + if err != nil { + t.Error(err) + } + var in wallet.TransactionInput + var key *hdkeychain.ExtendedKey + for _, ut := range utxos { + if ut.Value > 0 && !ut.WatchOnly { + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + key, err = w.km.GetKeyForScript(addr.ScriptAddress()) + if err != nil { + t.Error(err) + } + h, err := hex.DecodeString(ut.Op.Hash.String()) + if err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + } + } + // P2PKH addr + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key, nil, wallet.NORMAL) + if err != nil { + t.Error(err) + return + } + + // 1 of 2 P2WSH + for _, ut := range utxos { + if ut.Value > 0 && ut.WatchOnly { + addr, err := w.ScriptToAddress(ut.ScriptPubkey) + if err != nil { + t.Error(err) + } + h, err := hex.DecodeString(ut.Op.Hash.String()) + if err != nil { + t.Error(err) + } + in = wallet.TransactionInput{ + LinkedAddress: addr, + Value: ut.Value, + OutpointIndex: ut.Op.Index, + OutpointHash: h, + } + } + } + key1, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + + key2, err := w.km.GetFreshKey(wallet.INTERNAL) + if err != nil { + t.Error(err) + } + _, redeemScript, err := w.GenerateMultisigScript([]hdkeychain.ExtendedKey{*key1, *key2}, 1, 0, nil) + if err != nil { + t.Error(err) + } + _, err = w.sweepAddress([]wallet.TransactionInput{in}, nil, key1, &redeemScript, wallet.NORMAL) + if err != nil { + t.Error(err) + } +} + +func TestZCashWallet_estimateSpendFee(t *testing.T) { + w, err := newMockWallet() + w.ws.Start() + time.Sleep(time.Second / 2) + if err != nil { + t.Error(err) + } + fee, err := w.estimateSpendFee(1000, wallet.NORMAL) + if err != nil { + t.Error(err) + } + if fee <= 0 { + t.Error("Returned incorrect fee") + } +} + +func buildTestTx() (*wire.MsgTx, []byte, error) { + expected, err := 
hex.DecodeString(`0400008085202f8901a8c685478265f4c14dada651969c45a65e1aeb8cd6791f2f5bb6a1d9952104d9010000006b483045022100a61e5d557568c2ddc1d9b03a7173c6ce7c996c4daecab007ac8f34bee01e6b9702204d38fdc0bcf2728a69fde78462a10fb45a9baa27873e6a5fc45fb5c76764202a01210365ffea3efa3908918a8b8627724af852fc9b86d7375b103ab0543cf418bcaa7ffeffffff02005a6202000000001976a9148132712c3ff19f3a151234616777420a6d7ef22688ac8b959800000000001976a9145453e4698f02a38abdaa521cd1ff2dee6fac187188ac29b0040048b004000000000000000000000000`) + if err != nil { + return nil, nil, err + } + + tx := wire.NewMsgTx(1) + + inHash, err := hex.DecodeString("a8c685478265f4c14dada651969c45a65e1aeb8cd6791f2f5bb6a1d9952104d9") + if err != nil { + return nil, nil, err + } + prevHash, err := chainhash.NewHash(inHash) + if err != nil { + return nil, nil, err + } + op := wire.NewOutPoint(prevHash, 1) + + scriptSig, err := hex.DecodeString("483045022100a61e5d557568c2ddc1d9b03a7173c6ce7c996c4daecab007ac8f34bee01e6b9702204d38fdc0bcf2728a69fde78462a10fb45a9baa27873e6a5fc45fb5c76764202a01210365ffea3efa3908918a8b8627724af852fc9b86d7375b103ab0543cf418bcaa7f") + if err != nil { + return nil, nil, err + } + txIn := wire.NewTxIn(op, scriptSig, nil) + txIn.Sequence = 4294967294 + + tx.TxIn = []*wire.TxIn{txIn} + + pkScirpt0, err := hex.DecodeString("76a9148132712c3ff19f3a151234616777420a6d7ef22688ac") + if err != nil { + return nil, nil, err + } + out0 := wire.NewTxOut(40000000, pkScirpt0) + + pkScirpt1, err := hex.DecodeString("76a9145453e4698f02a38abdaa521cd1ff2dee6fac187188ac") + if err != nil { + return nil, nil, err + } + out1 := wire.NewTxOut(9999755, pkScirpt1) + tx.TxOut = []*wire.TxOut{out0, out1} + + tx.LockTime = 307241 + return tx, expected, nil +} + +func TestSerializeVersion4Transaction(t *testing.T) { + tx, expected, err := buildTestTx() + if err != nil { + t.Fatal(err) + } + + serialized, err := serializeVersion4Transaction(tx, 307272) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(serialized, expected) 
{ + t.Fatal("Failed to serialize transaction correctly") + } +} + +func TestCalcSignatureHash(t *testing.T) { + tx, _, err := buildTestTx() + if err != nil { + t.Fatal(err) + } + + prevScript, err := hex.DecodeString("76a914507173527b4c3318a2aecd793bf1cfed705950cf88ac") + if err != nil { + t.Fatal(err) + } + sigHash, err := calcSignatureHash(prevScript, txscript.SigHashAll, tx, 0, 50000000, 307272) + if err != nil { + t.Fatal(err) + } + expected, err := hex.DecodeString("8df91420215909927be677a978c36b528e1e7b4ba343acefdd259fe57f3f1f85") + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(sigHash, expected) { + t.Fatal("Failed to calculate correct sig hash") + } +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/zcash/txsizes_test.go b/vendor/github.com/OpenBazaar/multiwallet/zcash/txsizes_test.go new file mode 100644 index 0000000000..56843c0a8b --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/zcash/txsizes_test.go @@ -0,0 +1,84 @@ +package zcash + +// Copyright (c) 2016 The btcsuite developers +// Use of this source code is governed by an ISC +// license that can be found in the LICENSE file. 
+ +/* Copied here from a btcd internal package*/ + +import ( + "bytes" + "encoding/hex" + "github.com/btcsuite/btcd/wire" + "testing" +) + +const ( + p2pkhScriptSize = P2PKHPkScriptSize + p2shScriptSize = 23 +) + +func makeInts(value int, n int) []int { + v := make([]int, n) + for i := range v { + v[i] = value + } + return v +} + +func TestEstimateSerializeSize(t *testing.T) { + tests := []struct { + InputCount int + OutputScriptLengths []int + AddChangeOutput bool + ExpectedSizeEstimate int + }{ + 0: {1, []int{}, false, 161}, + 1: {1, []int{p2pkhScriptSize}, false, 195}, + 2: {1, []int{}, true, 195}, + 3: {1, []int{p2pkhScriptSize}, true, 229}, + 4: {1, []int{p2shScriptSize}, false, 193}, + 5: {1, []int{p2shScriptSize}, true, 227}, + + 6: {2, []int{}, false, 310}, + 7: {2, []int{p2pkhScriptSize}, false, 344}, + 8: {2, []int{}, true, 344}, + 9: {2, []int{p2pkhScriptSize}, true, 378}, + 10: {2, []int{p2shScriptSize}, false, 342}, + 11: {2, []int{p2shScriptSize}, true, 376}, + + // 0xfd is discriminant for 16-bit compact ints, compact int + // total size increases from 1 byte to 3. 
+ 12: {1, makeInts(p2pkhScriptSize, 0xfc), false, 8729}, + 13: {1, makeInts(p2pkhScriptSize, 0xfd), false, 8729 + P2PKHOutputSize + 2}, + 14: {1, makeInts(p2pkhScriptSize, 0xfc), true, 8729 + P2PKHOutputSize + 2}, + 15: {0xfc, []int{}, false, 37560}, + 16: {0xfd, []int{}, false, 37560 + RedeemP2PKHInputSize + 2}, + } + for i, test := range tests { + outputs := make([]*wire.TxOut, 0, len(test.OutputScriptLengths)) + for _, l := range test.OutputScriptLengths { + outputs = append(outputs, &wire.TxOut{PkScript: make([]byte, l)}) + } + actualEstimate := EstimateSerializeSize(test.InputCount, outputs, test.AddChangeOutput, P2PKH) + if actualEstimate != test.ExpectedSizeEstimate { + t.Errorf("Test %d: Got %v: Expected %v", i, actualEstimate, test.ExpectedSizeEstimate) + } + } +} + +func TestSumOutputSerializeSizes(t *testing.T) { + testTx := "0100000001066b78efa7d66d271cae6d6eb799e1d10953fb1a4a760226cc93186d52b55613010000006a47304402204e6c32cc214c496546c3277191ca734494fe49fed0af1d800db92fed2021e61802206a14d063b67f2f1c8fc18f9e9a5963fe33e18c549e56e3045e88b4fc6219be11012103f72d0a11727219bff66b8838c3c5e1c74a5257a325b0c84247bd10bdb9069e88ffffffff0200c2eb0b000000001976a914426e80ad778792e3e19c20977fb93ec0591e1a3988ac35b7cb59000000001976a914e5b6dc0b297acdd99d1a89937474df77db5743c788ac00000000" + txBytes, err := hex.DecodeString(testTx) + if err != nil { + t.Error(err) + return + } + r := bytes.NewReader(txBytes) + msgTx := wire.NewMsgTx(1) + msgTx.BtcDecode(r, 1, wire.WitnessEncoding) + if SumOutputSerializeSizes(msgTx.TxOut) != 68 { + t.Error("SumOutputSerializeSizes returned incorrect value") + } + +} diff --git a/vendor/github.com/OpenBazaar/multiwallet/zcash/wallet_test.go b/vendor/github.com/OpenBazaar/multiwallet/zcash/wallet_test.go new file mode 100644 index 0000000000..7cb9c4f654 --- /dev/null +++ b/vendor/github.com/OpenBazaar/multiwallet/zcash/wallet_test.go @@ -0,0 +1,95 @@ +package zcash + +import ( + "github.com/OpenBazaar/multiwallet/datastore" + 
"github.com/OpenBazaar/wallet-interface" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "testing" + "time" +) + +func TestZCashWallet_Balance(t *testing.T) { + ds := datastore.NewMockMultiwalletDatastore() + db, err := ds.GetDatastoreForWallet(wallet.Zcash) + if err != nil { + t.Fatal(err) + } + + w := ZCashWallet{ + db: db, + } + + ch1, err := chainhash.NewHashFromStr("ccfd8d91b38e065a4d0f655fffabbdbf61666d1fdf1b54b7432c5d0ad453b76d") + if err != nil { + t.Error(err) + } + ch2, err := chainhash.NewHashFromStr("37aface44f82f6f319957b501030da2595b35d8bbc953bbe237f378c5f715bdd") + if err != nil { + t.Error(err) + } + ch3, err := chainhash.NewHashFromStr("2d08e0e877ff9d034ca272666d01626e96a0cf9e17004aafb4ae9d5aa109dd20") + if err != nil { + t.Error(err) + } + ch4, err := chainhash.NewHashFromStr("c803c8e21a464f0425fda75fb43f5a40bb6188bab9f3bfe0c597b46899e30045") + if err != nil { + t.Error(err) + } + + err = db.Utxos().Put(wallet.Utxo{ + AtHeight: 500, + Value: 1000, + Op: *wire.NewOutPoint(ch1, 0), + }) + if err != nil { + t.Fatal(err) + } + err = db.Utxos().Put(wallet.Utxo{ + AtHeight: 0, + Value: 2000, + Op: *wire.NewOutPoint(ch2, 0), + }) + if err != nil { + t.Fatal(err) + } + + // Test unconfirmed + confirmed, unconfirmed := w.Balance() + if confirmed != 1000 || unconfirmed != 2000 { + t.Error("Returned incorrect balance") + } + + // Test confirmed stxo + tx := wire.NewMsgTx(1) + op := wire.NewOutPoint(ch3, 1) + in := wire.NewTxIn(op, []byte{}, [][]byte{}) + out := wire.NewTxOut(500, []byte{0x00}) + tx.TxIn = append(tx.TxIn, in) + tx.TxOut = append(tx.TxOut, out) + buf, err := serializeVersion4Transaction(tx, 0) + if err != nil { + t.Fatal(err) + } + if err := db.Txns().Put(buf, "37aface44f82f6f319957b501030da2595b35d8bbc953bbe237f378c5f715bdd", 0, 0, time.Now(), false); err != nil { + t.Fatal(err) + } + + tx = wire.NewMsgTx(1) + op = wire.NewOutPoint(ch4, 1) + in = wire.NewTxIn(op, []byte{}, [][]byte{}) + out = 
wire.NewTxOut(500, []byte{0x00}) + tx.TxIn = append(tx.TxIn, in) + tx.TxOut = append(tx.TxOut, out) + buf2, err := serializeVersion4Transaction(tx, 0) + if err != nil { + t.Fatal(err) + } + if err := db.Txns().Put(buf2, "2d08e0e877ff9d034ca272666d01626e96a0cf9e17004aafb4ae9d5aa109dd20", 0, 1999, time.Now(), false); err != nil { + t.Fatal(err) + } + confirmed, unconfirmed = w.Balance() + if confirmed != 3000 || unconfirmed != 0 { + t.Error("Returned incorrect balance") + } +} diff --git a/vendor/github.com/filecoin-project/go-address/.circleci/config.yml b/vendor/github.com/filecoin-project/go-address/.circleci/config.yml deleted file mode 100644 index 4fca8299da..0000000000 --- a/vendor/github.com/filecoin-project/go-address/.circleci/config.yml +++ /dev/null @@ -1,185 +0,0 @@ -version: 2.1 -orbs: - go: gotest/tools@0.0.9 - -executors: - golang: - docker: - - image: circleci/golang:1.13 - resource_class: small - -commands: - install-deps: - steps: - - go/install-ssh - - go/install: {package: git} - prepare: - parameters: - linux: - default: true - description: is a linux build environment? - type: boolean - steps: - - checkout - - when: - condition: << parameters.linux >> - steps: - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev - - run: git submodule sync - - run: git submodule update --init - build-all: - - -jobs: - mod-tidy-check: - executor: golang - steps: - - install-deps - - prepare - - go/mod-download - - go/mod-tidy-check - - build-all: - executor: golang - steps: - - install-deps - - prepare - - go/mod-download - - run: sudo apt-get update - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - run: - command: make build - - store_artifacts: - path: go-address - - store_artifacts: - path: go-address - - test: &test - description: | - Run tests with gotestsum. 
- parameters: - executor: - type: executor - default: golang - go-test-flags: - type: string - default: "-timeout 5m" - description: Flags passed to go test. - packages: - type: string - default: "./..." - description: Import paths of packages to be tested. - test-suite-name: - type: string - default: unit - description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: short - description: gotestsum format. https://github.com/gotestyourself/gotestsum#format - coverage: - type: string - default: -coverprofile=coverage.txt - description: Coverage flag. Set to the empty string to disable. - codecov-upload: - type: boolean - default: false - description: | - Upload coverage report to https://codecov.io/. Requires the codecov API token to be - set as an environment variable for private projects. - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - run: - command: make build - - go/install-gotestsum: - gobin: $HOME/.local/bin - - run: - name: go test - environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - GOTESTSUM_FORMAT: << parameters.gotestsum-format >> - command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - gotestsum -- \ - << parameters.coverage >> \ - << parameters.go-test-flags >> \ - << parameters.packages >> - no_output_timeout: 30m - - store_test_results: - path: /tmp/test-reports - - when: - condition: << parameters.codecov-upload >> - steps: - - go/install: {package: bash} - - go/install: {package: curl} - - run: - shell: /bin/bash -eo pipefail - command: | - bash <(curl -s https://codecov.io/bash) - - save_cache: - name: save go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - paths: - - "~/go/pkg" - - "~/go/src/github.com" - - "~/go/src/golang.org" - 
- lint: &lint - description: | - Run golangci-lint. - parameters: - executor: - type: executor - default: golang - golangci-lint-version: - type: string - default: 1.21.0 - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. - args: - type: string - default: '' - description: | - Arguments to pass to golangci-lint - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - run: - command: make build - - go/install-golangci-lint: - gobin: $HOME/.local/bin - version: << parameters.golangci-lint-version >> - - run: - name: Lint - command: | - $HOME/.local/bin/golangci-lint run -v \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - - lint-all: - <<: *lint - -workflows: - version: 2.1 - ci: - jobs: - - lint-changes: - args: "--new-from-rev origin/master" - - test - - mod-tidy-check - - build-all diff --git a/vendor/github.com/filecoin-project/go-address/.gitignore b/vendor/github.com/filecoin-project/go-address/.gitignore new file mode 100644 index 0000000000..48925c79c8 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.filecoin-build +.update-modules \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-address/.gitmodules b/vendor/github.com/filecoin-project/go-address/.gitmodules new file mode 100644 index 0000000000..773dea9715 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-address/.gitmodules @@ -0,0 +1,3 @@ +[submodule "extern/filecoin-ffi"] + path = extern/filecoin-ffi + url = 
https://github.com/filecoin-project/filecoin-ffi.git diff --git a/vendor/github.com/filecoin-project/go-address/address.go b/vendor/github.com/filecoin-project/go-address/address.go index d97fa79bb0..29cd402b0f 100644 --- a/vendor/github.com/filecoin-project/go-address/address.go +++ b/vendor/github.com/filecoin-project/go-address/address.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "math" "strconv" cbor "github.com/ipfs/go-ipld-cbor" @@ -31,6 +32,9 @@ var addressAtlasEntry = atlas.BuildEntry(Address{}).Transform(). })). Complete() +// CurrentNetwork specifies which network the address belongs to +var CurrentNetwork = Testnet + // Address is the go type that represents an address in the filecoin network. type Address struct{ str string } @@ -79,6 +83,9 @@ func (a Address) Protocol() Protocol { // Payload returns the payload of the address. func (a Address) Payload() []byte { + if len(a.str) == 0 { + return nil + } return []byte(a.str[1:]) } @@ -89,7 +96,7 @@ func (a Address) Bytes() []byte { // String returns an address encoded as a string. func (a Address) String() string { - str, err := encode(Testnet, a) + str, err := encode(CurrentNetwork, a) if err != nil { panic(err) // I don't know if this one is okay } @@ -149,6 +156,9 @@ func (a *Address) Scan(value interface{}) error { // NewIDAddress returns an address using the ID protocol. 
func NewIDAddress(id uint64) (Address, error) { + if id > math.MaxInt64 { + return Undef, xerrors.New("IDs must be less than 2^63") + } return newAddress(ID, varint.ToUvarint(id)) } @@ -352,8 +362,13 @@ func (a *Address) UnmarshalBinary(b []byte) error { return nil } -func (a Address) MarshalCBOR(w io.Writer) error { - if a == Undef { +func (a *Address) MarshalCBOR(w io.Writer) error { + if a == nil { + _, err := w.Write(cbg.CborNull) + return err + } + + if *a == Undef { return fmt.Errorf("cannot marshal undefined address") } @@ -368,7 +383,9 @@ func (a Address) MarshalCBOR(w io.Writer) error { return nil } -func (a *Address) UnmarshalCBOR(br io.Reader) error { +func (a *Address) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + maj, extra, err := cbg.CborReadHeader(br) if err != nil { return err diff --git a/vendor/github.com/filecoin-project/go-address/address_test.go b/vendor/github.com/filecoin-project/go-address/address_test.go deleted file mode 100644 index ac33fca74c..0000000000 --- a/vendor/github.com/filecoin-project/go-address/address_test.go +++ /dev/null @@ -1,506 +0,0 @@ -package address - -import ( - "bytes" - "encoding/base32" - "fmt" - "math" - "math/rand" - "strconv" - "testing" - "time" - - "github.com/multiformats/go-varint" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-crypto" -) - -func init() { - rand.Seed(time.Now().Unix()) -} - -func TestRandomIDAddress(t *testing.T) { - assert := assert.New(t) - - addr, err := NewIDAddress(uint64(rand.Int())) - assert.NoError(err) - assert.Equal(ID, addr.Protocol()) - - str, err := encode(Testnet, addr) - assert.NoError(err) - - maybe, err := decode(str) - assert.NoError(err) - assert.Equal(addr, maybe) - -} - -var allTestAddresses = []string{ - "t00", - "t01", - "t010", - "t0150", - "t0499", - "t01024", - "t01729", - "t0999999", - "t15ihq5ibzwki2b4ep2f46avlkrqzhpqgtga7pdrq", - "t12fiakbhe2gwd5cnmrenekasyn6v5tnaxaqizq6a", - 
"t1wbxhu3ypkuo6eyp6hjx6davuelxaxrvwb2kuwva", - "t1xtwapqc6nh4si2hcwpr3656iotzmlwumogqbuaa", - "t1xcbgdhkgkwht3hrrnui3jdopeejsoatkzmoltqy", - "t17uoq6tp427uzv7fztkbsnn64iwotfrristwpryy", - "t24vg6ut43yw2h2jqydgbg2xq7x6f4kub3bg6as6i", - "t25nml2cfbljvn4goqtclhifepvfnicv6g7mfmmvq", - "t2nuqrg7vuysaue2pistjjnt3fadsdzvyuatqtfei", - "t24dd4ox4c2vpf5vk5wkadgyyn6qtuvgcpxxon64a", - "t2gfvuyh7v2sx3patm5k23wdzmhyhtmqctasbr23y", - "t3vvmn62lofvhjd2ugzca6sof2j2ubwok6cj4xxbfzz4yuxfkgobpihhd2thlanmsh3w2ptld2gqkn2jvlss4a", - "t3wmuu6crofhqmm3v4enos73okk2l366ck6yc4owxwbdtkmpk42ohkqxfitcpa57pjdcftql4tojda2poeruwa", - "t3s2q2hzhkpiknjgmf4zq3ejab2rh62qbndueslmsdzervrhapxr7dftie4kpnpdiv2n6tvkr743ndhrsw6d3a", - "t3q22fijmmlckhl56rn5nkyamkph3mcfu5ed6dheq53c244hfmnq2i7efdma3cj5voxenwiummf2ajlsbxc65a", - "t3u5zgwa4ael3vuocgc5mfgygo4yuqocrntuuhcklf4xzg5tcaqwbyfabxetwtj4tsam3pbhnwghyhijr5mixa", -} - -func TestVectorsIDAddress(t *testing.T) { - testCases := []struct { - input uint64 - expected string - }{ - {uint64(0), "t00"}, - {uint64(1), "t01"}, - {uint64(10), "t010"}, - {uint64(150), "t0150"}, - {uint64(499), "t0499"}, - {uint64(1024), "t01024"}, - {uint64(1729), "t01729"}, - {uint64(999999), "t0999999"}, - {math.MaxUint64, fmt.Sprintf("t0%s", strconv.FormatUint(math.MaxUint64, 10))}, - } - - for _, tc := range testCases { - tc := tc - t.Run(fmt.Sprintf("testing actorID address: %s", tc.expected), func(t *testing.T) { - assert := assert.New(t) - - // Round trip encoding and decoding from string - addr, err := NewIDAddress(tc.input) - assert.NoError(err) - assert.Equal(tc.expected, addr.String()) - - maybeAddr, err := NewFromString(tc.expected) - assert.NoError(err) - assert.Equal(ID, maybeAddr.Protocol()) - id, _, err := varint.FromUvarint(maybeAddr.Payload()) - assert.NoError(err) - assert.Equal(tc.input, id) - - // Round trip to and from bytes - maybeAddrBytes, err := NewFromBytes(maybeAddr.Bytes()) - assert.NoError(err) - assert.Equal(maybeAddr, maybeAddrBytes) - - // Round trip 
encoding and decoding json - b, err := addr.MarshalJSON() - assert.NoError(err) - - var newAddr Address - err = newAddr.UnmarshalJSON(b) - assert.NoError(err) - assert.Equal(addr, newAddr) - }) - } - -} - -func TestSecp256k1Address(t *testing.T) { - assert := assert.New(t) - - sk, err := crypto.GenerateKey() - assert.NoError(err) - - addr, err := NewSecp256k1Address(crypto.PublicKey(sk)) - assert.NoError(err) - assert.Equal(SECP256K1, addr.Protocol()) - - str, err := encode(Mainnet, addr) - assert.NoError(err) - - maybe, err := decode(str) - assert.NoError(err) - assert.Equal(addr, maybe) - -} - -func TestVectorSecp256k1Address(t *testing.T) { - testCases := []struct { - input []byte - expected string - }{ - {[]byte{4, 148, 2, 250, 195, 126, 100, 50, 164, 22, 163, 160, 202, 84, - 38, 181, 24, 90, 179, 178, 79, 97, 52, 239, 162, 92, 228, 135, 200, - 45, 46, 78, 19, 191, 69, 37, 17, 224, 210, 36, 84, 33, 248, 97, 59, - 193, 13, 114, 250, 33, 102, 102, 169, 108, 59, 193, 57, 32, 211, - 255, 35, 63, 208, 188, 5}, - "t15ihq5ibzwki2b4ep2f46avlkrqzhpqgtga7pdrq"}, - - {[]byte{4, 118, 135, 185, 16, 55, 155, 242, 140, 190, 58, 234, 103, 75, - 18, 0, 12, 107, 125, 186, 70, 255, 192, 95, 108, 148, 254, 42, 34, - 187, 204, 38, 2, 255, 127, 92, 118, 242, 28, 165, 93, 54, 149, 145, - 82, 176, 225, 232, 135, 145, 124, 57, 53, 118, 238, 240, 147, 246, - 30, 189, 58, 208, 111, 127, 218}, - "t12fiakbhe2gwd5cnmrenekasyn6v5tnaxaqizq6a"}, - {[]byte{4, 222, 253, 208, 16, 1, 239, 184, 110, 1, 222, 213, 206, 52, - 248, 71, 167, 58, 20, 129, 158, 230, 65, 188, 182, 11, 185, 41, 147, - 89, 111, 5, 220, 45, 96, 95, 41, 133, 248, 209, 37, 129, 45, 172, - 65, 99, 163, 150, 52, 155, 35, 193, 28, 194, 255, 53, 157, 229, 75, - 226, 135, 234, 98, 49, 155}, - "t1wbxhu3ypkuo6eyp6hjx6davuelxaxrvwb2kuwva"}, - {[]byte{4, 3, 237, 18, 200, 20, 182, 177, 13, 46, 224, 157, 149, 180, - 104, 141, 178, 209, 128, 208, 169, 163, 122, 107, 106, 125, 182, 61, - 41, 129, 30, 233, 115, 4, 121, 216, 239, 145, 57, 
233, 18, 73, 202, - 189, 57, 50, 145, 207, 229, 210, 119, 186, 118, 222, 69, 227, 224, - 133, 163, 118, 129, 191, 54, 69, 210}, - "t1xtwapqc6nh4si2hcwpr3656iotzmlwumogqbuaa"}, - {[]byte{4, 247, 150, 129, 154, 142, 39, 22, 49, 175, 124, 24, 151, 151, - 181, 69, 214, 2, 37, 147, 97, 71, 230, 1, 14, 101, 98, 179, 206, 158, - 254, 139, 16, 20, 65, 97, 169, 30, 208, 180, 236, 137, 8, 0, 37, 63, - 166, 252, 32, 172, 144, 251, 241, 251, 242, 113, 48, 164, 236, 195, - 228, 3, 183, 5, 118}, - "t1xcbgdhkgkwht3hrrnui3jdopeejsoatkzmoltqy"}, - {[]byte{4, 66, 131, 43, 248, 124, 206, 158, 163, 69, 185, 3, 80, 222, - 125, 52, 149, 133, 156, 164, 73, 5, 156, 94, 136, 221, 231, 66, 133, - 223, 251, 158, 192, 30, 186, 188, 95, 200, 98, 104, 207, 234, 235, - 167, 174, 5, 191, 184, 214, 142, 183, 90, 82, 104, 120, 44, 248, 111, - 200, 112, 43, 239, 138, 31, 224}, - "t17uoq6tp427uzv7fztkbsnn64iwotfrristwpryy"}, - } - - for _, tc := range testCases { - tc := tc - t.Run(fmt.Sprintf("testing secp256k1 address: %s", tc.expected), func(t *testing.T) { - assert := assert.New(t) - - // Round trip encoding and decoding from string - addr, err := NewSecp256k1Address(tc.input) - assert.NoError(err) - assert.Equal(tc.expected, addr.String()) - - maybeAddr, err := NewFromString(tc.expected) - assert.NoError(err) - assert.Equal(SECP256K1, maybeAddr.Protocol()) - assert.Equal(addressHash(tc.input), maybeAddr.Payload()) - - // Round trip to and from bytes - maybeAddrBytes, err := NewFromBytes(maybeAddr.Bytes()) - assert.NoError(err) - assert.Equal(maybeAddr, maybeAddrBytes) - - // Round trip encoding and decoding json - b, err := addr.MarshalJSON() - assert.NoError(err) - - var newAddr Address - err = newAddr.UnmarshalJSON(b) - assert.NoError(err) - assert.Equal(addr, newAddr) - }) - } -} - -func TestRandomActorAddress(t *testing.T) { - assert := assert.New(t) - - actorMsg := make([]byte, 20) - rand.Read(actorMsg) - - addr, err := NewActorAddress(actorMsg) - assert.NoError(err) - assert.Equal(Actor, 
addr.Protocol()) - - str, err := encode(Mainnet, addr) - assert.NoError(err) - - maybe, err := decode(str) - assert.NoError(err) - assert.Equal(addr, maybe) - -} - -func TestVectorActorAddress(t *testing.T) { - testCases := []struct { - input []byte - expected string - }{ - {[]byte{118, 18, 129, 144, 205, 240, 104, 209, 65, 128, 68, 172, 192, - 62, 11, 103, 129, 151, 13, 96}, - "t24vg6ut43yw2h2jqydgbg2xq7x6f4kub3bg6as6i"}, - {[]byte{44, 175, 184, 226, 224, 107, 186, 152, 234, 101, 124, 92, 245, - 244, 32, 35, 170, 35, 232, 142}, - "t25nml2cfbljvn4goqtclhifepvfnicv6g7mfmmvq"}, - {[]byte{2, 44, 158, 14, 162, 157, 143, 64, 197, 106, 190, 195, 92, 141, - 88, 125, 160, 166, 76, 24}, - "t2nuqrg7vuysaue2pistjjnt3fadsdzvyuatqtfei"}, - {[]byte{223, 236, 3, 14, 32, 79, 15, 89, 216, 15, 29, 94, 233, 29, 253, - 6, 109, 127, 99, 189}, - "t24dd4ox4c2vpf5vk5wkadgyyn6qtuvgcpxxon64a"}, - {[]byte{61, 58, 137, 232, 221, 171, 84, 120, 50, 113, 108, 109, 70, 140, - 53, 96, 201, 244, 127, 216}, - "t2gfvuyh7v2sx3patm5k23wdzmhyhtmqctasbr23y"}, - } - - for _, tc := range testCases { - tc := tc - t.Run(fmt.Sprintf("testing Actor address: %s", tc.expected), func(t *testing.T) { - assert := assert.New(t) - - // Round trip encoding and decoding from string - addr, err := NewActorAddress(tc.input) - assert.NoError(err) - assert.Equal(tc.expected, addr.String()) - - maybeAddr, err := NewFromString(tc.expected) - assert.NoError(err) - assert.Equal(Actor, maybeAddr.Protocol()) - assert.Equal(addressHash(tc.input), maybeAddr.Payload()) - - // Round trip to and from bytes - maybeAddrBytes, err := NewFromBytes(maybeAddr.Bytes()) - assert.NoError(err) - assert.Equal(maybeAddr, maybeAddrBytes) - - // Round trip encoding and decoding json - b, err := addr.MarshalJSON() - assert.NoError(err) - - var newAddr Address - err = newAddr.UnmarshalJSON(b) - assert.NoError(err) - assert.Equal(addr, newAddr) - }) - } -} - -func TestVectorBLSAddress(t *testing.T) { - testCases := []struct { - input []byte - 
expected string - }{ - {[]byte{173, 88, 223, 105, 110, 45, 78, 145, 234, 134, 200, 129, 233, 56, - 186, 78, 168, 27, 57, 94, 18, 121, 123, 132, 185, 207, 49, 75, 149, 70, - 112, 94, 131, 156, 122, 153, 214, 6, 178, 71, 221, 180, 249, 172, 122, - 52, 20, 221}, - "t3vvmn62lofvhjd2ugzca6sof2j2ubwok6cj4xxbfzz4yuxfkgobpihhd2thlanmsh3w2ptld2gqkn2jvlss4a"}, - {[]byte{179, 41, 79, 10, 46, 41, 224, 198, 110, 188, 35, 93, 47, 237, - 202, 86, 151, 191, 120, 74, 246, 5, 199, 90, 246, 8, 230, 166, 61, 92, - 211, 142, 168, 92, 168, 152, 158, 14, 253, 233, 24, 139, 56, 47, - 147, 114, 70, 13}, - "t3wmuu6crofhqmm3v4enos73okk2l366ck6yc4owxwbdtkmpk42ohkqxfitcpa57pjdcftql4tojda2poeruwa"}, - {[]byte{150, 161, 163, 228, 234, 122, 20, 212, 153, 133, 230, 97, 178, - 36, 1, 212, 79, 237, 64, 45, 29, 9, 37, 178, 67, 201, 35, 88, 156, - 15, 188, 126, 50, 205, 4, 226, 158, 215, 141, 21, 211, 125, 58, 170, - 63, 230, 218, 51}, - "t3s2q2hzhkpiknjgmf4zq3ejab2rh62qbndueslmsdzervrhapxr7dftie4kpnpdiv2n6tvkr743ndhrsw6d3a"}, - {[]byte{134, 180, 84, 37, 140, 88, 148, 117, 247, 209, 111, 90, 172, 1, - 138, 121, 246, 193, 22, 157, 32, 252, 51, 146, 29, 216, 181, 206, 28, - 172, 108, 52, 143, 144, 163, 96, 54, 36, 246, 174, 185, 27, 100, 81, - 140, 46, 128, 149}, - "t3q22fijmmlckhl56rn5nkyamkph3mcfu5ed6dheq53c244hfmnq2i7efdma3cj5voxenwiummf2ajlsbxc65a"}, - {[]byte{167, 114, 107, 3, 128, 34, 247, 90, 56, 70, 23, 88, 83, 96, 206, - 230, 41, 7, 10, 45, 157, 40, 113, 41, 101, 229, 242, 110, 204, 64, - 133, 131, 130, 128, 55, 36, 237, 52, 242, 114, 3, 54, 240, 157, 182, - 49, 240, 116}, - "t3u5zgwa4ael3vuocgc5mfgygo4yuqocrntuuhcklf4xzg5tcaqwbyfabxetwtj4tsam3pbhnwghyhijr5mixa"}, - } - - for _, tc := range testCases { - tc := tc - t.Run(fmt.Sprintf("testing bls address: %s", tc.expected), func(t *testing.T) { - assert := assert.New(t) - - // Round trip encoding and decoding from string - addr, err := NewBLSAddress(tc.input) - assert.NoError(err) - assert.Equal(tc.expected, addr.String()) - - maybeAddr, err := 
NewFromString(tc.expected) - assert.NoError(err) - assert.Equal(BLS, maybeAddr.Protocol()) - assert.Equal(tc.input, maybeAddr.Payload()) - - // Round trip to and from bytes - maybeAddrBytes, err := NewFromBytes(maybeAddr.Bytes()) - assert.NoError(err) - assert.Equal(maybeAddr, maybeAddrBytes) - - // Round trip encoding and decoding json - b, err := addr.MarshalJSON() - assert.NoError(err) - - var newAddr Address - err = newAddr.UnmarshalJSON(b) - assert.NoError(err) - assert.Equal(addr, newAddr) - }) - } -} - -func TestInvalidStringAddresses(t *testing.T) { - testCases := []struct { - input string - expetErr error - }{ - {"Q2gfvuyh7v2sx3patm5k23wdzmhyhtmqctasbr23y", ErrUnknownNetwork}, - {"t4gfvuyh7v2sx3patm5k23wdzmhyhtmqctasbr23y", ErrUnknownProtocol}, - {"t2gfvuyh7v2sx3patm5k23wdzmhyhtmqctasbr24y", ErrInvalidChecksum}, - {"t0banananananannnnnnnnn", ErrInvalidLength}, - {"t0banananananannnnnnnn", ErrInvalidPayload}, - {"t2gfvuyh7v2sx3patm1k23wdzmhyhtmqctasbr24y", base32.CorruptInputError(16)}, // '1' is not in base32 alphabet - {"t2gfvuyh7v2sx3paTm1k23wdzmhyhtmqctasbr24y", base32.CorruptInputError(14)}, // 'T' is not in base32 alphabet - {"t2", ErrInvalidLength}, - } - - for _, tc := range testCases { - tc := tc - t.Run(fmt.Sprintf("testing string address: %s", tc.expetErr), func(t *testing.T) { - assert := assert.New(t) - - _, err := NewFromString(tc.input) - assert.Equal(tc.expetErr, err) - }) - } - -} - -func TestInvalidByteAddresses(t *testing.T) { - testCases := []struct { - input []byte - expetErr error - }{ - // Unknown Protocol - {[]byte{4, 4, 4}, ErrUnknownProtocol}, - - // ID protocol - {[]byte{0}, ErrInvalidLength}, - - // SECP256K1 Protocol - {append([]byte{1}, make([]byte, PayloadHashLength-1)...), ErrInvalidPayload}, - {append([]byte{1}, make([]byte, PayloadHashLength+1)...), ErrInvalidPayload}, - // Actor Protocol - {append([]byte{2}, make([]byte, PayloadHashLength-1)...), ErrInvalidPayload}, - {append([]byte{2}, make([]byte, 
PayloadHashLength+1)...), ErrInvalidPayload}, - - // BLS Protocol - {append([]byte{3}, make([]byte, BlsPublicKeyBytes-1)...), ErrInvalidPayload}, - {append([]byte{3}, make([]byte, BlsPrivateKeyBytes+1)...), ErrInvalidPayload}, - } - - for _, tc := range testCases { - tc := tc - t.Run(fmt.Sprintf("testing byte address: %s", tc.expetErr), func(t *testing.T) { - assert := assert.New(t) - - _, err := NewFromBytes(tc.input) - assert.Equal(tc.expetErr, err) - }) - } - -} - -func TestChecksum(t *testing.T) { - assert := assert.New(t) - - data := []byte("helloworld") - bata := []byte("kittinmittins") - - cksm := Checksum(data) - assert.Len(cksm, ChecksumHashLength) - - assert.True(ValidateChecksum(data, cksm)) - assert.False(ValidateChecksum(bata, cksm)) - -} - -func TestCborMarshal(t *testing.T) { - for _, a := range allTestAddresses { - addr, err := NewFromString(a) - if err != nil { - t.Fatal(err) - } - - buf := new(bytes.Buffer) - if err := addr.MarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - /* - // Note: this is commented out because we're currently serializing addresses as cbor "text strings", not "byte strings". - // This is to get around the restriction that refmt only allows string keys in maps. 
- // if you change it to serialize to byte strings and uncomment this, the tests pass fine - oldbytes, err := cbor.DumpObject(addr) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(oldbytes, buf.Bytes()) { - t.Fatalf("serialization doesnt match old serialization: %s", a) - } - */ - - var out Address - if err := out.UnmarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - if out != addr { - t.Fatalf("failed to round trip %s", a) - } - } -} - -func BenchmarkCborMarshal(b *testing.B) { - addr, err := NewFromString("t15ihq5ibzwki2b4ep2f46avlkrqzhpqgtga7pdrq") - if err != nil { - b.Fatal(err) - } - - b.ReportAllocs() - b.ResetTimer() - - buf := new(bytes.Buffer) - for i := 0; i < b.N; i++ { - buf.Reset() - if err := addr.MarshalCBOR(buf); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkCborUnmarshal(b *testing.B) { - addr, err := NewFromString("t15ihq5ibzwki2b4ep2f46avlkrqzhpqgtga7pdrq") - if err != nil { - b.Fatal(err) - } - - buf := new(bytes.Buffer) - if err := addr.MarshalCBOR(buf); err != nil { - b.Fatal(err) - } - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - var a Address - if err := a.UnmarshalCBOR(bytes.NewReader(buf.Bytes())); err != nil { - b.Fatal(err) - } - } -} - -func TestIDEdgeCase(t *testing.T) { - a, err := NewFromBytes([]byte{0, 0x80}) - _ = a.String() - assert.Error(t, err) -} diff --git a/vendor/github.com/filecoin-project/go-address/bench_test.go b/vendor/github.com/filecoin-project/go-address/bench_test.go deleted file mode 100644 index 2c21d549e9..0000000000 --- a/vendor/github.com/filecoin-project/go-address/bench_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package address - -import ( - "fmt" - "math/rand" - - "testing" -) - -func blsaddr(n int64) Address { - buf := make([]byte, 48) - r := rand.New(rand.NewSource(n)) - r.Read(buf) - - addr, err := NewBLSAddress(buf) - if err != nil { - panic(err) // ok - } - - return addr -} - -func makeActorAddresses(n int) [][]byte { - var addrs [][]byte - for i := 0; i < n; 
i++ { - a, err := NewActorAddress([]byte(fmt.Sprintf("ACTOR ADDRESS %d", i))) - if err != nil { - panic(err) // ok - } - addrs = append(addrs, a.Bytes()) - } - - return addrs -} - -func makeBlsAddresses(n int64) [][]byte { - var addrs [][]byte - for i := int64(0); i < n; i++ { - addrs = append(addrs, blsaddr(n).Bytes()) - } - return addrs -} - -func makeSecpAddresses(n int) [][]byte { - var addrs [][]byte - for i := 0; i < n; i++ { - r := rand.New(rand.NewSource(int64(i))) - buf := make([]byte, 32) - r.Read(buf) - - a, err := NewSecp256k1Address(buf) - if err != nil { - panic(err) // ok - } - - addrs = append(addrs, a.Bytes()) - } - return addrs -} - -func makeIDAddresses(n int) [][]byte { - var addrs [][]byte - for i := 0; i < n; i++ { - - a, err := NewIDAddress(uint64(i)) - if err != nil { - panic(err) // ok - } - - addrs = append(addrs, a.Bytes()) - } - return addrs -} - -func BenchmarkParseActorAddress(b *testing.B) { - benchTestWithAddrs := func(a [][]byte) func(b *testing.B) { - return func(b *testing.B) { - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, err := NewFromBytes(a[i%len(a)]) - if err != nil { - b.Fatal(err) - } - } - } - } - - b.Run("actor", benchTestWithAddrs(makeActorAddresses(20))) - b.Run("bls", benchTestWithAddrs(makeBlsAddresses(20))) - b.Run("secp256k1", benchTestWithAddrs(makeSecpAddresses(20))) - b.Run("id", benchTestWithAddrs(makeIDAddresses(20))) -} diff --git a/vendor/github.com/filecoin-project/go-address/go.mod b/vendor/github.com/filecoin-project/go-address/go.mod index 133139973a..5ed051ced9 100644 --- a/vendor/github.com/filecoin-project/go-address/go.mod +++ b/vendor/github.com/filecoin-project/go-address/go.mod @@ -4,11 +4,11 @@ go 1.13 require ( github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 - github.com/ipfs/go-ipld-cbor v0.0.3 + github.com/ipfs/go-ipld-cbor v0.0.4 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 github.com/multiformats/go-varint v0.0.5 
github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 github.com/stretchr/testify v1.4.0 - github.com/whyrusleeping/cbor-gen v0.0.0-20200501232601-351665a6e756 - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 ) diff --git a/vendor/github.com/filecoin-project/go-address/go.sum b/vendor/github.com/filecoin-project/go-address/go.sum index c09220bdfc..f22ff83028 100644 --- a/vendor/github.com/filecoin-project/go-address/go.sum +++ b/vendor/github.com/filecoin-project/go-address/go.sum @@ -13,15 +13,13 @@ github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmv github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA= -github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= -github.com/ipfs/go-ipld-cbor v0.0.3 h1:ENsxvybwkmke7Z/QJOmeJfoguj6GH3Y0YOaGrfy9Q0I= -github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= 
+github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= @@ -41,18 +39,22 @@ github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbV github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= 
github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-varint v0.0.2 h1:6sUvyh2YHpJCb8RZ6eYzj6iJQ4+chWYmyIHxszqlPTA= -github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -71,10 +73,9 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0 h1:efb/4CnrubzNGqQOeHErxyQ6rIsJb7GcgeSDF7fqWeI= -github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20200501232601-351665a6e756 h1:SSUF3WR9bHjHZBalytHBOAoOxJ9vdvTWUS6ztyRk3Qc= -github.com/whyrusleeping/cbor-gen v0.0.0-20200501232601-351665a6e756/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c h1:BMg3YUwLEUIYBJoYZVhA4ZDTciXRj6r7ffOCshWrsoE= 
+github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -87,9 +88,10 @@ golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/amt.go b/vendor/github.com/filecoin-project/go-amt-ipld/v2/amt.go index 9cea1cb25e..baf3e720c4 100644 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v2/amt.go +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/amt.go @@ -4,7 +4,6 
@@ import ( "bytes" "context" "fmt" - "math/bits" cid "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" @@ -149,12 +148,13 @@ func (r *Root) Get(ctx context.Context, i uint64, out interface{}) error { func (n *Node) get(ctx context.Context, bs cbor.IpldStore, height int, i uint64, out interface{}) error { subi := i / nodesForHeight(height) - set, _ := n.getBit(subi) - if !set { + if !n.isSet(subi) { return &ErrNotFound{i} } if height == 0 { - n.expandValues() + if err := n.expandValues(); err != nil { + return err + } d := n.expVals[i] @@ -213,12 +213,13 @@ func (r *Root) Delete(ctx context.Context, i uint64) error { func (n *Node) delete(ctx context.Context, bs cbor.IpldStore, height int, i uint64) error { subi := i / nodesForHeight(height) - set, _ := n.getBit(subi) - if !set { + if !n.isSet(subi) { return &ErrNotFound{i} } if height == 0 { - n.expandValues() + if err := n.expandValues(); err != nil { + return err + } n.expVals[i] = nil n.clearBit(i) @@ -262,7 +263,9 @@ func (r *Root) ForEachAt(ctx context.Context, start uint64, cb func(uint64, *cbg func (n *Node) forEachAt(ctx context.Context, bs cbor.IpldStore, height int, start, offset uint64, cb func(uint64, *cbg.Deferred) error) error { if height == 0 { - n.expandValues() + if err := n.expandValues(); err != nil { + return err + } for i, v := range n.expVals { if v != nil { @@ -281,7 +284,9 @@ func (n *Node) forEachAt(ctx context.Context, bs cbor.IpldStore, height int, sta } if n.cache == nil { - n.expandLinks() + if err := n.expandLinks(); err != nil { + return err + } } subCount := nodesForHeight(height) @@ -319,7 +324,9 @@ var errNoVals = fmt.Errorf("no values") func (n *Node) firstSetIndex(ctx context.Context, bs cbor.IpldStore, height int) (uint64, error) { if height == 0 { - n.expandValues() + if err := n.expandValues(); err != nil { + return 0, err + } for i, v := range n.expVals { if v != nil { return uint64(i), nil @@ -330,12 +337,13 @@ func (n *Node) firstSetIndex(ctx context.Context, bs 
cbor.IpldStore, height int) } if n.cache == nil { - n.expandLinks() + if err := n.expandLinks(); err != nil { + return 0, err + } } for i := 0; i < width; i++ { - ok, _ := n.getBit(uint64(i)) - if ok { + if n.isSet(uint64(i)) { subn, err := n.loadNode(ctx, bs, uint64(i), false) if err != nil { return 0, err @@ -354,24 +362,32 @@ func (n *Node) firstSetIndex(ctx context.Context, bs cbor.IpldStore, height int) return 0, errNoVals } -func (n *Node) expandValues() { +func (n *Node) expandValues() error { if len(n.expVals) == 0 { n.expVals = make([]*cbg.Deferred, width) + i := 0 for x := uint64(0); x < width; x++ { - set, ix := n.getBit(x) - if set { - n.expVals[x] = n.Values[ix] + if n.isSet(x) { + if i >= len(n.Values) { + n.expVals = nil + return fmt.Errorf("bitfield does not match values") + } + n.expVals[x] = n.Values[i] + i++ } } } + return nil } func (n *Node) set(ctx context.Context, bs cbor.IpldStore, height int, i uint64, val *cbg.Deferred) (bool, error) { //nfh := nodesForHeight(height) //fmt.Printf("[set] h: %d, i: %d, subi: %d\n", height, i, i/nfh) if height == 0 { - n.expandValues() - alreadySet, _ := n.getBit(i) + if err := n.expandValues(); err != nil { + return false, err + } + alreadySet := n.isSet(i) n.expVals[i] = val n.setBit(i) @@ -388,21 +404,12 @@ func (n *Node) set(ctx context.Context, bs cbor.IpldStore, height int, i uint64, return subn.set(ctx, bs, height-1, i%nfh, val) } -func (n *Node) getBit(i uint64) (bool, int) { +func (n *Node) isSet(i uint64) bool { if i > 7 { panic("cant deal with wider arrays yet") } - if len(n.Bmap) == 0 { - return false, 0 - } - - if n.Bmap[0]&byte(1<= len(n.Links) { + n.cache = nil + n.expLinks = nil + return fmt.Errorf("bitfield does not match links") + } + n.expLinks[x] = n.Links[i] + i++ } } + return nil } func (n *Node) loadNode(ctx context.Context, bs cbor.IpldStore, i uint64, create bool) (*Node, error) { if n.cache == nil { - n.expandLinks() + if err := n.expandLinks(); err != nil { + return nil, err + } } 
else { if n := n.cache[i]; n != nil { return n, nil } } - set, _ := n.getBit(i) - var subn *Node - if set { + if n.isSet(i) { var sn Node if err := bs.Get(ctx, n.expLinks[i], &sn); err != nil { return nil, err @@ -500,6 +514,7 @@ func (n *Node) Flush(ctx context.Context, bs cbor.IpldStore, depth int) error { if len(n.expVals) == 0 { return nil } + n.Bmap = [...]byte{0} n.Values = nil for i := uint64(0); i < width; i++ { v := n.expVals[i] diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.mod b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.mod index 2b36bc7959..76c2351aa2 100644 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.mod +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.mod @@ -8,6 +8,6 @@ require ( github.com/ipfs/go-ipld-cbor v0.0.4 github.com/ipfs/go-log v1.0.4 github.com/stretchr/testify v1.6.1 - github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 + github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 ) diff --git a/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.sum b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.sum index ebb6eabab7..e739be5907 100644 --- a/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.sum +++ b/vendor/github.com/filecoin-project/go-amt-ipld/v2/go.sum @@ -93,8 +93,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377 h1:LHFlP/ktDvOnCap7PsT87cs7Gwd0p+qv6Qm5g2ZPR+I= -github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b h1:Tju61pLCTYt5KZ9Y4wJKNR+IXB1k29M+0w3eW48Xqy0= +github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= diff --git a/vendor/github.com/filecoin-project/go-bitfield/.github/workflows/go.yml b/vendor/github.com/filecoin-project/go-bitfield/.github/workflows/go.yml deleted file mode 100644 index 2208ea2a52..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/.github/workflows/go.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Go - -on: - push: - branches: [ master ] - pull_request: - branches: [ master ] - -jobs: - - build: - name: Build - runs-on: ubuntu-latest - steps: - - - name: Set up Go 1.x - uses: actions/setup-go@v2 - with: - go-version: ^1.14 - id: go - - uses: actions/cache@v1 - with: - path: ~/go/pkg/mod - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - - name: Check out code into the Go module directory - uses: actions/checkout@v2 - - - name: Build - run: go build -v . - - - name: Test - run: go test -v . diff --git a/vendor/github.com/filecoin-project/go-bitfield/README.md b/vendor/github.com/filecoin-project/go-bitfield/README.md index 8c23252b2e..68c481eff5 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/README.md +++ b/vendor/github.com/filecoin-project/go-bitfield/README.md @@ -6,6 +6,8 @@ > Advanced RLE+ implementation +Features iterator based primitives that scale with number of runs instead of number of bits. 
+ ## License The Filecoin Project is dual-licensed under Apache 2.0 and MIT terms: diff --git a/vendor/github.com/filecoin-project/go-bitfield/bitfield.go b/vendor/github.com/filecoin-project/go-bitfield/bitfield.go index 7d1ca9b5a0..591889c4e2 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/bitfield.go +++ b/vendor/github.com/filecoin-project/go-bitfield/bitfield.go @@ -15,6 +15,12 @@ var ( ErrNoBitsSet = errors.New("bitfield has no set bits") ) +// MaxEncodedSize is the maximum encoded size of a bitfield. When expanded into +// a slice of runs, a bitfield of this size should not exceed 2MiB of memory. +// +// This bitfield can fit at least 3072 sparse elements. +const MaxEncodedSize = 32 << 10 + type BitField struct { rle rlepluslazy.RLE @@ -45,8 +51,8 @@ func NewFromBytes(rle []byte) (BitField, error) { } -func newWithRle(rle rlepluslazy.RLE) *BitField { - return &BitField{ +func newWithRle(rle rlepluslazy.RLE) BitField { + return BitField{ set: make(map[uint64]struct{}), unset: make(map[uint64]struct{}), rle: rle, @@ -54,8 +60,8 @@ func newWithRle(rle rlepluslazy.RLE) *BitField { } // NewFromSet constructs a bitfield from the given set. -func NewFromSet(setBits []uint64) *BitField { - res := &BitField{ +func NewFromSet(setBits []uint64) BitField { + res := BitField{ set: make(map[uint64]struct{}, len(setBits)), unset: make(map[uint64]struct{}), } @@ -66,15 +72,15 @@ func NewFromSet(setBits []uint64) *BitField { } // NewFromIter constructs a BitField from the RunIterator. 
-func NewFromIter(r rlepluslazy.RunIterator) (*BitField, error) { +func NewFromIter(r rlepluslazy.RunIterator) (BitField, error) { buf, err := rlepluslazy.EncodeRuns(r, nil) if err != nil { - return nil, err + return BitField{}, err } rle, err := rlepluslazy.FromBuf(buf) if err != nil { - return nil, err + return BitField{}, err } return newWithRle(rle), nil @@ -92,30 +98,30 @@ func NewFromIter(r rlepluslazy.RunIterator) (*BitField, error) { // 1 1 1 1 1 // // This operation's runtime is O(number of runs). -func MergeBitFields(a, b *BitField) (*BitField, error) { +func MergeBitFields(a, b BitField) (BitField, error) { ra, err := a.RunIterator() if err != nil { - return nil, err + return BitField{}, err } rb, err := b.RunIterator() if err != nil { - return nil, err + return BitField{}, err } merge, err := rlepluslazy.Or(ra, rb) if err != nil { - return nil, err + return BitField{}, err } mergebytes, err := rlepluslazy.EncodeRuns(merge, nil) if err != nil { - return nil, err + return BitField{}, err } rle, err := rlepluslazy.FromBuf(mergebytes) if err != nil { - return nil, err + return BitField{}, err } return newWithRle(rle), nil @@ -127,7 +133,7 @@ func MergeBitFields(a, b *BitField) (*BitField, error) { // more efficient when merging more than two BitFields. // // This operation's runtime is O(number of runs * number of bitfields). -func MultiMerge(bfs ...*BitField) (*BitField, error) { +func MultiMerge(bfs ...BitField) (BitField, error) { if len(bfs) == 0 { return NewFromSet(nil), nil } @@ -136,19 +142,109 @@ func MultiMerge(bfs ...*BitField) (*BitField, error) { for _, bf := range bfs { iter, err := bf.RunIterator() if err != nil { - return nil, err + return BitField{}, err } iters = append(iters, iter) } iter, err := rlepluslazy.Union(iters...) if err != nil { - return nil, err + return BitField{}, err } return NewFromIter(iter) } -func (bf *BitField) RunIterator() (rlepluslazy.RunIterator, error) { +// CutBitField cuts bitfield B from bitfield A. 
For every bit in B cut from A, +// subsequent entries in A are shifted down by one. +// +// For example: +// +// a: 0 1 0 1 1 1 +// b: 0 1 1 0 0 0 +// +// c: 0 1 1 1 // cut +// c: 0 1 1 1 // remove holes +func CutBitField(a, b BitField) (BitField, error) { + aiter, err := a.RunIterator() + if err != nil { + return BitField{}, err + } + + biter, err := b.RunIterator() + if err != nil { + return BitField{}, err + } + + var ( + run, cutRun rlepluslazy.Run + output []rlepluslazy.Run + ) + for { + if !run.Valid() { + if !aiter.HasNext() { + // All done. + break + } + + run, err = aiter.NextRun() + if err != nil { + return BitField{}, err + } + } + + if !cutRun.Valid() && biter.HasNext() { + cutRun, err = biter.NextRun() + if err != nil { + return BitField{}, err + } + } + + var newRun rlepluslazy.Run + if !cutRun.Valid() { + newRun = run // keep remaining runs + run.Len = 0 + } else if cutRun.Len >= run.Len { + if !cutRun.Val { + newRun = run + } + cutRun.Len -= run.Len + run.Len = 0 + } else { + if !cutRun.Val { + newRun = rlepluslazy.Run{ + Val: run.Val, + Len: cutRun.Len, + } + } + run.Len -= cutRun.Len + cutRun.Len = 0 + } + + if newRun.Valid() { + if len(output) > 0 && output[len(output)-1].Val == newRun.Val { + // Join adjacent runs of 1s. We may cut in the middle of + // a run. + output[len(output)-1].Len += newRun.Len + } else { + output = append(output, newRun) + } + } + } + + buf, err := rlepluslazy.EncodeRuns(&rlepluslazy.RunSliceIterator{Runs: output}, nil) + if err != nil { + return BitField{}, err + } + + rle, err := rlepluslazy.FromBuf(buf) + if err != nil { + return BitField{}, err + } + + return BitField{rle: rle}, nil +} + +func (bf BitField) RunIterator() (rlepluslazy.RunIterator, error) { iter, err := bf.rle.RunIterator() if err != nil { return nil, err @@ -191,7 +287,7 @@ func (bf *BitField) RunIterator() (rlepluslazy.RunIterator, error) { // // This operation's runtime is O(1) up-front. 
However, it adds an O(bits // explicitly set) cost to all other operations. -func (bf *BitField) Set(bit uint64) { +func (bf BitField) Set(bit uint64) { delete(bf.unset, bit) bf.set[bit] = struct{}{} } @@ -200,7 +296,7 @@ func (bf *BitField) Set(bit uint64) { // // This operation's runtime is O(1). However, it adds an O(bits // explicitly unset) cost to all other operations. -func (bf *BitField) Unset(bit uint64) { +func (bf BitField) Unset(bit uint64) { delete(bf.set, bit) bf.unset[bit] = struct{}{} } @@ -214,7 +310,7 @@ func (bf *BitField) Unset(bit uint64) { // Count() will return 3. // // This operation's runtime is O(number of runs). -func (bf *BitField) Count() (uint64, error) { +func (bf BitField) Count() (uint64, error) { s, err := bf.RunIterator() if err != nil { return 0, err @@ -233,7 +329,7 @@ func (bf *BitField) Count() (uint64, error) { // []uint64{0, 3} // // This operation's runtime is O(number of bits). -func (bf *BitField) All(max uint64) ([]uint64, error) { +func (bf BitField) All(max uint64) ([]uint64, error) { c, err := bf.Count() if err != nil { return nil, xerrors.Errorf("count errror: %w", err) @@ -266,7 +362,7 @@ func (bf *BitField) All(max uint64) ([]uint64, error) { // map[uint64]bool{0: true, 3: true} // // This operation's runtime is O(number of bits). -func (bf *BitField) AllMap(max uint64) (map[uint64]bool, error) { +func (bf BitField) AllMap(max uint64) (map[uint64]bool, error) { c, err := bf.Count() if err != nil { return nil, xerrors.Errorf("count errror: %w", err) @@ -292,29 +388,32 @@ func (bf *BitField) AllMap(max uint64) (map[uint64]bool, error) { return out, nil } -func (bf *BitField) MarshalCBOR(w io.Writer) error { - if bf == nil { - _, err := w.Write(cbg.CborNull) - return err - } - s, err := bf.RunIterator() - if err != nil { - return err - } +func (bf BitField) MarshalCBOR(w io.Writer) error { + var rle []byte + if len(bf.set) == 0 && len(bf.unset) == 0 { + // If unmodified, avoid re-encoding. 
+ rle = bf.rle.Bytes() + } else { - rle, err := rlepluslazy.EncodeRuns(s, []byte{}) - if err != nil { - return err + s, err := bf.RunIterator() + if err != nil { + return err + } + + rle, err = rlepluslazy.EncodeRuns(s, []byte{}) + if err != nil { + return err + } } - if len(rle) > 8192 { + if len(rle) > MaxEncodedSize { return xerrors.Errorf("encoded bitfield was too large (%d)", len(rle)) } if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(rle)))); err != nil { return err } - if _, err = w.Write(rle); err != nil { + if _, err := w.Write(rle); err != nil { return xerrors.Errorf("writing rle: %w", err) } return nil @@ -327,7 +426,7 @@ func (bf *BitField) UnmarshalCBOR(r io.Reader) error { if err != nil { return err } - if extra > 8192 { + if extra > MaxEncodedSize { return fmt.Errorf("array too large") } @@ -351,7 +450,7 @@ func (bf *BitField) UnmarshalCBOR(r io.Reader) error { return nil } -func (bf *BitField) MarshalJSON() ([]byte, error) { +func (bf BitField) MarshalJSON() ([]byte, error) { c, err := bf.Copy() if err != nil { @@ -375,7 +474,7 @@ func (bf *BitField) UnmarshalJSON(b []byte) error { // ForEach iterates over each set bit. // // This operation's runtime is O(bits set). -func (bf *BitField) ForEach(f func(uint64) error) error { +func (bf BitField) ForEach(f func(uint64) error) error { iter, err := bf.RunIterator() if err != nil { return err @@ -405,7 +504,7 @@ func (bf *BitField) ForEach(f func(uint64) error) error { // IsSet returns true if the given bit is set. // // This operation's runtime is O(number of runs). -func (bf *BitField) IsSet(x uint64) (bool, error) { +func (bf BitField) IsSet(x uint64) (bool, error) { if _, ok := bf.set[x]; ok { return true, nil } @@ -426,7 +525,7 @@ func (bf *BitField) IsSet(x uint64) (bool, error) { // ErrNoBitsSet when no bits have been set. // // This operation's runtime is O(1). 
-func (bf *BitField) First() (uint64, error) { +func (bf BitField) First() (uint64, error) { iter, err := bf.RunIterator() if err != nil { return 0, err @@ -448,10 +547,41 @@ func (bf *BitField) First() (uint64, error) { return 0, ErrNoBitsSet } +// Last returns the index of the last set bit. This function returns +// ErrNoBitsSet when no bits have been set. +// +// This operation's runtime is O(n). +func (bf BitField) Last() (uint64, error) { + iter, err := bf.RunIterator() + if err != nil { + return 0, err + } + + var ( + at, maxplusone uint64 + ) + for iter.HasNext() { + run, err := iter.NextRun() + if err != nil { + return 0, err + } + + at += run.Len + + if run.Val { + maxplusone = at + } + } + if maxplusone == 0 { + return 0, ErrNoBitsSet + } + return maxplusone - 1, nil +} + // IsEmpty returns true if the bitset is empty. // // This operation's runtime is O(1). -func (bf *BitField) IsEmpty() (bool, error) { +func (bf BitField) IsEmpty() (bool, error) { _, err := bf.First() switch err { case ErrNoBitsSet: @@ -476,10 +606,10 @@ func (bf *BitField) IsEmpty() (bool, error) { // 0 0 0 1 0 1 0 // // This operation's runtime is O(number of runs). 
-func (bf *BitField) Slice(start, count uint64) (*BitField, error) { +func (bf BitField) Slice(start, count uint64) (BitField, error) { iter, err := bf.RunIterator() if err != nil { - return nil, err + return BitField{}, err } valsUntilStart := start @@ -489,7 +619,7 @@ func (bf *BitField) Slice(start, count uint64) (*BitField, error) { for iter.HasNext() && valsUntilStart > 0 { r, err := iter.NextRun() if err != nil { - return nil, err + return BitField{}, err } if r.Val { @@ -519,7 +649,7 @@ func (bf *BitField) Slice(start, count uint64) (*BitField, error) { for iter.HasNext() && outcount < count { r, err := iter.NextRun() if err != nil { - return nil, err + return BitField{}, err } if r.Val { @@ -538,20 +668,20 @@ func (bf *BitField) Slice(start, count uint64) (*BitField, error) { } } if outcount < count { - return nil, fmt.Errorf("not enough bits set in field to satisfy slice count") + return BitField{}, fmt.Errorf("not enough bits set in field to satisfy slice count") } buf, err := rlepluslazy.EncodeRuns(&rlepluslazy.RunSliceIterator{Runs: sliceRuns}, nil) if err != nil { - return nil, err + return BitField{}, err } rle, err := rlepluslazy.FromBuf(buf) if err != nil { - return nil, err + return BitField{}, err } - return &BitField{rle: rle}, nil + return BitField{rle: rle}, nil } // IntersectBitField returns the intersection of the two BitFields. @@ -566,30 +696,30 @@ func (bf *BitField) Slice(start, count uint64) (*BitField, error) { // 0 1 0 0 0 // // This operation's runtime is O(number of runs). 
-func IntersectBitField(a, b *BitField) (*BitField, error) { +func IntersectBitField(a, b BitField) (BitField, error) { ar, err := a.RunIterator() if err != nil { - return nil, err + return BitField{}, err } br, err := b.RunIterator() if err != nil { - return nil, err + return BitField{}, err } andIter, err := rlepluslazy.And(ar, br) if err != nil { - return nil, err + return BitField{}, err } buf, err := rlepluslazy.EncodeRuns(andIter, nil) if err != nil { - return nil, err + return BitField{}, err } rle, err := rlepluslazy.FromBuf(buf) if err != nil { - return nil, err + return BitField{}, err } return newWithRle(rle), nil @@ -608,30 +738,30 @@ func IntersectBitField(a, b *BitField) (*BitField, error) { // 0 0 1 0 1 // // This operation's runtime is O(number of runs). -func SubtractBitField(a, b *BitField) (*BitField, error) { +func SubtractBitField(a, b BitField) (BitField, error) { ar, err := a.RunIterator() if err != nil { - return nil, err + return BitField{}, err } br, err := b.RunIterator() if err != nil { - return nil, err + return BitField{}, err } andIter, err := rlepluslazy.Subtract(ar, br) if err != nil { - return nil, err + return BitField{}, err } buf, err := rlepluslazy.EncodeRuns(andIter, nil) if err != nil { - return nil, err + return BitField{}, err } rle, err := rlepluslazy.FromBuf(buf) if err != nil { - return nil, err + return BitField{}, err } return newWithRle(rle), nil @@ -639,27 +769,27 @@ func SubtractBitField(a, b *BitField) (*BitField, error) { // Copy flushes the bitfield and returns a copy that can be mutated // without changing the original values -func (bf *BitField) Copy() (*BitField, error) { +func (bf BitField) Copy() (BitField, error) { r, err := bf.RunIterator() if err != nil { - return nil, err + return BitField{}, err } buf, err := rlepluslazy.EncodeRuns(r, nil) if err != nil { - return nil, err + return BitField{}, err } rle, err := rlepluslazy.FromBuf(buf) if err != nil { - return nil, err + return BitField{}, err } return 
newWithRle(rle), nil } // BitIterator iterates over the bits in the bitmap -func (bf *BitField) BitIterator() (rlepluslazy.BitIterator, error) { +func (bf BitField) BitIterator() (rlepluslazy.BitIterator, error) { r, err := bf.RunIterator() if err != nil { return nil, err diff --git a/vendor/github.com/filecoin-project/go-bitfield/bitfield_benchmark_test.go b/vendor/github.com/filecoin-project/go-bitfield/bitfield_benchmark_test.go deleted file mode 100644 index 0e29230394..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/bitfield_benchmark_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package bitfield - -import ( - "fmt" - "testing" -) - -func benchmark(b *testing.B, cb func(b *testing.B, bf *BitField)) { - for _, size := range []int{ - 0, - 1, - 10, - 1000, - 1000000, - } { - benchmarkSize(b, size, cb) - } -} - -func benchmarkSize(b *testing.B, size int, cb func(b *testing.B, bf *BitField)) { - b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { - vals := getRandIndexSet(size) - bf := NewFromSet(vals) - b.Run("basic", func(b *testing.B) { - for i := 0; i < b.N; i++ { - cb(b, bf) - } - }) - - if size < 1 { - return - } - - // Set and unset some bits - i := uint64(size / 10) - bf.Set(i) - bf.Set(i + 1) - bf.Set(i * 2) - bf.Unset(i / 2) - bf.Unset(uint64(size) - 1) - - b.Run("modified", func(b *testing.B) { - for i := 0; i < b.N; i++ { - cb(b, bf) - } - }) - }) -} - -func BenchmarkCount(b *testing.B) { - benchmark(b, func(b *testing.B, bf *BitField) { - _, err := bf.Count() - if err != nil { - b.Fatal(err) - } - }) -} - -func BenchmarkIsEmpty(b *testing.B) { - benchmark(b, func(b *testing.B, bf *BitField) { - _, err := bf.IsEmpty() - if err != nil { - b.Fatal(err) - } - }) -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/bitfield_test.go b/vendor/github.com/filecoin-project/go-bitfield/bitfield_test.go deleted file mode 100644 index 63eb044093..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/bitfield_test.go +++ /dev/null @@ 
-1,522 +0,0 @@ -package bitfield - -import ( - "encoding/json" - "fmt" - "math/rand" - "sort" - "testing" - - rlepluslazy "github.com/filecoin-project/go-bitfield/rle" -) - -func slicesEqual(a, b []uint64) bool { - if len(a) != len(b) { - return false - } - - for i, v := range a { - if b[i] != v { - return false - } - } - return true -} - -func getRandIndexSet(n int) []uint64 { - return getRandIndexSetSeed(n, 55) -} - -func getRandIndexSetSeed(n int, seed int64) []uint64 { - r := rand.New(rand.NewSource(seed)) - - var items []uint64 - for i := 0; i < n; i++ { - if r.Intn(3) != 0 { - items = append(items, uint64(i)) - } - } - return items -} - -func TestBitfieldSlice(t *testing.T) { - vals := getRandIndexSet(10000) - - bf := NewFromSet(vals) - - sl, err := bf.Slice(600, 500) - if err != nil { - t.Fatal(err) - } - - expslice := vals[600:1100] - - outvals, err := sl.All(10000) - if err != nil { - t.Fatal(err) - } - - if !slicesEqual(expslice, outvals) { - fmt.Println(expslice) - fmt.Println(outvals) - t.Fatal("output slice was not correct") - } -} - -func TestBitfieldSliceSmall(t *testing.T) { - vals := []uint64{1, 5, 6, 7, 10, 11, 12, 15} - - testPerm := func(start, count uint64) func(*testing.T) { - return func(t *testing.T) { - - bf := NewFromSet(vals) - - sl, err := bf.Slice(start, count) - if err != nil { - t.Fatal(err) - } - - expslice := vals[start : start+count] - - outvals, err := sl.All(10000) - if err != nil { - t.Fatal(err) - } - - if !slicesEqual(expslice, outvals) { - fmt.Println(expslice) - fmt.Println(outvals) - t.Fatal("output slice was not correct") - } - } - } - - /* - t.Run("all", testPerm(0, 8)) - t.Run("not first", testPerm(1, 7)) - t.Run("last item", testPerm(7, 1)) - t.Run("start during gap", testPerm(1, 4)) - t.Run("start during run", testPerm(3, 4)) - t.Run("end during run", testPerm(1, 1)) - */ - - for i := 0; i < len(vals); i++ { - for j := 0; j < len(vals)-i; j++ { - t.Run(fmt.Sprintf("comb-%d-%d", i, j), testPerm(uint64(i), uint64(j))) - 
} - } -} - -func unionArrs(a, b []uint64) []uint64 { - m := make(map[uint64]bool) - for _, v := range a { - m[v] = true - } - for _, v := range b { - m[v] = true - } - - out := make([]uint64, 0, len(m)) - for v := range m { - out = append(out, v) - } - - sort.Slice(out, func(i, j int) bool { - return out[i] < out[j] - }) - - return out -} - -func TestBitfieldUnion(t *testing.T) { - a := getRandIndexSetSeed(100, 1) - b := getRandIndexSetSeed(100, 2) - - bfa := NewFromSet(a) - bfb := NewFromSet(b) - - bfu, err := MergeBitFields(bfa, bfb) - if err != nil { - t.Fatal(err) - } - - out, err := bfu.All(100000) - if err != nil { - t.Fatal(err) - } - - exp := unionArrs(a, b) - - if !slicesEqual(out, exp) { - fmt.Println(out) - fmt.Println(exp) - t.Fatal("union was wrong") - } -} - -func multiUnionArrs(arrs [][]uint64) []uint64 { - base := arrs[0] - for i := 1; i < len(arrs); i++ { - base = unionArrs(base, arrs[i]) - } - return base -} - -func TestBitfieldMultiUnion(t *testing.T) { - var sets [][]uint64 - var bfs []*BitField - for i := 0; i < 15; i++ { - s := getRandIndexSetSeed(10000, 1) - sets = append(sets, s) - bfs = append(bfs, NewFromSet(s)) - } - - bfu, err := MultiMerge(bfs...) 
- if err != nil { - t.Fatal(err) - } - - out, err := bfu.All(100000) - if err != nil { - t.Fatal(err) - } - - exp := multiUnionArrs(sets) - - if !slicesEqual(out, exp) { - fmt.Println(out) - fmt.Println(exp) - t.Fatal("union was wrong") - } -} - -func TestBitfieldJson(t *testing.T) { - vals := []uint64{1, 5, 6, 7, 10, 11, 12, 15} - - bf := NewFromSet(vals) - - b, err := bf.MarshalJSON() - if err != nil { - t.Fatal(err) - } - - var buf []uint64 - if err := json.Unmarshal(b, &buf); err != nil { - t.Fatal(err) - } - - // (0) (1) (2, 3, 4), (5, 6, 7), (8, 9), (10, 11, 12), (13, 14), 15 - runs := []uint64{1, 1, 3, 3, 2, 3, 2, 1} - if !slicesEqual(runs, buf) { - t.Fatal("runs not encoded correctly") - } -} - -func TestEmptyBitfieldJson(t *testing.T) { - type ct struct { - B *BitField - } - - ebf := New() - s := &ct{ - B: &ebf, - } - - b, err := json.Marshal(s) - if err != nil { - t.Fatal(err) - } - - var u ct - if err := json.Unmarshal(b, &u); err != nil { - t.Fatal(err) - } - - if u.B == nil { - t.Fatal("u.B is nil", string(b)) - } - - set, err := u.B.Count() - if err != nil { - t.Fatal(err) - } - - if set > 0 { - t.Errorf("expected 0 bits to be set") - } -} - -func TestBitfieldJsonRoundTrip(t *testing.T) { - vals := getRandIndexSet(100000) - - bf := NewFromSet(vals) - - b, err := bf.MarshalJSON() - if err != nil { - t.Fatal(err) - } - - var out BitField - if err := out.UnmarshalJSON(b); err != nil { - t.Fatal(err) - } - - outv, err := out.All(100000) - if err != nil { - t.Fatal(err) - } - - if !slicesEqual(vals, outv) { - t.Fatal("round trip failed") - } -} - -func setIntersect(a, b []uint64) []uint64 { - m := make(map[uint64]bool) - for _, v := range a { - m[v] = true - } - - var out []uint64 - for _, v := range b { - if m[v] { - out = append(out, v) - } - } - return out -} - -func TestBitfieldIntersect(t *testing.T) { - a := getRandIndexSetSeed(100, 1) - b := getRandIndexSetSeed(100, 2) - - bfa := NewFromSet(a) - bfb := NewFromSet(b) - - inter, err := 
IntersectBitField(bfa, bfb) - if err != nil { - t.Fatal(err) - } - - out, err := inter.All(10000) - if err != nil { - t.Fatal(err) - } - - exp := setIntersect(a, b) - - if !slicesEqual(out, exp) { - fmt.Println(a) - fmt.Println(b) - fmt.Println(out) - fmt.Println(exp) - t.Fatal("intersection is wrong") - } -} - -func setSubtract(a, b []uint64) []uint64 { - m := make(map[uint64]bool) - for _, v := range a { - m[v] = true - } - for _, v := range b { - delete(m, v) - } - - out := make([]uint64, 0, len(m)) - for v := range m { - out = append(out, v) - } - - sort.Slice(out, func(i, j int) bool { - return out[i] < out[j] - }) - - return out -} - -func TestBitfieldOrDifferentLenZeroSuffix(t *testing.T) { - ra := &rlepluslazy.RunSliceIterator{ - Runs: []rlepluslazy.Run{ - {Val: false, Len: 5}, - }, - } - - rb := &rlepluslazy.RunSliceIterator{ - Runs: []rlepluslazy.Run{ - {Val: false, Len: 8}, - }, - } - - merge, err := rlepluslazy.Or(ra, rb) - if err != nil { - t.Fatal(err) - } - - mergebytes, err := rlepluslazy.EncodeRuns(merge, nil) - if err != nil { - t.Fatal(err) - } - - b, err := NewFromBytes(mergebytes) - if err != nil { - t.Fatal(err) - } - - c, err := b.Count() - if err != nil { - t.Fatal(err) - } - - if c != 0 { - t.Error("expected 0 set bits", c) - } -} - -func TestBitfieldSubDifferentLenZeroSuffix(t *testing.T) { - ra := &rlepluslazy.RunSliceIterator{ - Runs: []rlepluslazy.Run{ - {Val: true, Len: 5}, - {Val: false, Len: 5}, - }, - } - - rb := &rlepluslazy.RunSliceIterator{ - Runs: []rlepluslazy.Run{ - {Val: true, Len: 5}, - {Val: false, Len: 8}, - }, - } - - merge, err := rlepluslazy.Subtract(ra, rb) - if err != nil { - t.Fatal(err) - } - - mergebytes, err := rlepluslazy.EncodeRuns(merge, nil) - if err != nil { - t.Fatal(err) - } - - b, err := NewFromBytes(mergebytes) - if err != nil { - t.Fatal(err) - } - - c, err := b.Count() - if err != nil { - t.Fatal(err) - } - - if c != 0 { - t.Error("expected 0 set bits", c) - } -} - -func TestBitfieldSubtract(t 
*testing.T) { - a := getRandIndexSetSeed(100, 1) - b := getRandIndexSetSeed(100, 2) - - bfa := NewFromSet(a) - bfb := NewFromSet(b) - - inter, err := SubtractBitField(bfa, bfb) - if err != nil { - t.Fatal(err) - } - - out, err := inter.All(10000) - if err != nil { - t.Fatal(err) - } - - exp := setSubtract(a, b) - - if !slicesEqual(out, exp) { - fmt.Println(a) - fmt.Println(b) - fmt.Println(out) - fmt.Println(exp) - t.Fatal("subtraction is wrong") - } -} - -// -func BitFieldUnion(bfs ...*BitField) (*BitField, error) { - // TODO: optimize me - for len(bfs) > 1 { - var next []*BitField - for i := 0; i < len(bfs); i += 2 { - if i+1 >= len(bfs) { - next = append(next, bfs[i]) - break - } - merged, err := MergeBitFields(bfs[i], bfs[i+1]) - if err != nil { - return nil, err - } - - next = append(next, merged) - } - bfs = next - } - return bfs[0], nil -} - -// -func TestBitfieldSubtractMore(t *testing.T) { - have := NewFromSet([]uint64{5, 6, 8, 10, 11, 13, 14, 17}) - s1, err := SubtractBitField(NewFromSet([]uint64{5, 6}), have) - if err != nil { - t.Fatal(err) - } - s2, err := SubtractBitField(NewFromSet([]uint64{8, 10}), have) - if err != nil { - t.Fatal(err) - } - s3, err := SubtractBitField(NewFromSet([]uint64{11, 13}), have) - if err != nil { - t.Fatal(err) - } - s4, err := SubtractBitField(NewFromSet([]uint64{14, 17}), have) - if err != nil { - t.Fatal(err) - } - - u, err := BitFieldUnion(s1, s2, s3, s4) - if err != nil { - t.Fatal(err) - } - - c, err := u.Count() - if err != nil { - t.Fatal(err) - } - if c != 0 { - ua, err := u.All(500) - fmt.Printf("%s %+v", err, ua) - t.Error("expected 0", c) - } -} - -func TestBitfieldCopy(t *testing.T) { - start := []uint64{5, 6, 8, 10, 11, 13, 14, 17} - - orig := NewFromSet(start) - - cp, err := orig.Copy() - if err != nil { - t.Fatal(err) - } - - cp.Unset(10) - - s, err := orig.IsSet(10) - if err != nil { - t.Fatal(err) - } - if !s { - t.Fatal("mutation affected original bitfield") - } - -} diff --git 
a/vendor/github.com/filecoin-project/go-bitfield/codecov.yml b/vendor/github.com/filecoin-project/go-bitfield/codecov.yml new file mode 100644 index 0000000000..01da6d5dd5 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-bitfield/codecov.yml @@ -0,0 +1,3 @@ +comment: off +github_checks: + annotations: false diff --git a/vendor/github.com/filecoin-project/go-bitfield/go.mod b/vendor/github.com/filecoin-project/go-bitfield/go.mod index e09e76c703..40fd22aa19 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/go.mod +++ b/vendor/github.com/filecoin-project/go-bitfield/go.mod @@ -4,7 +4,6 @@ go 1.13 require ( github.com/ipfs/go-cid v0.0.5 // indirect - github.com/multiformats/go-varint v0.0.5 github.com/stretchr/testify v1.4.0 github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 diff --git a/vendor/github.com/filecoin-project/go-bitfield/go.sum b/vendor/github.com/filecoin-project/go-bitfield/go.sum index 2eba620503..60b96d700b 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/go.sum +++ b/vendor/github.com/filecoin-project/go-bitfield/go.sum @@ -1,7 +1,5 @@ github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= @@ -11,19 +9,12 @@ github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.5 
h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= @@ -52,24 +43,16 @@ golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto 
v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go index 8e1534d200..cef81fdb45 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/bits.go @@ -122,7 +122,7 @@ func (it *it2r) prep() error { if !it.HasNext() { return nil } - if it.run[0].Val == false { + if !it.run[0].Val { it.run[1].Val = true it.run[1].Len = 1 return nil diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bits_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bits_test.go deleted file mode 100644 index 31e62f4577..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/bits_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package rlepluslazy - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestRunsFromBits(t *testing.T) { - expected := []Run{{Val: false, Len: 0x1}, - {Val: true, Len: 0x3}, - {Val: false, Len: 0x2}, - {Val: true, Len: 0x3}, - } - rit, err := RunsFromBits(BitsFromSlice([]uint64{1, 2, 3, 6, 7, 8})) - assert.NoError(t, err) - i := 10 - output := make([]Run, 0, 4) - for rit.HasNext() && i > 0 { - run, err := rit.NextRun() - assert.NoError(t, err) - i-- - output = append(output, run) - } - assert.NotEqual(t, 0, i, "too many iterations") - assert.Equal(t, expected, output) -} - -func TestNthSlice(t *testing.T) { - testIter(t, func(t *testing.T, bits []uint64) BitIterator { - iter := BitsFromSlice(bits) - return iter - }) -} - -func TestNthRuns(t *testing.T) { - testIter(t, func(t *testing.T, bits []uint64) BitIterator { - riter, err := RunsFromSlice(bits) - assert.NoError(t, err) - biter, err := BitsFromRuns(riter) - assert.NoError(t, err) - return biter - }) -} - -func testIter(t 
*testing.T, ctor func(t *testing.T, bits []uint64) BitIterator) { - for i := 0; i < 10; i++ { - bits := randomBits(1000, 1500) - iter := ctor(t, bits) - - n, err := iter.Nth(10) - assert.NoError(t, err) - assert.Equal(t, bits[10], n) - - n, err = iter.Nth(0) - assert.NoError(t, err) - assert.Equal(t, bits[11], n) - - n, err = iter.Nth(1) - assert.NoError(t, err) - assert.Equal(t, bits[13], n) - - n, err = iter.Next() - assert.NoError(t, err) - assert.Equal(t, bits[14], n) - - runs, err := RunsFromBits(iter) - assert.NoError(t, err) - - remainingBits, err := SliceFromRuns(runs) - assert.NoError(t, err) - - assert.Equal(t, bits[15:], remainingBits) - } - for i := 0; i < 10; i++ { - bits := randomBits(1000, 1500) - iter := ctor(t, bits) - - last, err := iter.Nth(uint64(len(bits) - 1)) - assert.NoError(t, err) - assert.Equal(t, bits[len(bits)-1], last) - assert.False(t, iter.HasNext()) - _, err = iter.Nth(0) - assert.Equal(t, ErrEndOfIterator, err) - } -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go index 0aca2578f1..df6f77ad39 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec.go @@ -34,14 +34,50 @@ var bitMasks = [9]byte{ 0xFF, } +func (bv *rbitvec) GetByte() byte { + // Advancing byte by byte is simpler than advancing an odd number of + // bits because we _always_ load the next byte. + res := byte(bv.bits) + bv.bits >>= 8 + + if bv.index < len(bv.vec) { // if vector allows + // add bits onto the end of temporary storage + bv.bits |= uint16(bv.vec[bv.index]) << (bv.bitCap - 8) + } + + bv.index += 1 + return res +} + +func (bv *rbitvec) GetBit() bool { + // The specialized GetBit is easier for the compiler to optimize, for some reason. 
+ + res := (bv.bits&0x1 != 0) + bv.bits >>= 1 + bv.bitCap -= 1 + + if bv.index < len(bv.vec) { // if vector allows + // add bits onto the end of temporary storage + bv.bits |= uint16(bv.vec[bv.index]) << bv.bitCap + } + + // When we advance one by one, this branch is very predictable (and + // faster than fancy math). + if bv.bitCap < 8 { + bv.index += 1 + bv.bitCap += 8 + } + return res +} + func (bv *rbitvec) Get(count byte) byte { res := byte(bv.bits) & bitMasks[count] // select count bits - bv.bits = bv.bits >> count // remove those bits from storage - bv.bitCap = bv.bitCap - count // decrease nuber of stored bits + bv.bits >>= count // remove those bits from storage + bv.bitCap -= count // decrease nuber of stored bits if bv.index < len(bv.vec) { // if vector allows // add bits onto the end of temporary storage - bv.bits = bv.bits | uint16(bv.vec[bv.index])<> 7 // inc == 1 iff bitcap<8 (+10% perf) - bv.index = bv.index + int(inc) // increase index if we need more bits - bv.bitCap = bv.bitCap + inc*8 // increase bitCap by 8 + inc := (bv.bitCap - 8) >> 7 // inc == 1 iff bitcap<8 (+10% perf) + bv.index += int(inc) // increase index if we need more bits + bv.bitCap += inc * 8 // increase bitCap by 8 return res } @@ -69,25 +105,30 @@ func writeBitvec(buf []byte) *wbitvec { } type wbitvec struct { - buf []byte // buffer we will be saving to - index int // index of at which the next byte will be saved + buf []byte // buffer we will be saving to bits uint16 // temporary storage for bits bitCap byte // number of bits stored in temporary storage } +// Returns the resulting bitvector, with any trailing zero bytes removed. 
func (bv *wbitvec) Out() []byte { if bv.bitCap != 0 { // if there are some bits in temporary storage we need to save them - bv.buf = append(bv.buf, 0)[:bv.index+1] - bv.buf[bv.index] = byte(bv.bits) + bv.buf = append(bv.buf, byte(bv.bits)) } if bv.bitCap > 8 { // if we store some needed bits in second byte, save them also bv.buf = append(bv.buf, byte(bv.bits>>8)) - bv.index++ - bv.bits = bv.bits - 8 } + bv.bitCap = 0 + bv.bits = 0 + + // Minimally encode. + for len(bv.buf) > 0 && bv.buf[len(bv.buf)-1] == 0 { + bv.buf = bv.buf[:len(bv.buf)-1] + } + return bv.buf } @@ -97,18 +138,9 @@ func (bv *wbitvec) Put(val byte, count byte) { // increase bitCap by the number of bits bv.bitCap = bv.bitCap + count - // increase len of the buffer if it is needed - if bv.index+1 > cap(bv.buf) { - bv.buf = append(bv.buf, 0) + if bv.bitCap >= 8 { + bv.buf = append(bv.buf, byte(bv.bits)) + bv.bitCap -= 8 + bv.bits >>= 8 } - bv.buf = bv.buf[:bv.index+1] - // save the bits - bv.buf[bv.index] = byte(bv.bits) - - // Warning, dragons again - // if bitCap is greater than 7 it underflows, same thing as in Put - inc := (7 - bv.bitCap) >> 7 // inc == 1 iff bitcap>=8 - bv.index = bv.index + int(inc) // increase index for the next save - bv.bitCap = bv.bitCap - inc*8 // we store less bits now in temporary buffer - bv.bits = bv.bits >> (inc * 8) // we can discard those bits as they were saved } diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec_test.go deleted file mode 100644 index 6938bd7932..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/bitvec_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package rlepluslazy - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestReadBitVec(t *testing.T) { - buf := []byte{0x0, 0xff} - bv := readBitvec(buf) - - o := bv.Get(1) - assert.EqualValues(t, 0, o) - - o = bv.Get(8) - assert.EqualValues(t, 0x80, o) - - o = bv.Get(7) - 
assert.EqualValues(t, 0x7f, o) -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/bitvector.go b/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/bitvector.go deleted file mode 100644 index e5094e3e7e..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/bitvector.go +++ /dev/null @@ -1,154 +0,0 @@ -package rleplus - -import ( - "errors" - "log" -) - -var ( - // ErrOutOfRange - the index passed is out of range for the BitVector - ErrOutOfRange = errors.New("index out of range") -) - -// BitNumbering indicates the ordering of bits, either -// least-significant bit in position 0, or most-significant bit -// in position 0. -// -// It it used in 3 ways with BitVector: -// 1. Ordering of bits within the Buf []byte structure -// 2. What order to add bits when using Extend() -// 3. What order to read bits when using Take() -// -// https://en.wikipedia.org/wiki/Bit_numbering -type BitNumbering int - -const ( - // LSB0 - bit ordering starts with the low-order bit - LSB0 BitNumbering = iota - - // MSB0 - bit ordering starts with the high-order bit - MSB0 -) - -// BitVector is used to manipulate ordered collections of bits -type BitVector struct { - Buf []byte - - // BytePacking is the bit ordering within bytes - BytePacking BitNumbering - - // Len is the logical number of bits in the vector. - // The last byte in Buf may have undefined bits if Len is not a multiple of 8 - Len uint -} - -// NewBitVector constructs a new BitVector from a slice of bytes. -// -// The bytePacking parameter is required to know how to interpret the bit ordering within the bytes. -func NewBitVector(buf []byte, bytePacking BitNumbering) *BitVector { - return &BitVector{ - BytePacking: bytePacking, - Buf: buf, - Len: uint(len(buf) * 8), - } -} - -// Push adds a single bit to the BitVector. -// -// Although it takes a byte, only the low-order bit is used, so just use 0 or 1. 
-func (v *BitVector) Push(val byte) { - if v.Len%8 == 0 { - v.Buf = append(v.Buf, 0) - } - lastIdx := v.Len / 8 - - switch v.BytePacking { - case LSB0: - v.Buf[lastIdx] |= (val & 1) << (v.Len % 8) - default: - v.Buf[lastIdx] |= (val & 1) << (7 - (v.Len % 8)) - } - - v.Len++ -} - -// Get returns a single bit as a byte -- either 0 or 1 -func (v *BitVector) Get(idx uint) (byte, error) { - if idx >= v.Len { - return 0, ErrOutOfRange - } - blockIdx := idx / 8 - - switch v.BytePacking { - case LSB0: - return v.Buf[blockIdx] >> (idx % 8) & 1, nil - default: - return v.Buf[blockIdx] >> (7 - idx%8) & 1, nil - } -} - -// Extend adds up to 8 bits to the receiver -// -// Given a byte b == 0b11010101 -// v.Extend(b, 4, LSB0) would add < 1, 0, 1, 0 > -// v.Extend(b, 4, MSB0) would add < 1, 1, 0, 1 > -// -// Panics if count is out of range -func (v *BitVector) Extend(val byte, count uint, order BitNumbering) { - if count > 8 { - log.Panicf("invalid count") - } - - for i := uint(0); i < count; i++ { - switch order { - case LSB0: - v.Push((val >> i) & 1) - default: - v.Push((val >> (7 - i)) & 1) - } - } -} - -// Take reads up to 8 bits at the given index. -// -// Given a BitVector < 1, 1, 0, 1, 0, 1, 0, 1 > -// v.Take(0, 4, LSB0) would return 0b00001011 -// v.Take(0, 4, MSB0) would return 0b11010000 -// -// Panics if count is out of range -func (v *BitVector) Take(index uint, count uint, order BitNumbering) (out byte) { - if count > 8 { - log.Panicf("invalid count") - } - - for i := uint(0); i < count; i++ { - val, _ := v.Get(index + i) - - switch order { - case LSB0: - out |= val << i - default: - out |= val << (7 - i) - } - } - return -} - -// Iterator returns a function, which when invoked, returns the number -// of bits requested, and increments an internal cursor. 
-// -// When the end of the BitVector is reached, it returns zeroes indefinitely -// -// Panics if count is out of range -func (v *BitVector) Iterator(order BitNumbering) func(uint) byte { - cursor := uint(0) - return func(count uint) (out byte) { - if count > 8 { - log.Panicf("invalid count") - } - - out = v.Take(cursor, count, order) - cursor += count - return - } -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/bitvector_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/bitvector_test.go deleted file mode 100644 index eb3c8db0de..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/bitvector_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package rleplus - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestBitVector(t *testing.T) { - t.Run("zero value", func(t *testing.T) { - var v BitVector - - assert.Equal(t, LSB0, v.BytePacking) - }) - - t.Run("Push", func(t *testing.T) { - // MSB0 bit numbering - v := BitVector{BytePacking: MSB0} - v.Push(1) - v.Push(0) - v.Push(1) - v.Push(1) - - assert.Equal(t, byte(176), v.Buf[0]) - - // LSB0 bit numbering - v = BitVector{BytePacking: LSB0} - v.Push(1) - v.Push(0) - v.Push(1) - v.Push(1) - - assert.Equal(t, byte(13), v.Buf[0]) - }) - - t.Run("Get", func(t *testing.T) { - bits := []byte{1, 0, 1, 1, 0, 0, 1, 0} - - for _, numbering := range []BitNumbering{MSB0, LSB0} { - v := BitVector{BytePacking: numbering} - - for _, bit := range bits { - v.Push(bit) - } - - for idx, expected := range bits { - actual, _ := v.Get(uint(idx)) - assert.Equal(t, expected, actual) - } - } - }) - - t.Run("Extend", func(t *testing.T) { - val := byte(171) // 0b10101011 - - var v BitVector - - // MSB0 bit numbering - v = BitVector{} - v.Extend(val, 4, MSB0) - assertBitVector(t, []byte{1, 0, 1, 0}, v) - v.Extend(val, 5, MSB0) - assertBitVector(t, []byte{1, 0, 1, 0, 1, 0, 1, 0, 1}, v) - - // LSB0 bit numbering - v = BitVector{} - 
v.Extend(val, 4, LSB0) - assertBitVector(t, []byte{1, 1, 0, 1}, v) - v.Extend(val, 5, LSB0) - assertBitVector(t, []byte{1, 1, 0, 1, 1, 1, 0, 1, 0}, v) - }) - - t.Run("invalid counts to Take/Extend/Iterator cause panics", func(t *testing.T) { - v := BitVector{BytePacking: LSB0} - - assert.Panics(t, func() { v.Extend(0xff, 9, LSB0) }) - - assert.Panics(t, func() { v.Take(0, 9, LSB0) }) - - next := v.Iterator(LSB0) - assert.Panics(t, func() { next(9) }) - }) - - t.Run("Take", func(t *testing.T) { - var v BitVector - - bits := []byte{1, 0, 1, 0, 1, 0, 1, 1} - for _, bit := range bits { - v.Push(bit) - } - - assert.Equal(t, byte(176), v.Take(4, 4, MSB0)) - assert.Equal(t, byte(13), v.Take(4, 4, LSB0)) - }) - - t.Run("Iterator", func(t *testing.T) { - var buf []byte - - // make a bitvector of 256 sample bits - for i := 0; i < 32; i++ { - buf = append(buf, 128+32) - } - - v := NewBitVector(buf, LSB0) - - next := v.Iterator(LSB0) - - // compare to Get() - for i := uint(0); i < v.Len; i++ { - expected, _ := v.Get(i) - assert.Equal(t, expected, next(1)) - } - - // out of range should return zero - assert.Equal(t, byte(0), next(1)) - assert.Equal(t, byte(0), next(8)) - - // compare to Take() - next = v.Iterator(LSB0) - assert.Equal(t, next(5), v.Take(0, 5, LSB0)) - assert.Equal(t, next(8), v.Take(5, 8, LSB0)) - }) -} - -// Note: When using this helper assertion, expectedBits should *only* be 0s and 1s. 
-func assertBitVector(t *testing.T, expectedBits []byte, actual BitVector) { - assert.Equal(t, uint(len(expectedBits)), actual.Len) - - for idx, bit := range expectedBits { - actualBit, err := actual.Get(uint(idx)) - assert.NoError(t, err) - assert.Equal(t, bit, actualBit) - } -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/rleplus.go b/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/rleplus.go deleted file mode 100644 index 1c48ebcec0..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/rleplus.go +++ /dev/null @@ -1,202 +0,0 @@ -package rleplus - -import ( - "encoding/binary" - "errors" - "fmt" - "sort" -) - -// Version is the 2 lowest bits of this constant -const Version = 0 - -var ( - // ErrRunLengthTooLarge - data implies a run-length which isn't supported - ErrRunLengthTooLarge = fmt.Errorf("run length too large for RLE+ version %d", Version) - - // ErrDecode - invalid encoding for this version - ErrDecode = fmt.Errorf("invalid encoding for RLE+ version %d", Version) - - // ErrWrongVersion - wrong version of RLE+ - ErrWrongVersion = errors.New("invalid RLE+ version") -) - -// Encode returns the RLE+ representation of the provided integers. -// Also returned is the number of bits required by this encoding, -// which is not necessarily on a byte boundary. -// -// The RLE+ spec is here: https://github.com/filecoin-project/specs/blob/master/data-structures.md#rle-bitset-encoding -// and is described by the BNF Grammar: -// -// ::=
-//
::= -// ::= "00" -// ::= | "" -// ::= | | -// ::= "1" -// ::= "01" -// ::= "00" -// ::= "0" | "1" -// -// Filecoin specific: -// The encoding is returned as a []byte, each byte packed starting with the low-order bit (LSB0) -func Encode(ints []uint64) ([]byte, uint, error) { - v := BitVector{BytePacking: LSB0} - firstBit, runs := RunLengths(ints) - - // Add version header - v.Extend(Version, 2, LSB0) - - v.Push(firstBit) - - for _, run := range runs { - switch { - case run == 1: - v.Push(1) - case run < 16: - v.Push(0) - v.Push(1) - v.Extend(byte(run), 4, LSB0) - case run >= 16: - v.Push(0) - v.Push(0) - // 10 bytes needed to encode MaxUint64 - buf := make([]byte, 10) - numBytes := binary.PutUvarint(buf, run) - for i := 0; i < numBytes; i++ { - v.Extend(buf[i], 8, LSB0) - } - default: - return nil, 0, ErrRunLengthTooLarge - } - } - - return v.Buf, v.Len, nil -} - -// Decode returns integers represented by the given RLE+ encoding -// -// The length of the encoding is not specified. It is inferred by -// reading zeroes from the (possibly depleted) BitVector, by virtue -// of the behavior of BitVector.Take() returning 0 when the end of -// the BitVector has been reached. This has the downside of not -// being able to detect corrupt encodings. 
-// -// The passed []byte should be packed in LSB0 bit numbering -func Decode(buf []byte) (ints []uint64, err error) { - if len(buf) == 0 { - return - } - - v := NewBitVector(buf, LSB0) - take := v.Iterator(LSB0) - - // Read version and check - // Version check - ver := take(2) - if ver != Version { - return nil, ErrWrongVersion - } - - curIdx := uint64(0) - curBit := take(1) - var runLength int - done := false - - for done == false { - y := take(1) - switch y { - case 1: - runLength = 1 - case 0: - val := take(1) - - if val == 1 { - // short block - runLength = int(take(4)) - } else { - // long block - var buf []byte - for { - b := take(8) - buf = append(buf, b) - - if b&0x80 == 0 { - break - } - - // 10 bytes is required to store math.MaxUint64 in a uvarint - if len(buf) > 10 { - return nil, ErrDecode - } - } - x, _ := binary.Uvarint(buf) - - if x == 0 { - done = true - } - runLength = int(x) - } - } - - if curBit == 1 { - for j := 0; j < runLength; j++ { - ints = append(ints, curIdx+uint64(j)) - } - } - curIdx += uint64(runLength) - curBit = 1 - curBit - } - - return -} - -// RunLengths transforms integers into its bit-set-run-length representation. -// -// A set of unsigned integers { 0, 2, 4, 5, 6 } can be thought of as -// indices into a bitset { 1, 0, 1, 0, 1, 1, 1 } where bitset[index] == 1. -// -// The bit set run lengths of this set would then be { 1, 1, 1, 1, 3 }, -// representing lengths of runs alternating between 1 and 0, starting -// with a first bit of 1. -// -// Duplicated numbers are ignored. 
-// -// This is a helper function for Encode() -func RunLengths(ints []uint64) (firstBit byte, runs []uint64) { - if len(ints) == 0 { - return - } - - // Sort our incoming numbers - sort.Slice(ints, func(i, j int) bool { return ints[i] < ints[j] }) - - prev := ints[0] - - // Initialize our return value - if prev == 0 { - firstBit = 1 - } - - if firstBit == 0 { - // first run of zeroes - runs = append(runs, prev) - } - runs = append(runs, 1) - - for _, cur := range ints[1:] { - delta := cur - prev - switch { - case delta == 1: - runs[len(runs)-1]++ - case delta > 1: - // add run of zeroes if there is a gap - runs = append(runs, delta-1) - runs = append(runs, 1) - default: - // repeated number? - } - prev = cur - } - return -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/rleplus_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/rleplus_test.go deleted file mode 100644 index 0147b6fbe5..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/internal/rleplus/rleplus_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package rleplus - -import ( - "fmt" - "math" - "sort" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestRleplus(t *testing.T) { - - t.Run("Encode", func(t *testing.T) { - // Encode an intset - ints := []uint64{ - // run of 1 - 0, - // gap of 1 - // run of 1 - 2, - // gap of 1 - // run of 3 - 4, 5, 6, - // gap of 4 - // run of 17 - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - } - - expectedBits := []byte{ - 0, 0, // version - 1, // first bit - 1, // run of 1 - 1, // gap of 1 - 1, // run of 1 - 1, // gap of 1 - 0, 1, 1, 1, 0, 0, // run of 3 - 0, 1, 0, 0, 1, 0, // gap of 4 - - // run of 17 < 0 0 (varint) > - 0, 0, - 1, 0, 0, 0, 1, 0, 0, 0, - } - - v := BitVector{} - for _, bit := range expectedBits { - v.Push(bit) - } - actualBytes, _, err := Encode(ints) - assert.NoError(t, err) - - assert.Equal(t, len(v.Buf), len(actualBytes)) - for idx, expected := 
range v.Buf { - assert.Equal( - t, - fmt.Sprintf("%08b", expected), - fmt.Sprintf("%08b", actualBytes[idx]), - ) - } - }) - - t.Run("Encode allows all runs sizes possible uint64", func(t *testing.T) { - // create a run of math.MaxUint64 - ints := []uint64{math.MaxUint64} - - // There would be 64 bits(1) for the UvarInt, totally 9 bytes. - expected := []byte{0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x20} - encodeBytes, _, err := Encode(ints) - assert.NoError(t, err) - for idx, v := range encodeBytes { - assert.Equal( - t, - fmt.Sprintf("%8b", v), - fmt.Sprintf("%8b", expected[idx]), - ) - } - }) - - t.Run("Encode for some big numbers", func(t *testing.T) { - // create a run of math.MaxUint64 - ints := make([]uint64, 1024) - - // ints {2^63 .. 2^63+1023} - for i := uint64(0); i < 1024; i++ { - ints[i] = uint64(1)<<63 + i - } - - expected := []byte{0x00, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x30, 0x00, 0x40, 0x04} - encodeBytes, _, err := Encode(ints) - assert.NoError(t, err) - for idx, v := range encodeBytes { - // fmt.Println(v, expected[idx]) - assert.Equal( - t, - fmt.Sprintf("%8b", v), - fmt.Sprintf("%8b", expected[idx]), - ) - } - }) - - t.Run("Decode", func(t *testing.T) { - testCases := [][]uint64{ - {}, - {1}, - {0}, - {0, 1, 2, 3}, - { - // run of 1 - 0, - // gap of 1 - // run of 1 - 2, - // gap of 1 - // run of 3 - 4, 5, 6, - // gap of 4 - // run of 17 - 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, - }, - } - - for _, tc := range testCases { - encoded, _, err := Encode(tc) - assert.NoError(t, err) - - result, err := Decode(encoded) - assert.NoError(t, err) - - sort.Slice(tc, func(i, j int) bool { return tc[i] < tc[j] }) - sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) - - assert.Equal(t, len(tc), len(result)) - - for idx, expected := range tc { - assert.Equal(t, expected, result[idx]) - } - } - }) - - t.Run("Decode version check", func(t *testing.T) { - _, err := 
Decode([]byte{0xff}) - assert.Error(t, err, "invalid RLE+ version") - }) - - t.Run("Decode returns an error with a bad encoding", func(t *testing.T) { - // create an encoding with a buffer with a run which is too long - _, err := Decode([]byte{0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}) - assert.Error(t, err, "invalid encoding for RLE+ version 0") - }) - - t.Run("outputs same as reference implementation", func(t *testing.T) { - // Encoding bitvec![LittleEndian; 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - // in the Rust reference implementation gives an encoding of [223, 145, 136, 0] (without version field) - // The bit vector is equivalent to the integer set { 0, 2, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 } - - // This is the above reference output with a version header "00" manually added - referenceEncoding := []byte{124, 71, 34, 2} - - expectedNumbers := []uint64{0, 2, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27} - - encoded, _, err := Encode(expectedNumbers) - assert.NoError(t, err) - - // Our encoded bytes are the same as the ref bytes - assert.Equal(t, len(referenceEncoding), len(encoded)) - for idx, expected := range referenceEncoding { - assert.Equal(t, expected, encoded[idx]) - } - - decoded, err := Decode(referenceEncoding) - assert.NoError(t, err) - - // Our decoded integers are the same as expected - sort.Slice(decoded, func(i, j int) bool { return decoded[i] < decoded[j] }) - assert.Equal(t, len(expectedNumbers), len(decoded)) - for idx, expected := range expectedNumbers { - assert.Equal(t, expected, decoded[idx]) - } - }) - - t.Run("RunLengths", func(t *testing.T) { - testCases := []struct { - ints []uint64 - first byte - runs []uint64 - }{ - // empty - {}, - - // leading with ones - {[]uint64{0}, 1, []uint64{1}}, - {[]uint64{0, 1}, 1, []uint64{2}}, - {[]uint64{0, 0xffffffff, 0xffffffff + 1}, 1, []uint64{1, 
0xffffffff - 1, 2}}, - - // leading with zeroes - {[]uint64{1}, 0, []uint64{1, 1}}, - {[]uint64{2}, 0, []uint64{2, 1}}, - {[]uint64{10, 11, 13, 20}, 0, []uint64{10, 2, 1, 1, 6, 1}}, - {[]uint64{10, 11, 11, 13, 20, 10, 11, 13, 20}, 0, []uint64{10, 2, 1, 1, 6, 1}}, - } - - for _, testCase := range testCases { - first, runs := RunLengths(testCase.ints) - assert.Equal(t, testCase.first, first) - assert.Equal(t, len(testCase.runs), len(runs)) - for idx, runLength := range testCase.runs { - assert.Equal(t, runLength, runs[idx]) - } - } - }) -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose_test.go deleted file mode 100644 index 7e43e01911..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/joinclose_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package rlepluslazy - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestJoinClose(t *testing.T) { - inBits := []uint64{0, 1, 4, 5, 9, 14} - var tests = []struct { - name string - given []uint64 - expected []uint64 - closeness uint64 - }{ - {"closeness 0", inBits, []uint64{0, 1, 4, 5, 9, 14}, 0}, - {"closeness 2", inBits, []uint64{0, 1, 2, 3, 4, 5, 9, 14}, 2}, - {"closeness 3", inBits, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 14}, 3}, - {"closeness 4", inBits, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 4}, - } - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - a, err := RunsFromSlice(tt.given) - assert.NoError(t, err) - jc, err := JoinClose(a, tt.closeness) - assert.NoError(t, err) - bits, err := SliceFromRuns(jc) - assert.Equal(t, tt.expected, bits) - }) - } - -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go index 791a48e14d..fe19c17971 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go +++ 
b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus.go @@ -17,8 +17,8 @@ var ( ) type RLE struct { - buf []byte - runs []Run + buf []byte + validated bool } func FromBuf(buf []byte) (RLE, error) { @@ -31,29 +31,49 @@ func FromBuf(buf []byte) (RLE, error) { return rle, nil } -func (rle *RLE) RunIterator() (RunIterator, error) { - if rle.runs == nil { +// Bytes returns the encoded RLE. +// +// Do not modify. +func (rle *RLE) Bytes() []byte { + return rle.buf +} + +// Validate is a separate function to show up on profile for repeated decode evaluation +func (rle *RLE) Validate() error { + if !rle.validated { source, err := DecodeRLE(rle.buf) if err != nil { - return nil, xerrors.Errorf("decoding RLE: %w", err) + return xerrors.Errorf("decoding RLE: %w", err) } var length uint64 - var runs []Run + for source.HasNext() { r, err := source.NextRun() if err != nil { - return nil, xerrors.Errorf("reading run: %w", err) + return xerrors.Errorf("reading run: %w", err) } if math.MaxUint64-r.Len < length { - return nil, xerrors.New("RLE+ overflows") + return xerrors.New("RLE+ overflows") } length += r.Len - runs = append(runs, r) } - rle.runs = runs + rle.validated = true } + return nil +} - return &RunSliceIterator{Runs: rle.runs}, nil +func (rle *RLE) RunIterator() (RunIterator, error) { + err := rle.Validate() + if err != nil { + return nil, xerrors.Errorf("validation failed: %w", err) + } + + source, err := DecodeRLE(rle.buf) + if err != nil { + return nil, xerrors.Errorf("decoding RLE: %w", err) + } + + return source, nil } func (rle *RLE) Count() (uint64, error) { @@ -105,7 +125,7 @@ func (rle *RLE) UnmarshalJSON(b []byte) error { return err } - rle.runs = []Run{} + runs := []Run{} val := false for i, v := range buf { if v == 0 { @@ -113,13 +133,18 @@ func (rle *RLE) UnmarshalJSON(b []byte) error { return xerrors.New("Cannot have a zero-length run except at start") } } else { - rle.runs = append(rle.runs, Run{ + runs = append(runs, Run{ Val: val, Len: v, }) } val 
= !val } + enc, err := EncodeRuns(&RunSliceIterator{Runs: runs}, []byte{}) + if err != nil { + return xerrors.Errorf("encoding runs: %w", err) + } + rle.buf = enc return nil } diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_golden_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_golden_test.go deleted file mode 100644 index 091d9826c4..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_golden_test.go +++ /dev/null @@ -1,3 +0,0 @@ -package rlepluslazy - -var goldenRLE = []byte{0x20, 0xfc, 0x40, 0xc2, 0xcc, 0xe5, 0xd8, 0xc1, 0xe1, 0x1e, 0x23, 0xd3, 0x2, 0x2e, 0xcd, 0x3, 0x3e, 0x83, 0x16, 0x26, 0x3a, 0x5c, 0x30, 0x8e, 0x0, 0x5, 0xcc, 0x24, 0xe, 0x96, 0x15, 0x48, 0xa0, 0x2a, 0x40, 0x4, 0x92, 0xe, 0x94, 0xc0, 0x48, 0x91, 0xee, 0x5, 0x28, 0x98, 0x55, 0x90, 0xa0, 0xa4, 0x50, 0x98, 0x14, 0x40, 0x4, 0x59, 0xa, 0x22, 0x0, 0x74, 0xb0, 0x40, 0x66, 0x30, 0xf9, 0x66, 0x90, 0x51, 0xc7, 0x70, 0x74, 0x40, 0x48, 0x7f, 0xf0, 0x80, 0x24, 0x20, 0x85, 0x58, 0x6, 0x7, 0x66, 0x4, 0x87, 0xe5, 0x5, 0x28, 0x0, 0xa4, 0xf0, 0x61, 0x2e, 0x90, 0x8, 0x44, 0x70, 0x38, 0x34, 0xf0, 0x8, 0x4f, 0x70, 0x68, 0xad, 0xd0, 0x90, 0x1e, 0x90, 0x38, 0xc1, 0x85, 0x76, 0x4, 0x15, 0x7c, 0x4, 0x28, 0x28, 0x17, 0x70, 0xe0, 0x15, 0x0, 0x82, 0xfb, 0x11, 0xb, 0x76, 0x9, 0x22, 0xb8, 0x2f, 0x90, 0x20, 0x5f, 0x80, 0x84, 0xc9, 0x10, 0x85, 0x66, 0x9, 0x5, 0xc9, 0x3, 0x2d, 0x19, 0xa4, 0x5a, 0x70, 0x45, 0xa5, 0x40, 0xc2, 0x5, 0x6c, 0x4a, 0xf, 0x64, 0xf4, 0x19, 0xb0, 0x40, 0x28, 0x8b, 0x2, 0x6c, 0x20, 0x3e, 0x90, 0x40, 0x1, 0x19, 0xea, 0x9, 0x20, 0x60, 0x2f, 0x50, 0x60, 0x15, 0x0, 0x4, 0x69, 0x6, 0xb1, 0x8c, 0xa9, 0x85, 0xc6, 0x1f, 0x13, 0x54, 0x3e, 0x58, 0x40, 0x17, 0x60, 0x41, 0x2e, 0x8a, 0x42, 0x3c, 0x8b, 0xb, 0x3f, 0x8, 0x10, 0x5c, 0x32, 0x38, 0xd4, 0x1e, 0x68, 0x18, 0x5b, 0x70, 0xc1, 0x2c, 0x6, 0x17, 0x6c, 0x17, 0xf8, 0x74, 0x36, 0x28, 0x9c, 0xe, 0x20, 0x2, 0xa1, 0x84, 0xf, 0xa2, 0x82, 0xe, 0xbf, 0x82, 0x45, 0x29, 0x84, 0x7, 0x16, 0x25, 0x10, 0x12, 
0x38, 0x8c, 0x13, 0x50, 0x41, 0xff, 0x3, 0x13, 0x70, 0x16, 0x80, 0x81, 0x63, 0x85, 0x8f, 0xa0, 0x81, 0xa, 0xe1, 0x80, 0x84, 0xa3, 0x81, 0x89, 0xff, 0x84, 0xc7, 0x3c, 0x87, 0x49, 0xbb, 0x81, 0x4b, 0x21, 0x82, 0x3, 0x2f, 0x83, 0x84, 0x29, 0x83, 0x8c, 0xe0, 0x0, 0x40, 0x8c, 0xda, 0x20, 0x42, 0x6, 0x13, 0x64, 0x7, 0x24, 0x85, 0xc, 0xa6, 0x21, 0x24, 0x6c, 0x7, 0x74, 0x20, 0x13, 0x3c, 0x2, 0x13, 0x54, 0xba, 0x1d, 0x14, 0x9e, 0xb, 0xe0, 0x0, 0x39, 0x82, 0xa, 0x72, 0xb, 0x80, 0x60, 0x53, 0x20, 0xd5, 0xe, 0x32, 0xd5, 0xe, 0x12, 0xb9, 0x1, 0x44, 0x90, 0x27, 0x51, 0x59, 0xe0, 0x12, 0xc3, 0x4, 0xe8, 0x1, 0x78, 0x40, 0xd8, 0xe0, 0x3, 0x7b, 0x60, 0x93, 0x6b, 0x20, 0x72, 0xf8, 0x20, 0xf1, 0xe1, 0xe0, 0x4d, 0x90, 0xdf, 0x2, 0x1f, 0x69, 0x3, 0x36, 0x30, 0x8d, 0x20, 0x82, 0x58, 0xa, 0x8f, 0xec, 0x2, 0x8d, 0xe8, 0x0, 0x11, 0x55, 0x81, 0x58, 0x17, 0xa0, 0x82, 0xa5, 0x81, 0x88, 0xb6, 0x83, 0x88, 0xed, 0x86, 0x88, 0xe0, 0x86, 0x4b, 0x6e, 0x82, 0x45, 0xe1, 0x89, 0xc8, 0x2c, 0x41, 0x78, 0x9, 0x12, 0xbd, 0x91, 0x50, 0x1, 0x42, 0x90, 0x85, 0x70, 0x69, 0xb5, 0x20, 0x83, 0x5e, 0x2, 0x8d, 0xfb, 0x2, 0xf, 0xfb, 0xa, 0x9, 0xcc, 0x4, 0x8b, 0xe4, 0x5, 0x87, 0xc9, 0x2, 0xf, 0xfc, 0x1, 0x5c, 0x8, 0xf, 0xd0, 0x1, 0x4e, 0xd0, 0x60, 0x4d, 0x50, 0xa1, 0x3c, 0xf0, 0x20, 0x6c, 0x88, 0xe1, 0x41, 0x5c, 0x11, 0x1c, 0xc8, 0x25, 0x4c, 0xc0, 0x1d, 0x1c, 0x4e, 0x61, 0x54, 0x2a, 0x5, 0x14, 0xf6, 0x54, 0x3c, 0xb, 0x44, 0xe4, 0x5, 0x64, 0x42, 0x1f, 0x64, 0x32, 0x23, 0xa0, 0x20, 0xf9, 0x1, 0xc, 0x40, 0x27, 0xf8, 0x40, 0x3c, 0xc0, 0x82, 0x72, 0x1, 0xd, 0xd1, 0x43, 0x11, 0x1, 0xd, 0xaa, 0x35, 0xc8, 0xe0, 0x1e, 0x42, 0x41, 0x5b, 0x20, 0x8b, 0x4, 0xe, 0xc9, 0x3, 0xa, 0xea, 0x1, 0x3e, 0x5f, 0x12, 0xab, 0x7, 0x40, 0xf0, 0xad, 0x60, 0x42, 0x6f, 0xa1, 0xd2, 0x39, 0x60, 0x91, 0x64, 0xf1, 0xe4, 0x10, 0x3f, 0x60, 0xc2, 0x5c, 0x94, 0x62, 0x7, 0x1f, 0x67, 0x5, 0x87, 0xcc, 0x7, 0x9f, 0xee, 0xe, 0x13, 0xd8, 0x2, 0x38, 0xa0, 0xb5, 0x20, 0x2, 0x4e, 0x1, 0x85, 0x7c, 0xc, 0x4e, 0xb0, 0xde, 0x90, 0x80, 0x2c, 0x50, 0xd8, 0x77, 0x88, 
0xf6, 0x80, 0x4, 0x22, 0x85, 0xce, 0xf2, 0x82, 0xcb, 0xb4, 0x5, 0x14, 0x68, 0xa9, 0x70, 0x77, 0xb8, 0x34, 0x1b, 0xf0, 0x1, 0xec, 0x1, 0x13, 0x58, 0x1a, 0x78, 0x88, 0xb, 0x58, 0x80, 0x27, 0x98, 0x20, 0x4b, 0x50, 0x41, 0x69, 0x84, 0xc9, 0x63, 0x81, 0x8, 0x7b, 0x6, 0x10, 0xd8, 0x1e, 0x38, 0x1c, 0x17, 0x68, 0x0, 0x2e, 0xa8, 0xe4, 0x23, 0x78, 0xdc, 0xa, 0x28, 0x38, 0xb, 0x28, 0xcc, 0x1f, 0x40, 0x1, 0x6f, 0x83, 0x88, 0x75, 0x82, 0xc7, 0x8c, 0x4, 0xec, 0x8, 0x28, 0xcc, 0x7b, 0xe4, 0xb8, 0x0, 0xe, 0x46, 0x35, 0x2c, 0xc2, 0x2d, 0xb8, 0xe0, 0xb5, 0x42, 0xa2, 0x75, 0x40, 0x2, 0xfc, 0xc4, 0x43, 0x98, 0x41, 0xa1, 0x56, 0x40, 0xc4, 0x5a, 0x1, 0x13, 0xda, 0x33, 0x5c, 0xf2, 0x7, 0x44, 0x74, 0x3f, 0x5c, 0x5a, 0x49, 0x24, 0x64, 0x3, 0x2c, 0xd8, 0x23, 0x34, 0x78, 0x19, 0xb8, 0x61, 0x15, 0x42, 0xa3, 0xc6, 0x46, 0x73, 0x80, 0xa, 0x52, 0x17, 0x0, 0xa1, 0x7d, 0x40, 0x26, 0x19, 0xc1, 0x6, 0xf0, 0x40, 0x45, 0x90, 0xc2, 0x81, 0xf6, 0xc0, 0x22, 0xf3, 0x80, 0x15, 0x2, 0x7, 0x2c, 0x9a, 0x3, 0xd8, 0xc0, 0x95, 0x21, 0xbb, 0x4, 0x80, 0x0, 0x99, 0xe0, 0x90, 0x19, 0x21, 0x93, 0xfe, 0x21, 0x43, 0x1a, 0x90, 0x64, 0x2, 0x2e, 0x20, 0x65, 0xc8, 0x79, 0x86, 0xc4, 0x78, 0x4, 0x2a, 0x38, 0x9e, 0x50, 0x82, 0xaf, 0x5, 0x17, 0x50, 0x33, 0xa0, 0x42, 0x21, 0x5, 0x24, 0xc0, 0xf, 0x64, 0xb0, 0x40, 0xe5, 0x3a, 0x42, 0xc1, 0xfd, 0x2, 0x3c, 0x10, 0x9, 0x88, 0x40, 0x5a, 0x40, 0x84, 0x11, 0x41, 0x83, 0x7f, 0x42, 0x82, 0x10, 0x3, 0x8, 0x8a, 0x1b, 0xc8, 0xc0, 0xb2, 0x81, 0x16, 0x6e, 0x68, 0x21, 0x1a, 0x2, 0x11, 0x30, 0x0, 0x42, 0x71, 0xc1, 0xe7, 0x89, 0x18, 0x60, 0x5, 0x3c, 0x70, 0x3c, 0xa2, 0x35, 0x14, 0xe6, 0x25, 0x98, 0x20, 0x97, 0x20, 0xa8, 0x2, 0x26, 0x8e, 0x12, 0x6c, 0x30, 0x2a, 0xa0, 0x22, 0x5e, 0xc2, 0x5, 0x8f, 0x8, 0xa, 0xea, 0x4, 0xa, 0x9b, 0x4, 0x3e, 0xf4, 0x3, 0x2e, 0x92, 0x5, 0x26, 0x65, 0xe0, 0x28, 0xe, 0x38, 0x50, 0xb0, 0x2f, 0x61, 0xe1, 0x6a, 0xa0, 0xf3, 0x2c, 0x12, 0x49, 0x6, 0x26, 0x20, 0x84, 0x30, 0x51, 0x5d, 0x90, 0xb0, 0x57, 0x70, 0x11, 0x3d, 0x70, 0x10, 0x53, 0x1c, 0x98, 0xf8, 0x54, 0x0, 0x2, 0xc3, 
0x10, 0x58, 0xb0, 0x4d, 0xb0, 0x20, 0xd, 0x71, 0x40, 0x4d, 0x20, 0x4, 0x79, 0x5, 0x1f, 0x7b, 0x1, 0x89, 0x69, 0x2, 0x28, 0xc8, 0x17, 0x30, 0x81, 0x15, 0xd0, 0xc0, 0x5f, 0x50, 0x19, 0x1e, 0x72, 0x4d, 0x8d, 0xc1, 0x1, 0x91, 0x7e, 0x5, 0x34, 0x30, 0xe, 0x91, 0xc9, 0x74, 0x20, 0x84, 0xd5, 0x13, 0xb, 0x61, 0xd, 0x8f, 0xd8, 0x0, 0x32, 0x88, 0x4e, 0x20, 0x4, 0xd7, 0x84, 0xbc, 0x6, 0x38, 0x6c, 0x1f, 0xc8, 0xe0, 0x12, 0x68, 0xb4, 0x16, 0xa0, 0x41, 0x3a, 0x7, 0x27, 0x90, 0x1a, 0x88, 0x18, 0xa, 0xa5, 0x71, 0x25, 0xf1, 0x9, 0xa8, 0x50, 0x9d, 0xe1, 0x82, 0xbc, 0x81, 0x4, 0xc9, 0x9, 0x7c, 0x10, 0x3e, 0x40, 0x5, 0xe9, 0x2, 0x3a, 0xce, 0xd, 0x5c, 0x70, 0xfa, 0x60, 0x91, 0x41, 0x9, 0xc7, 0x8, 0xa, 0x93, 0x3, 0x7c, 0xa0, 0x3f, 0xe0, 0x53, 0x4b, 0x40, 0x6, 0xea, 0xd, 0xa, 0xcd, 0x18, 0xa, 0x83, 0x5, 0x2a, 0x29, 0xe, 0xc3, 0x1, 0x54, 0xb0, 0x7c, 0x41, 0x4, 0xaf, 0x2, 0x54, 0x40, 0xbe, 0x20, 0x41, 0xa7, 0x21, 0x29, 0xe1, 0x62, 0x6a, 0x81, 0x8, 0xb8, 0x4, 0x64, 0xb0, 0x29, 0xa0, 0xb0, 0x7f, 0xe0, 0x0, 0x3a, 0xc0, 0x5, 0xcb, 0x2, 0xb8, 0xe0, 0x6e, 0xc0, 0x8, 0xda, 0x13, 0x78, 0xe0, 0x1a, 0x0, 0x4, 0xe2, 0x8, 0xa, 0x1b, 0x16, 0x9d, 0x35, 0x88, 0x0, 0xa, 0x61, 0xe3, 0x1d, 0x61, 0xa1, 0x8f, 0xe0, 0xe3, 0x7d, 0x0, 0x8, 0x56, 0x16, 0xb9, 0x5, 0x7c, 0x50, 0x5a, 0xe2, 0xd1, 0xc9, 0x40, 0x8, 0x93, 0xa, 0x36, 0xc4, 0x2, 0xe, 0xf7, 0x15, 0xa, 0xae, 0x1b, 0xa, 0x83, 0xd, 0x36, 0xe8, 0xa, 0x16, 0xde, 0x11, 0x3e, 0x89, 0xb, 0x50, 0x70, 0xd9, 0x10, 0xca, 0x0, 0x38, 0xc0, 0x8f, 0xc1, 0x83, 0x77, 0x86, 0xc, 0x1b, 0xe8, 0x0, 0x9e, 0x78, 0x8, 0xb, 0xa0, 0xc3, 0xb7, 0x84, 0x82, 0x18, 0x4b, 0x54, 0x3b, 0x70, 0x81, 0xbe, 0x41, 0xcc, 0xd, 0x8, 0x61, 0x8e, 0x17, 0x0, 0xd, 0xd8, 0x40, 0x34, 0x20, 0xbd, 0x12, 0xe, 0x83, 0x16, 0x3a, 0x8a, 0x16, 0x48, 0xd0, 0x59, 0xe0, 0x21, 0x4c, 0x60, 0x42, 0xaa, 0xa0, 0x60, 0x69, 0x0, 0x5, 0xb9, 0x2, 0x12, 0x94, 0xf, 0x48, 0x60, 0xfe, 0x90, 0x66, 0x3, 0x4e, 0xf8, 0x4c, 0x50, 0xd8, 0x5f, 0x70, 0x99, 0x1f, 0xd0, 0xf0, 0x55, 0x88, 0x74, 0x81, 0xd, 0x6b, 0x80, 0x84, 0xb9, 0x8d, 
0xc7, 0x34, 0x81, 0x4e, 0xa4, 0x8a, 0x82, 0xa1, 0x2, 0x15, 0x40, 0x7, 0x70, 0x81, 0x7f, 0x8d, 0x3, 0x61, 0x2, 0x11, 0x1c, 0x1e, 0x78, 0xb8, 0x1a, 0x80, 0x2, 0xa3, 0x80, 0x2, 0x69, 0x80, 0xc7, 0xf3, 0x1, 0x2c, 0xdc, 0x13, 0x90, 0x81, 0xad, 0x81, 0x88, 0xb8, 0x81, 0x82, 0x78, 0x84, 0x4e, 0xb6, 0x83, 0x45, 0xb3, 0x6, 0x37, 0x90, 0x5e, 0x68, 0x48, 0x82, 0x28, 0x0, 0xc1, 0x1, 0xe5, 0x80, 0x86, 0xf2, 0x81, 0xcc, 0xb9, 0x81, 0x88, 0xeb, 0x80, 0x3, 0xb7, 0x87, 0x8d, 0x69, 0x83, 0x45, 0x74, 0x5, 0x22, 0x84, 0x43, 0x48, 0x48, 0x9f, 0xa4, 0xf3, 0x40, 0xa4, 0x78, 0x80, 0xb, 0xec, 0xb, 0x2c, 0x8, 0x7, 0x74, 0x88, 0x15, 0x34, 0xc2, 0xa2, 0xa1, 0x33, 0xd, 0xc1, 0x8, 0x91, 0x18, 0xf0, 0x60, 0x38, 0x40, 0x7, 0x83, 0x1, 0x8c, 0x10, 0x19, 0x40, 0x4, 0xbb, 0x1a, 0x68, 0x30, 0x2b, 0x60, 0x92, 0xaa, 0x80, 0x8, 0xef, 0x2, 0x54, 0x50, 0x7f, 0x80, 0xa, 0x89, 0x2, 0x32, 0x94, 0x17, 0x1a, 0xe1, 0x4, 0xe, 0xa6, 0x1, 0xa, 0x97, 0xb, 0x16, 0xe4, 0xa, 0x91, 0x3c, 0x90, 0xf9, 0x1e, 0x10, 0xf1, 0x25, 0x70, 0x29, 0x1d, 0xa0, 0x82, 0xf0, 0x4, 0x40, 0xe8, 0x56, 0x40, 0x5, 0x54, 0xf, 0x1b, 0xc6, 0x1, 0x2a, 0x98, 0x35, 0xf0, 0x30, 0xbd, 0x80, 0x7, 0xd9, 0x6, 0x20, 0x90, 0x5e, 0x9, 0xf9, 0xb, 0x1c, 0xac, 0x22, 0x20, 0xc1, 0x96, 0x4f, 0x70, 0x80, 0x4b, 0xae, 0x81, 0x47, 0xb9, 0x8d, 0x8d, 0x3d, 0x82, 0x45, 0x65, 0x80, 0xca, 0xb9, 0x5, 0x18, 0x6c, 0x26, 0xa4, 0x53, 0x40, 0xc7, 0xb2, 0xc2, 0x6, 0xdf, 0x80, 0xa, 0xa0, 0x29, 0x30, 0x23, 0x32, 0x81, 0x8, 0x7c, 0x3c, 0x3e, 0x37, 0xb8, 0x20, 0xd7, 0x80, 0xb, 0xfc, 0x25, 0x24, 0xe8, 0x1d, 0x2c, 0x50, 0xd, 0x3c, 0xe0, 0x88, 0x60, 0x3a, 0x83, 0xc, 0xa6, 0x11, 0xb2, 0x5c, 0xc1, 0x7, 0xf2, 0x1, 0x2a, 0xce, 0x1, 0x12, 0xd7, 0x64, 0xe, 0xb2, 0x7, 0xa9, 0x6e, 0xd0, 0x71, 0x1d, 0x70, 0x80, 0xe5, 0x8, 0x6d, 0x83, 0x84, 0x6a, 0x0, 0x19, 0x78, 0xe, 0x48, 0xc4, 0x7, 0x80, 0x81, 0xae, 0x80, 0x44, 0x3f, 0x1, 0x13, 0x4c, 0x1f, 0x58, 0xe0, 0x4a, 0x48, 0x8c, 0x2e, 0x4, 0x78, 0x81, 0x16, 0x7a, 0x5, 0x90, 0xa0, 0xdd, 0xc3, 0xa7, 0xd8, 0xc2, 0x44, 0xfc, 0x40, 0x2, 0xda, 0x0, 0xc, 
0x54, 0x11, 0xe0, 0xc0, 0xba, 0x80, 0x9, 0x7a, 0x9, 0xa0, 0x0, 0x52, 0x40, 0x61, 0x51, 0x44, 0x67, 0x12, 0x3, 0xc, 0x0, 0xf, 0x70, 0xe1, 0x31, 0xc0, 0xe1, 0xb6, 0xc2, 0xa1, 0xb9, 0xc1, 0x46, 0x50, 0x40, 0x1, 0x9b, 0x1, 0x1a, 0xc2, 0x11, 0x72, 0x4c, 0xa1, 0x2, 0x5b, 0x20, 0xb1, 0x28, 0xc0, 0x4, 0xc7, 0x12, 0x40, 0x0, 0xee, 0x0, 0x8, 0xd1, 0x2a, 0x32, 0xd0, 0x11, 0x81, 0x7f, 0xa0, 0x84, 0x77, 0xa, 0x93, 0xc8, 0x0, 0x97, 0xe7, 0x1, 0x9, 0x50, 0x1, 0x5, 0xf5, 0x0, 0x85, 0xee, 0x3, 0x93, 0x59, 0x82, 0x3c, 0x59, 0x14, 0x52, 0x28, 0xa4, 0xb, 0x28, 0x30, 0xa, 0x84, 0x4d, 0x81, 0x7f, 0x43, 0xa7, 0x56, 0x80, 0xb, 0x92, 0xf, 0x3c, 0x98, 0x47, 0xa8, 0x0, 0x35, 0x20, 0xde, 0x3, 0x2a, 0x8b, 0x2, 0x2e, 0x8f, 0x6, 0x58, 0x40, 0x58, 0xe0, 0x33, 0x4d, 0xc1, 0xa, 0xc0, 0x6, 0xc0, 0xc0, 0x4, 0x5, 0xf5, 0x4, 0x16, 0xfa, 0x26, 0x60, 0x60, 0x9c, 0xa0, 0x63, 0x2f, 0x20, 0x13, 0xbe, 0x81, 0xc, 0x97, 0xd, 0x32, 0xd0, 0x1, 0x22, 0x3e, 0x80, 0x20, 0x49, 0xe0, 0x50, 0x18, 0x21, 0x2, 0x19, 0xe0, 0x20, 0xbd, 0x10, 0xc8, 0x4, 0x9b, 0x40, 0xd, 0x24, 0xe0, 0xcd, 0x90, 0xe9, 0x3, 0x82, 0x16, 0x11, 0x28, 0x2a, 0xd0, 0xb5, 0x0, 0x83, 0x73, 0xb, 0x19, 0xd6, 0x7, 0x1b, 0x71, 0x5, 0x26, 0x28, 0x6d, 0x0, 0x82, 0x7f, 0x5, 0x2a, 0xb8, 0x56, 0x80, 0x84, 0x79, 0x4, 0x86, 0xa0, 0x7e, 0x50, 0x20, 0xe3, 0x3, 0x20, 0x36, 0x88, 0xaf, 0x90, 0x91, 0xd5, 0x60, 0x82, 0xe4, 0x1, 0x93, 0xf4, 0x8, 0x95, 0xc0, 0x5, 0x1b, 0x61, 0x6, 0x87, 0xc3, 0x1, 0x47, 0xb5, 0x41, 0x51, 0xc9, 0x26, 0x0, 0x3, 0xc1, 0x0, 0x87, 0x46, 0x5, 0x8f, 0x5a, 0x1, 0x95, 0xef, 0x9, 0x89, 0xca, 0x0, 0x8b, 0xf7, 0x2, 0x19, 0x5b, 0xb, 0x32, 0x8, 0x82, 0x2, 0xec, 0x0, 0x13, 0x23, 0x11, 0x40, 0x4, 0x20, 0xe0, 0x26, 0x51, 0x89, 0x85, 0x40, 0x84, 0xc2, 0x1, 0xf, 0x51, 0x1, 0xd, 0x8, 0x6e, 0xf8, 0x6e, 0x0, 0x82, 0x6a, 0x1, 0xf, 0x72, 0x4, 0x2c, 0x40, 0x4d, 0x50, 0x41, 0x16, 0x8, 0x67, 0x46, 0x4a, 0xd, 0x90, 0xc0, 0xfe, 0xc1, 0x42, 0x30, 0x1, 0x9, 0x50, 0xd, 0x5c, 0x16, 0x9, 0xc0, 0x0, 0xf6, 0x41, 0x27, 0x92, 0xc0, 0x63, 0x7a, 0xc2, 0xa1, 0x9c, 0x40, 0x21, 
0x27, 0xec, 0x25, 0x4c, 0x80, 0x1b, 0x20, 0x81, 0x2b, 0xa0, 0x0, 0x18, 0xe0, 0xe1, 0x6a, 0x21, 0x2, 0xa3, 0xd2, 0xaf, 0xe0, 0x3, 0x7a, 0x0, 0x4, 0x99, 0x26, 0x50, 0x70, 0xcf, 0xa0, 0xe1, 0x58, 0xa1, 0xd3, 0x38, 0xa0, 0x30, 0x79, 0x20, 0xd2, 0x3a, 0x20, 0xb3, 0x5e, 0x60, 0xa1, 0xeb, 0x80, 0x5, 0xc8, 0x1, 0x4c, 0xc0, 0x5b, 0xa0, 0x60, 0xc8, 0x41, 0x4, 0x6d, 0xc8, 0x20, 0xab, 0x10, 0xe3, 0x7, 0x1f, 0x4b, 0x7, 0x28, 0x70, 0x17, 0x80, 0x84, 0xf9, 0x6, 0x46, 0x90, 0x44, 0xf0, 0x40, 0xf6, 0x60, 0x5, 0xca, 0x7, 0x1b, 0xcf, 0x2, 0xb, 0x61, 0x12, 0x15} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go index 0b3a77da00..fe824e6ceb 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_reader.go @@ -1,11 +1,15 @@ package rlepluslazy import ( - "github.com/multiformats/go-varint" "golang.org/x/xerrors" ) func DecodeRLE(buf []byte) (RunIterator, error) { + if len(buf) > 0 && buf[len(buf)-1] == 0 { + // trailing zeros bytes not allowed. + return nil, xerrors.Errorf("not minimally encoded: %w", ErrDecode) + } + bv := readBitvec(buf) ver := bv.Get(2) // Read version @@ -40,36 +44,33 @@ func (it *rleIterator) NextRun() (Run, error) { } func (it *rleIterator) prep() error { - x := it.bv.Get(1) - - switch x { - case 1: + if it.bv.GetBit() { it.nextRun.Len = 1 - - case 0: - y := it.bv.Get(1) - switch y { - case 1: - it.nextRun.Len = uint64(it.bv.Get(4)) - case 0: - var buf = make([]byte, 0, 10) - for { - b := it.bv.Get(8) - buf = append(buf, b) - if b&0x80 == 0 { - break - } - if len(buf) > 10 { + } else if it.bv.GetBit() { + it.nextRun.Len = uint64(it.bv.Get(4)) + } else { + // Modified from the go standard library. Copyright the Go Authors and + // released under the BSD License. 
+ var x uint64 + var s uint + for i := 0; ; i++ { + if i == 10 { + return xerrors.Errorf("run too long: %w", ErrDecode) + } + b := it.bv.GetByte() + if b < 0x80 { + if i > 9 || i == 9 && b > 1 { return xerrors.Errorf("run too long: %w", ErrDecode) + } else if b == 0 && s > 0 { + return xerrors.Errorf("invalid run: %w", ErrDecode) } + x |= uint64(b) << s + break } - var err error - it.nextRun.Len, _, err = varint.FromUvarint(buf) - if err != nil { - return err - } - + x |= uint64(b&0x7f) << s + s += 7 } + it.nextRun.Len = x } it.nextRun.Val = !it.nextRun.Val diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_test.go deleted file mode 100644 index 0731d0aef8..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/rleplus_test.go +++ /dev/null @@ -1,184 +0,0 @@ -package rlepluslazy - -import ( - "math/rand" - "testing" - - "github.com/filecoin-project/go-bitfield/rle/internal/rleplus" - "github.com/stretchr/testify/assert" -) - -func TestDecode(t *testing.T) { - // Encoding bitvec![LittleEndian; 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - // in the Rust reference implementation gives an encoding of [223, 145, 136, 0] (without version field) - // The bit vector is equivalent to the integer set { 0, 2, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 } - - // This is the above reference output with a version header "00" manually added - referenceEncoding := []byte{124, 71, 34, 2} - - expectedNumbers := []uint64{0, 2, 4, 5, 6, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27} - - runs, err := RunsFromBits(BitsFromSlice(expectedNumbers)) - assert.NoError(t, err) - encoded, err := EncodeRuns(runs, []byte{}) - assert.NoError(t, err) - - // Our encoded bytes are the same as the ref bytes - assert.Equal(t, len(referenceEncoding), len(encoded)) - assert.Equal(t, referenceEncoding, encoded) 
- - rle, err := FromBuf(encoded) - assert.NoError(t, err) - decoded := make([]uint64, 0, len(expectedNumbers)) - - rit, err := rle.RunIterator() - assert.NoError(t, err) - - it, err := BitsFromRuns(rit) - assert.NoError(t, err) - for it.HasNext() { - bit, err := it.Next() - assert.NoError(t, err) - decoded = append(decoded, bit) - } - - // Our decoded integers are the same as expected - assert.Equal(t, expectedNumbers, decoded) -} - -func TestGoldenGen(t *testing.T) { - t.SkipNow() - N := 10000 - mod := uint32(1) << 20 - runExProp := float32(0.93) - - bits := make([]uint64, N) - - for i := 0; i < N; i++ { - x := rand.Uint32() % mod - bits[i] = uint64(x) - for rand.Float32() < runExProp && i+1 < N { - i++ - x = (x + 1) % mod - bits[i] = uint64(x) - } - } - - out, _, err := rleplus.Encode(bits) - assert.NoError(t, err) - t.Logf("%#v", out) - _, runs := rleplus.RunLengths(bits) - t.Logf("runs: %v", runs) - t.Logf("len: %d", len(out)) -} - -func TestGolden(t *testing.T) { - expected, _ := rleplus.Decode(goldenRLE) - res := make([]uint64, 0, len(expected)) - - rle, err := FromBuf(goldenRLE) - assert.NoError(t, err) - rit, err := rle.RunIterator() - assert.NoError(t, err) - it, err := BitsFromRuns(rit) - assert.NoError(t, err) - for it.HasNext() { - bit, err := it.Next() - assert.NoError(t, err) - res = append(res, bit) - } - assert.Equal(t, expected, res) -} - -func TestGoldenLoop(t *testing.T) { - rle, err := FromBuf(goldenRLE) - assert.NoError(t, err) - - rit, err := rle.RunIterator() - assert.NoError(t, err) - - buf, err := EncodeRuns(rit, nil) - assert.NoError(t, err) - - assert.Equal(t, goldenRLE, buf) -} - -func TestEncodeConsecutiveFails(t *testing.T) { - ra := &RunSliceIterator{ - Runs: []Run{ - {Val: true, Len: 5}, - {Val: true, Len: 8}, - }, - } - - _, err := EncodeRuns(ra, nil) - if err != ErrSameValRuns { - t.Fatal("expected ErrSameValRuns") - } -} - -var Res uint64 = 0 - -func BenchmarkRunIterator(b *testing.B) { - b.ReportAllocs() - var r uint64 - for i := 
0; i < b.N; i++ { - rle, _ := FromBuf(goldenRLE) - rit, _ := rle.RunIterator() - for rit.HasNext() { - run, _ := rit.NextRun() - if run.Val { - r = r + run.Len - } - } - } - Res = Res + r -} - -func BenchmarkRunsToBits(b *testing.B) { - b.ReportAllocs() - var r uint64 - for i := 0; i < b.N; i++ { - rle, _ := FromBuf(goldenRLE) - rit, _ := rle.RunIterator() - it, _ := BitsFromRuns(rit) - for it.HasNext() { - bit, _ := it.Next() - if bit < 1<<63 { - r++ - } - } - } - Res = Res + r -} - -func BenchmarkOldRLE(b *testing.B) { - b.ReportAllocs() - var r uint64 - for i := 0; i < b.N; i++ { - rle, _ := rleplus.Decode(goldenRLE) - r = r + uint64(len(rle)) - } - Res = Res + r -} - -func BenchmarkDecodeEncode(b *testing.B) { - b.ReportAllocs() - var r uint64 - out := make([]byte, 0, len(goldenRLE)) - for i := 0; i < b.N; i++ { - rle, _ := FromBuf(goldenRLE) - rit, _ := rle.RunIterator() - out, _ = EncodeRuns(rit, out) - r = r + uint64(len(out)) - } - - /* - for i := 0; i < b.N; i++ { - rle, _ := rleplus.Decode(goldenRLE) - out, _, _ := rleplus.Encode(rle) - r = r + uint64(len(out)) - } - */ - Res = Res + r -} diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go b/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go index e991b34c4b..69dc36e540 100644 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go +++ b/vendor/github.com/filecoin-project/go-bitfield/rle/runs.go @@ -298,12 +298,6 @@ func (it *peekIter) NextRun() (Run, error) { return it.it.NextRun() } -func (it *peekIter) peek() (Run, error) { - run, err := it.NextRun() - it.put(run, err) - return run, err -} - func (it *peekIter) put(run Run, err error) { it.stash = nextRun{ set: true, @@ -353,39 +347,29 @@ func (it *normIter) NextRun() (Run, error) { return it.it.NextRun() } -func LastIndex(iter RunIterator, val bool) (uint64, error) { - var at uint64 - var max uint64 +// Returns iterator with all bits up to the last bit set: +// in: 11100000111010001110000 +// out: 
1111111111111111111 +func Fill(iter RunIterator) (RunIterator, error) { + var at, length uint64 for iter.HasNext() { r, err := iter.NextRun() if err != nil { - return 0, err + return nil, err } at += r.Len - if r.Val == val { - max = at + if r.Val { + length = at } } - return max, nil -} - -// Returns iterator with all bits up to the last bit set: -// in: 11100000111010001110000 -// out: 1111111111111111111 -func Fill(i RunIterator) (RunIterator, error) { - max, err := LastIndex(i, true) - if err != nil { - return nil, err - } - var runs []Run - if max > 0 { + if length > 0 { runs = append(runs, Run{ Val: true, - Len: max, + Len: length, }) } diff --git a/vendor/github.com/filecoin-project/go-bitfield/rle/runs_test.go b/vendor/github.com/filecoin-project/go-bitfield/rle/runs_test.go deleted file mode 100644 index c8a71400ba..0000000000 --- a/vendor/github.com/filecoin-project/go-bitfield/rle/runs_test.go +++ /dev/null @@ -1,177 +0,0 @@ -package rlepluslazy - -import ( - "math" - "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOrRuns(t *testing.T) { - { - a, err := RunsFromSlice([]uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14}) - assert.NoError(t, err) - b, err := RunsFromSlice([]uint64{0, 1, 2, 3, 9, 10, 16, 17, 18, 50, 51, 70}) - assert.NoError(t, err) - - s, err := Or(a, b) - assert.NoError(t, err) - bis, err := SliceFromRuns(s) - assert.Equal(t, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 50, 51, 70}, bis) - assert.NoError(t, err) - } - - { - a, err := RunsFromSlice([]uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14}) - assert.NoError(t, err) - b, err := RunsFromSlice([]uint64{0, 1, 2, 3, 9, 10, 16, 17, 18, 50, 51, 70}) - assert.NoError(t, err) - - s, err := Or(b, a) - assert.NoError(t, err) - bis, err := SliceFromRuns(s) - assert.Equal(t, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 50, 51, 70}, bis) - assert.NoError(t, err) - } -} - -func randomBits(N int, max 
uint64) []uint64 { - all := make(map[uint64]struct{}) - for len(all) <= N { - x := rand.Uint64() % max - if _, has := all[x]; has { - continue - } - all[x] = struct{}{} - } - - res := make([]uint64, 0, N) - for x := range all { - res = append(res, x) - } - sort.Slice(res, func(i, j int) bool { return res[i] < res[j] }) - - return res -} - -func sum(a, b []uint64) []uint64 { - all := make(map[uint64]struct{}) - for _, x := range a { - all[x] = struct{}{} - } - for _, x := range b { - all[x] = struct{}{} - } - res := make([]uint64, 0, len(all)) - for x := range all { - res = append(res, x) - } - sort.Slice(res, func(i, j int) bool { return res[i] < res[j] }) - - return res -} - -func and(a, b []uint64) []uint64 { - amap := make(map[uint64]struct{}) - for _, x := range a { - amap[x] = struct{}{} - } - - res := make([]uint64, 0) - for _, x := range b { - if _, ok := amap[x]; ok { - res = append(res, x) - } - - } - sort.Slice(res, func(i, j int) bool { return res[i] < res[j] }) - - return res -} - -func TestOrRandom(t *testing.T) { - N := 100 - for i := 0; i < N; i++ { - abits := randomBits(1000, 1500) - bbits := randomBits(1000, 1500) - sumbits := sum(abits, bbits) - - a, err := RunsFromSlice(abits) - assert.NoError(t, err) - b, err := RunsFromSlice(bbits) - assert.NoError(t, err) - - s, err := Or(b, a) - assert.NoError(t, err) - bis, err := SliceFromRuns(s) - assert.NoError(t, err) - assert.Equal(t, sumbits, bis) - } -} - -func TestIsSet(t *testing.T) { - set := []uint64{0, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 14} - setMap := make(map[uint64]struct{}) - for _, v := range set { - setMap[v] = struct{}{} - } - - for i := uint64(0); i < 30; i++ { - a, err := RunsFromSlice(set) - assert.NoError(t, err) - res, err := IsSet(a, i) - assert.NoError(t, err) - _, should := setMap[i] - assert.Equal(t, should, res, "IsSet result missmatch at: %d", i) - - } -} - -func TestCount(t *testing.T) { - tests := []struct { - name string - runs []Run - count uint64 - shouldFail bool - }{ - { - 
name: "count-20", - runs: []Run{{false, 4}, {true, 7}, {false, 10}, {true, 3}, {false, 13}, {true, 10}}, - count: 20, - }, - { - name: "count-2024", - runs: []Run{{false, 4}, {true, 1000}, {false, 10}, {true, 1000}, - {false, 13}, {true, 24}, {false, 4}}, - count: 2024, - }, - { - name: "fail-set-over-max", - runs: []Run{{false, 4}, {true, math.MaxUint64 / 2}, {false, 10}, {true, math.MaxUint64 / 2}, - {false, 13}, {true, 24}, {false, 4}}, - shouldFail: true, - }, - { - name: "length-over-max", - runs: []Run{{false, math.MaxUint64 / 2}, {true, 4}, {false, 10}, {true, math.MaxUint64 / 2}, - {false, 13}, {true, 24}, {false, 4}}, - shouldFail: true, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - runs := &RunSliceIterator{Runs: test.runs} - c, err := Count(runs) - if test.shouldFail { - assert.Error(t, err, "test indicated it should fail") - } else { - assert.NoError(t, err) - assert.EqualValues(t, test.count, c) - } - }) - } -} diff --git a/vendor/github.com/filecoin-project/go-cbor-util/.circleci/config.yml b/vendor/github.com/filecoin-project/go-cbor-util/.circleci/config.yml deleted file mode 100644 index 521d7fb390..0000000000 --- a/vendor/github.com/filecoin-project/go-cbor-util/.circleci/config.yml +++ /dev/null @@ -1,161 +0,0 @@ -version: 2.1 -orbs: - go: gotest/tools@0.0.9 - -executors: - golang: - docker: - - image: circleci/golang:1.13 - resource_class: small - -commands: - install-deps: - steps: - - go/install-ssh - - go/install: {package: git} - prepare: - parameters: - linux: - default: true - description: is a linux build environment? - type: boolean - steps: - - checkout - - when: - condition: << parameters.linux >> - steps: - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev - build-all: - - -jobs: - mod-tidy-check: - executor: golang - steps: - - install-deps - - prepare - - go/mod-download - - go/mod-tidy-check - - test: &test - description: | - Run tests with gotestsum. 
- parameters: - executor: - type: executor - default: golang - go-test-flags: - type: string - default: "-timeout 5m" - description: Flags passed to go test. - packages: - type: string - default: "./..." - description: Import paths of packages to be tested. - test-suite-name: - type: string - default: unit - description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: short - description: gotestsum format. https://github.com/gotestyourself/gotestsum#format - coverage: - type: string - default: -coverprofile=coverage.txt - description: Coverage flag. Set to the empty string to disable. - codecov-upload: - type: boolean - default: false - description: | - Upload coverage report to https://codecov.io/. Requires the codecov API token to be - set as an environment variable for private projects. - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - go/install-gotestsum: - gobin: $HOME/.local/bin - - run: - name: go test - environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - GOTESTSUM_FORMAT: << parameters.gotestsum-format >> - command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - gotestsum -- \ - << parameters.coverage >> \ - << parameters.go-test-flags >> \ - << parameters.packages >> - no_output_timeout: 30m - - store_test_results: - path: /tmp/test-reports - - when: - condition: << parameters.codecov-upload >> - steps: - - go/install: {package: bash} - - go/install: {package: curl} - - run: - shell: /bin/bash -eo pipefail - command: | - bash <(curl -s https://codecov.io/bash) - - save_cache: - name: save go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - paths: - - "~/go/pkg" - - "~/go/src/github.com" - - "~/go/src/golang.org" - - lint: &lint - description: | 
- Run golangci-lint. - parameters: - executor: - type: executor - default: golang - golangci-lint-version: - type: string - default: 1.21.0 - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. - args: - type: string - default: '' - description: | - Arguments to pass to golangci-lint - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/install-golangci-lint: - gobin: $HOME/.local/bin - version: << parameters.golangci-lint-version >> - - run: - name: Lint - command: | - $HOME/.local/bin/golangci-lint run -v \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - - lint-all: - <<: *lint - -workflows: - version: 2.1 - ci: - jobs: - - lint-changes: - args: "--new-from-rev origin/master" - - test - - mod-tidy-check diff --git a/vendor/github.com/filecoin-project/go-crypto/.circleci/config.yml b/vendor/github.com/filecoin-project/go-crypto/.circleci/config.yml deleted file mode 100644 index 521d7fb390..0000000000 --- a/vendor/github.com/filecoin-project/go-crypto/.circleci/config.yml +++ /dev/null @@ -1,161 +0,0 @@ -version: 2.1 -orbs: - go: gotest/tools@0.0.9 - -executors: - golang: - docker: - - image: circleci/golang:1.13 - resource_class: small - -commands: - install-deps: - steps: - - go/install-ssh - - go/install: {package: git} - prepare: - parameters: - linux: - default: true - description: is a linux build environment? - type: boolean - steps: - - checkout - - when: - condition: << parameters.linux >> - steps: - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev - build-all: - - -jobs: - mod-tidy-check: - executor: golang - steps: - - install-deps - - prepare - - go/mod-download - - go/mod-tidy-check - - test: &test - description: | - Run tests with gotestsum. 
- parameters: - executor: - type: executor - default: golang - go-test-flags: - type: string - default: "-timeout 5m" - description: Flags passed to go test. - packages: - type: string - default: "./..." - description: Import paths of packages to be tested. - test-suite-name: - type: string - default: unit - description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: short - description: gotestsum format. https://github.com/gotestyourself/gotestsum#format - coverage: - type: string - default: -coverprofile=coverage.txt - description: Coverage flag. Set to the empty string to disable. - codecov-upload: - type: boolean - default: false - description: | - Upload coverage report to https://codecov.io/. Requires the codecov API token to be - set as an environment variable for private projects. - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - go/install-gotestsum: - gobin: $HOME/.local/bin - - run: - name: go test - environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - GOTESTSUM_FORMAT: << parameters.gotestsum-format >> - command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - gotestsum -- \ - << parameters.coverage >> \ - << parameters.go-test-flags >> \ - << parameters.packages >> - no_output_timeout: 30m - - store_test_results: - path: /tmp/test-reports - - when: - condition: << parameters.codecov-upload >> - steps: - - go/install: {package: bash} - - go/install: {package: curl} - - run: - shell: /bin/bash -eo pipefail - command: | - bash <(curl -s https://codecov.io/bash) - - save_cache: - name: save go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - paths: - - "~/go/pkg" - - "~/go/src/github.com" - - "~/go/src/golang.org" - - lint: &lint - description: | 
- Run golangci-lint. - parameters: - executor: - type: executor - default: golang - golangci-lint-version: - type: string - default: 1.21.0 - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. - args: - type: string - default: '' - description: | - Arguments to pass to golangci-lint - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/install-golangci-lint: - gobin: $HOME/.local/bin - version: << parameters.golangci-lint-version >> - - run: - name: Lint - command: | - $HOME/.local/bin/golangci-lint run -v \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - - lint-all: - <<: *lint - -workflows: - version: 2.1 - ci: - jobs: - - lint-changes: - args: "--new-from-rev origin/master" - - test - - mod-tidy-check diff --git a/vendor/github.com/filecoin-project/go-crypto/crypto.go b/vendor/github.com/filecoin-project/go-crypto/crypto.go index 7ef5d6718a..977e8ae9a3 100644 --- a/vendor/github.com/filecoin-project/go-crypto/crypto.go +++ b/vendor/github.com/filecoin-project/go-crypto/crypto.go @@ -7,7 +7,7 @@ import ( "crypto/rand" "io" - secp256k1 "github.com/ethereum/go-ethereum/crypto/secp256k1" + secp256k1 "github.com/ipsn/go-secp256k1" ) // PrivateKeyBytes is the size of a serialized private key. 
diff --git a/vendor/github.com/filecoin-project/go-crypto/crypto_test.go b/vendor/github.com/filecoin-project/go-crypto/crypto_test.go deleted file mode 100644 index 6e20493ed0..0000000000 --- a/vendor/github.com/filecoin-project/go-crypto/crypto_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package crypto_test - -import ( - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-crypto" -) - -func TestGenerateKey(t *testing.T) { - rand.Seed(time.Now().UnixNano()) - - sk, err := crypto.GenerateKey() - assert.NoError(t, err) - - assert.Equal(t, len(sk), 32) - - msg := make([]byte, 32) - for i := 0; i < len(msg); i++ { - msg[i] = byte(i) - } - - digest, err := crypto.Sign(sk, msg) - assert.NoError(t, err) - assert.Equal(t, len(digest), 65) - pk := crypto.PublicKey(sk) - - // valid signature - assert.True(t, crypto.Verify(pk, msg, digest)) - - // invalid signature - different message (too short) - assert.False(t, crypto.Verify(pk, msg[3:], digest)) - - // invalid signature - different message - msg2 := make([]byte, 32) - copy(msg2, msg) - rand.Shuffle(len(msg2), func(i, j int) { msg2[i], msg2[j] = msg2[j], msg2[i] }) - assert.False(t, crypto.Verify(pk, msg2, digest)) - - // invalid signature - different digest - digest2 := make([]byte, 65) - copy(digest2, digest) - rand.Shuffle(len(digest2), func(i, j int) { digest2[i], digest2[j] = digest2[j], digest2[i] }) - assert.False(t, crypto.Verify(pk, msg, digest2)) - - // invalid signature - digest too short - assert.False(t, crypto.Verify(pk, msg, digest[3:])) - assert.False(t, crypto.Verify(pk, msg, digest[:29])) - - // invalid signature - digest too long - digest3 := make([]byte, 70) - copy(digest3, digest) - assert.False(t, crypto.Verify(pk, msg, digest3)) - - recovered, err := crypto.EcRecover(msg, digest) - assert.NoError(t, err) - assert.Equal(t, recovered, crypto.PublicKey(sk)) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/.circleci/config.yml 
b/vendor/github.com/filecoin-project/go-data-transfer/.circleci/config.yml deleted file mode 100644 index 521d7fb390..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/.circleci/config.yml +++ /dev/null @@ -1,161 +0,0 @@ -version: 2.1 -orbs: - go: gotest/tools@0.0.9 - -executors: - golang: - docker: - - image: circleci/golang:1.13 - resource_class: small - -commands: - install-deps: - steps: - - go/install-ssh - - go/install: {package: git} - prepare: - parameters: - linux: - default: true - description: is a linux build environment? - type: boolean - steps: - - checkout - - when: - condition: << parameters.linux >> - steps: - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev - build-all: - - -jobs: - mod-tidy-check: - executor: golang - steps: - - install-deps - - prepare - - go/mod-download - - go/mod-tidy-check - - test: &test - description: | - Run tests with gotestsum. - parameters: - executor: - type: executor - default: golang - go-test-flags: - type: string - default: "-timeout 5m" - description: Flags passed to go test. - packages: - type: string - default: "./..." - description: Import paths of packages to be tested. - test-suite-name: - type: string - default: unit - description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: short - description: gotestsum format. https://github.com/gotestyourself/gotestsum#format - coverage: - type: string - default: -coverprofile=coverage.txt - description: Coverage flag. Set to the empty string to disable. - codecov-upload: - type: boolean - default: false - description: | - Upload coverage report to https://codecov.io/. Requires the codecov API token to be - set as an environment variable for private projects. 
- executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - go/install-gotestsum: - gobin: $HOME/.local/bin - - run: - name: go test - environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - GOTESTSUM_FORMAT: << parameters.gotestsum-format >> - command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - gotestsum -- \ - << parameters.coverage >> \ - << parameters.go-test-flags >> \ - << parameters.packages >> - no_output_timeout: 30m - - store_test_results: - path: /tmp/test-reports - - when: - condition: << parameters.codecov-upload >> - steps: - - go/install: {package: bash} - - go/install: {package: curl} - - run: - shell: /bin/bash -eo pipefail - command: | - bash <(curl -s https://codecov.io/bash) - - save_cache: - name: save go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - paths: - - "~/go/pkg" - - "~/go/src/github.com" - - "~/go/src/golang.org" - - lint: &lint - description: | - Run golangci-lint. - parameters: - executor: - type: executor - default: golang - golangci-lint-version: - type: string - default: 1.21.0 - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. 
- args: - type: string - default: '' - description: | - Arguments to pass to golangci-lint - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/install-golangci-lint: - gobin: $HOME/.local/bin - version: << parameters.golangci-lint-version >> - - run: - name: Lint - command: | - $HOME/.local/bin/golangci-lint run -v \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - - lint-all: - <<: *lint - -workflows: - version: 2.1 - ci: - jobs: - - lint-changes: - args: "--new-from-rev origin/master" - - test - - mod-tidy-check diff --git a/vendor/github.com/filecoin-project/go-data-transfer/CHANGELOG.md b/vendor/github.com/filecoin-project/go-data-transfer/CHANGELOG.md new file mode 100644 index 0000000000..369eda1281 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/CHANGELOG.md @@ -0,0 +1,407 @@ +# go-data-transfer changelog + +# go-data-transfer 0.9.0 + +Major release of the 1.1 data transfer protocol, which supports restarts of data transfers. 
+ +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Message compatibility on graphsync (#102) ([filecoin-project/go-data-transfer#102](https://github.com/filecoin-project/go-data-transfer/pull/102)) + - Handle network errors/stalls (#101) ([filecoin-project/go-data-transfer#101](https://github.com/filecoin-project/go-data-transfer/pull/101)) + - Resume Data Transfer (#100) ([filecoin-project/go-data-transfer#100](https://github.com/filecoin-project/go-data-transfer/pull/100)) + - docs(CHANGELOG): update for 0.6.7 release ([filecoin-project/go-data-transfer#98](https://github.com/filecoin-project/go-data-transfer/pull/98)) +- github.com/ipfs/go-graphsync (v0.2.1 -> v0.3.0): + - feat(CHANGELOG): update for 0.3.0 + - docs(CHANGELOG): update for 0.2.1 ([ipfs/go-graphsync#103](https://github.com/ipfs/go-graphsync/pull/103)) + - Track actual network operations in a response (#102) ([ipfs/go-graphsync#102](https://github.com/ipfs/go-graphsync/pull/102)) + - feat(responsecache): prune blocks more intelligently (#101) ([ipfs/go-graphsync#101](https://github.com/ipfs/go-graphsync/pull/101)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Aarsh Shah | 1 | +9597/-2220 | 67 | +| Hannah Howard | 4 | +2355/-1018 | 51 | +| hannahhoward | 1 | +25/-3 | 4 | + +# go-data-transfer 0.6.7 + +Minor update w/ fixes to support go-fil-markets 0.7.0 + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Feat/cleanup errors (#90) ([filecoin-project/go-data-transfer#90](https://github.com/filecoin-project/go-data-transfer/pull/90)) + - Disambiguate whether a revalidator recognized a request when checking for a need to revalidate (#87) ([filecoin-project/go-data-transfer#87](https://github.com/filecoin-project/go-data-transfer/pull/87)) + - docs(CHANGELOG): update for 0.6.6 ([filecoin-project/go-data-transfer#89](https://github.com/filecoin-project/go-data-transfer/pull/89)) + 
+### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +167/-30 | 9 | + +# go-data-transfer 0.6.6 + +Dependency update - go graphsync fix + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - feat(deps): update graphsync (#86) ([filecoin-project/go-data-transfer#86](https://github.com/filecoin-project/go-data-transfer/pull/86)) + - docs(CHANGELOG): updates for 0.6.5 ([filecoin-project/go-data-transfer#85](https://github.com/filecoin-project/go-data-transfer/pull/85)) +- github.com/ipfs/go-graphsync (v0.2.0 -> v0.2.1): + - docs(CHANGELOG): update for 0.2.1 + - Release/0.2.0 ([ipfs/go-graphsync#99](https://github.com/ipfs/go-graphsync/pull/99)) + - fix(metadata): fix cbor-gen (#98) ([ipfs/go-graphsync#98](https://github.com/ipfs/go-graphsync/pull/98)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| hannahhoward | 1 | +83/-68 | 1 | +| Hannah Howard | 2 | +15/-19 | 5 | + +# go-data-transfer 0.6.5 + +Dependency update - go-graphsync and go-ipld-prime + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - feat(deps): update graphsync 0.2.0 (#83) ([filecoin-project/go-data-transfer#83](https://github.com/filecoin-project/go-data-transfer/pull/83)) + - docs(CHANGELOG): update for 0.6.4 ([filecoin-project/go-data-transfer#82](https://github.com/filecoin-project/go-data-transfer/pull/82)) +- github.com/hannahhoward/cbor-gen-for (v0.0.0-20191218204337-9ab7b1bcc099 -> v0.0.0-20200817222906-ea96cece81f1): + - add flag to select map encoding ([hannahhoward/cbor-gen-for#1](https://github.com/hannahhoward/cbor-gen-for/pull/1)) + - fix(deps): update cbor-gen-to-latest +- github.com/ipfs/go-graphsync (v0.1.2 -> v0.2.0): + - docs(CHANGELOG): update for 0.2.0 + - style(imports): fix imports + - fix(selectorvalidator): memory optimization (#97) 
([ipfs/go-graphsync#97](https://github.com/ipfs/go-graphsync/pull/97)) + - Update go-ipld-prime@v0.5.0 (#92) ([ipfs/go-graphsync#92](https://github.com/ipfs/go-graphsync/pull/92)) + - refactor(metadata): use cbor-gen encoding (#96) ([ipfs/go-graphsync#96](https://github.com/ipfs/go-graphsync/pull/96)) + - Release/v0.1.2 ([ipfs/go-graphsync#95](https://github.com/ipfs/go-graphsync/pull/95)) + - Return Request context cancelled error (#93) ([ipfs/go-graphsync#93](https://github.com/ipfs/go-graphsync/pull/93)) + - feat(benchmarks): add p2p stress test (#91) ([ipfs/go-graphsync#91](https://github.com/ipfs/go-graphsync/pull/91)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Eric Myhre | 1 | +2919/-121 | 39 | +| Hannah Howard | 4 | +453/-143 | 29 | +| hannahhoward | 3 | +83/-63 | 10 | +| whyrusleeping | 1 | +31/-18 | 2 | +| Aarsh Shah | 1 | +27/-1 | 3 | + +# go-data-transfer 0.6.4 + +security fix for messages + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Ensure valid messages are returned from FromNet() (#74) ([filecoin-project/go-data-transfer#74](https://github.com/filecoin-project/go-data-transfer/pull/74)) + - Release/v0.6.3 ([filecoin-project/go-data-transfer#70](https://github.com/filecoin-project/go-data-transfer/pull/70)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Ingar Shu | 1 | +20/-1 | 2 | + +# go-data-transfer 0.6.3 + +Primarily a bug fix release-- graphsync performance and some better shutdown +logic + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - fix(deps): update graphsync, small cleanup + - Stop data transfer correctly and some minor cleanp (#69) ([filecoin-project/go-data-transfer#69](https://github.com/filecoin-project/go-data-transfer/pull/69)) + - docs(CHANGELOG): update for 0.6.2 release 
([filecoin-project/go-data-transfer#68](https://github.com/filecoin-project/go-data-transfer/pull/68)) +- github.com/ipfs/go-graphsync (v0.1.1 -> v0.1.2): + - fix(asyncloader): remove send on close channel + - docs(CHANGELOG): update for 0.1.2 release + - Benchmark framework + First memory fixes (#89) ([ipfs/go-graphsync#89](https://github.com/ipfs/go-graphsync/pull/89)) + - docs(CHANGELOG): update for v0.1.1 ([ipfs/go-graphsync#85](https://github.com/ipfs/go-graphsync/pull/85)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +1055/-39 | 17 | +| Aarsh Shah | 1 | +53/-68 | 8 | +| hannahhoward | 3 | +67/-34 | 11 | + +# go-data-transfer 0.6.2 + +Minor bug fix release for request cancelling + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Fix Pull Request Cancelling (#67) ([filecoin-project/go-data-transfer#67](https://github.com/filecoin-project/go-data-transfer/pull/67)) + - docs(CHANGELOG): update for 0.6.1 ([filecoin-project/go-data-transfer#66](https://github.com/filecoin-project/go-data-transfer/pull/66)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +265/-9 | 4 | + +# go-data-transfer 0.6.1 + +Update graphsync with critical bug fix for multiple transfers across custom stores + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Update graphsync 0.1.1 (#65) ([filecoin-project/go-data-transfer#65](https://github.com/filecoin-project/go-data-transfer/pull/65)) +- github.com/ipfs/go-graphsync (v0.1.0 -> v0.1.1): + - docs(CHANGELOG): update for v0.1.1 + - docs(CHANGELOG): update for v0.1.0 release ([ipfs/go-graphsync#84](https://github.com/ipfs/go-graphsync/pull/84)) + - Dedup by key extension (#83) ([ipfs/go-graphsync#83](https://github.com/ipfs/go-graphsync/pull/83)) + +### Contributors + +| Contributor | Commits | Lines 
± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +456/-57 | 17 | +| hannahhoward | 1 | +18/-1 | 2 | + +# go-data-transfer 0.6.0 + +Includes two small breaking change updates: + +- Update go-ipfs-blockstore to address blocks by-hash instead of by-cid. This brings go-data-transfer in-line with lotus. +- Update cbor-gen for some performance improvements and some API-breaking changes. + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Update cbor-gen (#63) ([filecoin-project/go-data-transfer#63](https://github.com/filecoin-project/go-data-transfer/pull/63)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|--------------|---------|---------|---------------| +| Steven Allen | 1 | +30/-23 | 5 | + +# go-data-transfer 0.5.3 + +Minor fixes + update to release process + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - fix(deps): update graphsync + - Release infrastructure (#61) ([filecoin-project/go-data-transfer#61](https://github.com/filecoin-project/go-data-transfer/pull/61)) + - Update cbor-gen (#60) ([filecoin-project/go-data-transfer#60](https://github.com/filecoin-project/go-data-transfer/pull/60)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200731020347-9ff2ade94aa4 -> v0.1.0): + - docs(CHANGELOG): update for v0.1.0 release + - Release infrastructure (#81) ([ipfs/go-graphsync#81](https://github.com/ipfs/go-graphsync/pull/81)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +1202/-223 | 91 | +| Łukasz Magiera | 1 | +176/-176 | 8 | +| hannahhoward | 2 | +48/-3 | 3 | + +# go-data-transfer 0.5.2 + +Security fix release + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - fix(deps): update graphsync + - fix(message): add error check to FromNet (#59) 
([filecoin-project/go-data-transfer#59](https://github.com/filecoin-project/go-data-transfer/pull/59)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200721211002-c376cbe14c0a -> v0.0.6-0.20200731020347-9ff2ade94aa4): + - feat(persistenceoptions): add unregister ability (#80) ([ipfs/go-graphsync#80](https://github.com/ipfs/go-graphsync/pull/80)) + - fix(message): regen protobuf code (#79) ([ipfs/go-graphsync#79](https://github.com/ipfs/go-graphsync/pull/79)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 3 | +442/-302 | 7 | +| hannahhoward | 1 | +3/-3 | 2 | + +# go-data-transfer v0.5.1 + +Support custom configruation of transports + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Allow custom configuration of transports (#57) ([filecoin-project/go-data-transfer#57](https://github.com/filecoin-project/go-data-transfer/pull/57)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200715204712-ef06b3d32e83 -> v0.0.6-0.20200721211002-c376cbe14c0a): + - feat(persistenceoptions): add unregister ability + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 1 | +1049/-751 | 35 | +| hannahhoward | 1 | +113/-32 | 5 | + +# go-data-transfer 0.5.0 + +Additional changes to support implementation of retrieval on top of go-data-transfer + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Minor fixes for retrieval on data transfer (#56) ([filecoin-project/go-data-transfer#56](https://github.com/filecoin-project/go-data-transfer/pull/56)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200708073926-caa872f68b2c -> v0.0.6-0.20200715204712-ef06b3d32e83): + - feat(requestmanager): run response hooks on completed requests (#77) ([ipfs/go-graphsync#77](https://github.com/ipfs/go-graphsync/pull/77)) + - Revert "add extensions on complete (#76)" + - add extensions on 
complete (#76) ([ipfs/go-graphsync#76](https://github.com/ipfs/go-graphsync/pull/76)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 3 | +515/-218 | 26 | +| hannahhoward | 1 | +155/-270 | 9 | + +# go-data-transfer 0.4.0 + +Major rewrite of library -- transports, persisted state, revalidators, etc. To support retrieval + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - The new data transfer (#55) ([filecoin-project/go-data-transfer#55](https://github.com/filecoin-project/go-data-transfer/pull/55)) + - Actually track progress for send/receive (#53) ([filecoin-project/go-data-transfer#53](https://github.com/filecoin-project/go-data-transfer/pull/53)) +- github.com/ipfs/go-graphsync (v0.0.6-0.20200504202014-9d5f2c26a103 -> v0.0.6-0.20200708073926-caa872f68b2c): + - All changes to date including pause requests & start paused, along with new adds for cleanups and checking of execution (#75) ([ipfs/go-graphsync#75](https://github.com/ipfs/go-graphsync/pull/75)) + - More fine grained response controls (#71) ([ipfs/go-graphsync#71](https://github.com/ipfs/go-graphsync/pull/71)) + - Refactor request execution and use IPLD SkipMe functionality for proper partial results on a request (#70) ([ipfs/go-graphsync#70](https://github.com/ipfs/go-graphsync/pull/70)) + - feat(graphsync): implement do-no-send-cids extension (#69) ([ipfs/go-graphsync#69](https://github.com/ipfs/go-graphsync/pull/69)) + - Incoming Block Hooks (#68) ([ipfs/go-graphsync#68](https://github.com/ipfs/go-graphsync/pull/68)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 7 | +12381/-4583 | 133 | + +# go-data-transfer 0.3.0 + +Additional refactors to refactors to registry + +### Changelog +- github.com/filecoin-project/go-data-transfer: + - feat(graphsyncimpl): fix open/close events (#52) 
([filecoin-project/go-data-transfer#52](https://github.com/filecoin-project/go-data-transfer/pull/52)) + - chore(deps): update graphsync ([filecoin-project/go-data-transfer#51](https://github.com/filecoin-project/go-data-transfer/pull/51)) + - Refactor registry and encoding (#50) ([filecoin-project/go-data-transfer#50](https://github.com/filecoin-project/go-data-transfer/pull/50)) + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hannah Howard | 2 | +993/-496 | 30 | + +# go-data-transfer 0.2.1 + +Bug fix release -- critical nil check + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - chore(deps): update graphsync +- github.com/ipfs/go-graphsync (v0.0.6-0.20200428204348-97a8cf76a482 -> v0.0.6-0.20200504202014-9d5f2c26a103): + - fix(responsemanager): add nil check (#67) ([ipfs/go-graphsync#67](https://github.com/ipfs/go-graphsync/pull/67)) + - Add autocomment configuration + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Hector Sanjuan | 1 | +68/-0 | 1 | +| hannahhoward | 1 | +3/-3 | 2 | +| Hannah Howard | 1 | +4/-0 | 1 | + +# go-data-transfer 0.2.0 + +Initial extracted release for Testnet Phase 2 (v0.1.0 + v0.1.1 were lotus tags prior to extraction) + +### Changelog + +- github.com/filecoin-project/go-data-transfer: + - Upgrade graphsync + ipld-prime (#49) ([filecoin-project/go-data-transfer#49](https://github.com/filecoin-project/go-data-transfer/pull/49)) + - Use extracted generic pubsub (#48) ([filecoin-project/go-data-transfer#48](https://github.com/filecoin-project/go-data-transfer/pull/48)) + - Refactor & Cleanup In Preparation For Added Complexity (#47) ([filecoin-project/go-data-transfer#47](https://github.com/filecoin-project/go-data-transfer/pull/47)) + - feat(graphsync): complete notifications for responder (#46) 
([filecoin-project/go-data-transfer#46](https://github.com/filecoin-project/go-data-transfer/pull/46)) + - Update graphsync ([filecoin-project/go-data-transfer#45](https://github.com/filecoin-project/go-data-transfer/pull/45)) + - docs(docs): remove outdated docs + - docs(README): clean up README + - docs(license): add license + contrib + - ci(circle): add config + - build(datatransfer): add go.mod + - build(cbor-gen): add tools for cbor-gen-for . + - fix links in datatransfer README (#11) ([filecoin-project/go-data-transfer#11](https://github.com/filecoin-project/go-data-transfer/pull/11)) + - feat(shared): add shared tools and types (#9) ([filecoin-project/go-data-transfer#9](https://github.com/filecoin-project/go-data-transfer/pull/9)) + - Feat/datatransfer readme, contributing, design doc (rename) + - refactor(datatransfer): move to local module + - feat(datatransfer): switch to graphsync implementation + - Don't respond with error in gsReqRcdHook when we can't find the datatransfer extension. 
(#754) ([filecoin-project/go-data-transfer#754](https://github.com/filecoin-project/go-data-transfer/pull/754)) + - Feat/dt subscribe, file Xfer round trip (#720) ([filecoin-project/go-data-transfer#720](https://github.com/filecoin-project/go-data-transfer/pull/720)) + - Feat/dt gs pullrequests (#693) ([filecoin-project/go-data-transfer#693](https://github.com/filecoin-project/go-data-transfer/pull/693)) + - DTM sends data over graphsync for validated push requests (#665) ([filecoin-project/go-data-transfer#665](https://github.com/filecoin-project/go-data-transfer/pull/665)) + - Techdebt/dt split graphsync impl receiver (#651) ([filecoin-project/go-data-transfer#651](https://github.com/filecoin-project/go-data-transfer/pull/651)) + - Feat/dt initiator cleanup (#645) ([filecoin-project/go-data-transfer#645](https://github.com/filecoin-project/go-data-transfer/pull/645)) + - Feat/dt graphsync pullreqs (#627) ([filecoin-project/go-data-transfer#627](https://github.com/filecoin-project/go-data-transfer/pull/627)) + - fix(datatransfer): fix tests + - Graphsync response is scheduled when a valid push request is received (#625) ([filecoin-project/go-data-transfer#625](https://github.com/filecoin-project/go-data-transfer/pull/625)) + - responses alert subscribers when request is not accepted (#607) ([filecoin-project/go-data-transfer#607](https://github.com/filecoin-project/go-data-transfer/pull/607)) + - feat(datatransfer): milestone 2 infrastructure + - other tests passing + - send data transfer response + - a better reflection + - remove unused fmt import in graphsync_test + - cleanup for PR + - tests passing + - Initiate push and pull requests (#536) ([filecoin-project/go-data-transfer#536](https://github.com/filecoin-project/go-data-transfer/pull/536)) + - Fix tests + - Respond to PR comments: * Make DataTransferRequest/Response be returned in from Net * Regenerate cbor_gen and fix the generator caller so it works better * Please the linters + - Cleanup for PR, 
clarifying and additional comments + - Some cleanup for PR + - all message tests passing, some others in datatransfer + - WIP trying out some stuff * Embed request/response in message so all the interfaces work AND the CBOR unmarshaling works: this is more like the spec anyway * get rid of pb stuff + - * Bring cbor-gen stuff into datatransfer package * make transferRequest private struct * add transferResponse + funcs * Rename VoucherID to VoucherType * more tests passing + - WIP using CBOR encoding for dataxfermsg + - feat(datatransfer): setup implementation path + - Duplicate comment ([filecoin-project/go-data-transfer#619](https://github.com/filecoin-project/go-data-transfer/pull/619)) + - fix typo ([filecoin-project/go-data-transfer#621](https://github.com/filecoin-project/go-data-transfer/pull/621)) + - fix types typo + - refactor(datatransfer): implement style fixes + - refactor(deals): move type instantiation to modules + - refactor(datatransfer): xerrors, cbor-gen, tweaks + - feat(datatransfer): make dag service dt async + - refactor(datatransfer): add comments, renames + - feat(datatransfer): integration w/ simple merkledag + +### Contributors + +| Contributor | Commits | Lines ± | Files Changed | +|-------------|---------|---------|---------------| +| Shannon Wells | 12 | +4337/-3455 | 53 | +| hannahhoward | 20 | +5090/-1692 | 99 | +| shannonwells | 13 | +1720/-983 | 65 | +| Hannah Howard | 6 | +1393/-1262 | 45 | +| wanghui | 2 | +4/-4 | 2 | +| 郭光华 | 1 | +0/-1 | 1 | + +### 🙌🏽 Want to contribute? + +Would you like to contribute to this repo and don’t know how? 
Here are a few places you can get started: + +- Check out the [Contributing Guidelines](https://github.com/filecoin-project/go-data-transfer/blob/master/CONTRIBUTING.md) +- Look for issues with the `good-first-issue` label in [go-fil-markets](https://github.com/filecoin-project/go-data-transfer/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22e-good-first-issue%22+) diff --git a/vendor/github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md b/vendor/github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md index dbc47e6ce7..eb4746b16c 100644 --- a/vendor/github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md +++ b/vendor/github.com/filecoin-project/go-data-transfer/CONTRIBUTING.md @@ -38,6 +38,8 @@ import ( [external packages] + [other-filecoin-project packages] + [go-data-transfer packages] ) ``` @@ -55,10 +57,14 @@ import ( "github.com/ipfs/go-cid" cborgen "github.com/whyrusleeping/cbor-gen" - "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-statemachine" + + datatransfer "github.com/filecoin-project/go-data-transfer" ) ``` +You can run `script/fiximports` to put all your code in the desired format + #### Comments Comments are a communication to other developers (including your future self) to help them understand and maintain code. Good comments describe the _intent_ of the code, without repeating the procedures directly. diff --git a/vendor/github.com/filecoin-project/go-data-transfer/Makefile b/vendor/github.com/filecoin-project/go-data-transfer/Makefile index 1f6e2060ed..6ad9e7d874 100644 --- a/vendor/github.com/filecoin-project/go-data-transfer/Makefile +++ b/vendor/github.com/filecoin-project/go-data-transfer/Makefile @@ -14,4 +14,19 @@ test: go test ./... type-gen: build - go run ./cbor-gen/main.go + go generate ./... + +imports: + scripts/fiximports + +cbor-gen: + go generate ./... 
+ +tidy: + go mod tidy + +lint: + git fetch + golangci-lint run -v --concurrency 2 --new-from-rev origin/master + +prepare-pr: cbor-gen tidy imports lint \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-data-transfer/README.md b/vendor/github.com/filecoin-project/go-data-transfer/README.md index f1dfbbd2dd..9783676141 100644 --- a/vendor/github.com/filecoin-project/go-data-transfer/README.md +++ b/vendor/github.com/filecoin-project/go-data-transfer/README.md @@ -36,17 +36,19 @@ Install the module in your package or app with `go get "github.com/filecoin-proj import ( gsimpl "github.com/ipfs/go-graphsync/impl" - "github.com/filecoin-project/go-data-transfer/datatransfer" + datatransfer "github.com/filecoin-project/go-data-transfer/impl" + gstransport "github.com/filecoin-project/go-data-transfer/transport/graphsync" "github.com/libp2p/go-libp2p-core/host" ) ``` 1. Provide or create a [libp2p host.Host](https://github.com/libp2p/go-libp2p-examples/tree/master/libp2p-host) -1. Provide or create a [go-graphsync GraphExchange](https://github.com/ipfs/go-graphsync#initializing-a-graphsync-exchange) -1. Create a new instance of GraphsyncDataTransfer +1. You will need a transport protocol. The current default transport is graphsync. [go-graphsync GraphExchange](https://github.com/ipfs/go-graphsync#initializing-a-graphsync-exchange) +1. 
Create a data transfer by building a transport interface and then initializing a new data transfer instance ```go - func NewGraphsyncDatatransfer(h host.Host, gs graphsync.GraphExchange) { - dt := datatransfer.NewGraphSyncDataTransfer(h, gs) + func NewGraphsyncDataTransfer(h host.Host, gs graphsync.GraphExchange) { + tp := gstransport.NewTransport(h.ID(), gs) + dt := impl.NewDataTransfer(h, tp) } ``` @@ -81,7 +83,7 @@ func (v *myVoucher) Type() string { type myValidator struct { ctx context.Context - validationsReceived chan receivedValidation + ValidationsReceived chan receivedValidation } func (vl *myValidator) ValidatePush( @@ -126,7 +128,8 @@ by its `reflect.Type` and `dataTransfer.RequestValidator` for vouchers that must be sent with the request. Using the trivial examples above: ```go func NewGraphsyncDatatransfer(h host.Host, gs graphsync.GraphExchange) { - dt := datatransfer.NewGraphSyncDataTransfer(h, gs) + tp := gstransport.NewTransport(h.ID(), gs) + dt := impl.NewDataTransfer(h, tp) vouch := &myVoucher{} mv := &myValidator{} @@ -134,7 +137,7 @@ must be sent with the request. Using the trivial examples above: } ``` -For more detail, please see the [unit tests](https://github.com/filecoin-project/go-data-transfer/blob/master/impl/graphsync/graphsync_impl_test.go). +For more detail, please see the [unit tests](https://github.com/filecoin-project/go-data-transfer/blob/master/impl/impl_test.go). ### Open a Push or Pull Request For a push or pull request, provide a context, a `datatransfer.Voucher`, a host recipient `peer.ID`, a baseCID `cid.CID` and a selector `ipld.Node`. 
These diff --git a/vendor/github.com/filecoin-project/go-data-transfer/channels/channels.go b/vendor/github.com/filecoin-project/go-data-transfer/channels/channels.go deleted file mode 100644 index 84506160c9..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/channels/channels.go +++ /dev/null @@ -1,171 +0,0 @@ -package channels - -import ( - "errors" - "sync" - "sync/atomic" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - peer "github.com/libp2p/go-libp2p-core/peer" -) - -// channel represents all the parameters for a single data transfer -type channel struct { - // an identifier for this channel shared by request and responder, set by requester through protocol - transferID datatransfer.TransferID - // base CID for the piece being transferred - baseCid cid.Cid - // portion of Piece to return, specified by an IPLD selector - selector ipld.Node - // used to verify this channel - voucher datatransfer.Voucher - // the party that is sending the data (not who initiated the request) - sender peer.ID - // the party that is receiving the data (not who initiated the request) - recipient peer.ID - // expected amount of data to be transferred - totalSize uint64 -} - -// NewChannel makes a new channel -func NewChannel(transferID datatransfer.TransferID, baseCid cid.Cid, - selector ipld.Node, - voucher datatransfer.Voucher, - sender peer.ID, - recipient peer.ID, - totalSize uint64) datatransfer.Channel { - return channel{transferID, baseCid, selector, voucher, sender, recipient, totalSize} -} - -// TransferID returns the transfer id for this channel -func (c channel) TransferID() datatransfer.TransferID { return c.transferID } - -// BaseCID returns the CID that is at the root of this data transfer -func (c channel) BaseCID() cid.Cid { return c.baseCid } - -// Selector returns the IPLD selector for this data transfer (represented as -// an IPLD node) -func (c channel) Selector() 
ipld.Node { return c.selector } - -// Voucher returns the voucher for this data transfer -func (c channel) Voucher() datatransfer.Voucher { return c.voucher } - -// Sender returns the peer id for the node that is sending data -func (c channel) Sender() peer.ID { return c.sender } - -// Recipient returns the peer id for the node that is receiving data -func (c channel) Recipient() peer.ID { return c.recipient } - -// TotalSize returns the total size for the data being transferred -func (c channel) TotalSize() uint64 { return c.totalSize } - -// ChannelState is immutable channel data plus mutable state -type ChannelState struct { - datatransfer.Channel - // total bytes sent from this node (0 if receiver) - sent uint64 - // total bytes received by this node (0 if sender) - received uint64 -} - -// EmptyChannelState is the zero value for channel state, meaning not present -var EmptyChannelState = ChannelState{} - -// Sent returns the number of bytes sent -func (c ChannelState) Sent() uint64 { return c.sent } - -// Received returns the number of bytes received -func (c ChannelState) Received() uint64 { return c.received } - -type internalChannel struct { - datatransfer.Channel - sent *uint64 - received *uint64 -} - -// ErrNotFound is returned when a channel cannot be found with a given channel ID -var ErrNotFound = errors.New("No channel for this channel ID") - -// ErrWrongType is returned when a caller attempts to change the type of implementation data after setting it -var ErrWrongType = errors.New("Cannot change type of implementation specific data after setting it") - -// Channels is a thread safe list of channels -type Channels struct { - channelsLk sync.RWMutex - channels map[datatransfer.ChannelID]internalChannel -} - -// New returns a new thread safe list of channels -func New() *Channels { - return &Channels{ - sync.RWMutex{}, - make(map[datatransfer.ChannelID]internalChannel), - } -} - -// CreateNew creates a new channel id and channel state and saves to 
channels. -// returns error if the channel exists already. -func (c *Channels) CreateNew(tid datatransfer.TransferID, baseCid cid.Cid, selector ipld.Node, voucher datatransfer.Voucher, initiator, dataSender, dataReceiver peer.ID) (datatransfer.ChannelID, error) { - chid := datatransfer.ChannelID{Initiator: initiator, ID: tid} - c.channelsLk.Lock() - defer c.channelsLk.Unlock() - _, ok := c.channels[chid] - if ok { - return chid, errors.New("tried to create channel but it already exists") - } - c.channels[chid] = internalChannel{Channel: NewChannel(0, baseCid, selector, voucher, dataSender, dataReceiver, 0), sent: new(uint64), received: new(uint64)} - return chid, nil -} - -// InProgress returns a list of in progress channels -func (c *Channels) InProgress() map[datatransfer.ChannelID]datatransfer.ChannelState { - c.channelsLk.RLock() - defer c.channelsLk.RUnlock() - channelsCopy := make(map[datatransfer.ChannelID]datatransfer.ChannelState, len(c.channels)) - for channelID, internalChannel := range c.channels { - channelsCopy[channelID] = ChannelState{ - internalChannel.Channel, atomic.LoadUint64(internalChannel.sent), atomic.LoadUint64(internalChannel.received), - } - } - return channelsCopy -} - -// GetByID searches for a channel in the slice of channels with id `chid`. 
-// Returns datatransfer.EmptyChannelState if there is no channel with that id -func (c *Channels) GetByID(chid datatransfer.ChannelID) (datatransfer.ChannelState, error) { - c.channelsLk.RLock() - internalChannel, ok := c.channels[chid] - c.channelsLk.RUnlock() - if !ok { - return EmptyChannelState, ErrNotFound - } - return ChannelState{ - internalChannel.Channel, atomic.LoadUint64(internalChannel.sent), atomic.LoadUint64(internalChannel.received), - }, nil -} - -// IncrementSent increments the total sent on the given channel by the given amount (returning -// the new total) -func (c *Channels) IncrementSent(chid datatransfer.ChannelID, delta uint64) (uint64, error) { - c.channelsLk.RLock() - channel, ok := c.channels[chid] - c.channelsLk.RUnlock() - if !ok { - return 0, ErrNotFound - } - return atomic.AddUint64(channel.sent, delta), nil -} - -// IncrementReceived increments the total received on the given channel by the given amount (returning -// the new total) -func (c *Channels) IncrementReceived(chid datatransfer.ChannelID, delta uint64) (uint64, error) { - c.channelsLk.RLock() - channel, ok := c.channels[chid] - c.channelsLk.RUnlock() - if !ok { - return 0, ErrNotFound - } - return atomic.AddUint64(channel.received, delta), nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/channels/channels_test.go b/vendor/github.com/filecoin-project/go-data-transfer/channels/channels_test.go deleted file mode 100644 index e6b21e951a..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/channels/channels_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package channels_test - -import ( - "testing" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/channels" - "github.com/filecoin-project/go-data-transfer/testutil" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/require" -) - -func 
TestChannels(t *testing.T) { - channelList := channels.New() - - tid1 := datatransfer.TransferID(0) - tid2 := datatransfer.TransferID(1) - fv1 := &testutil.FakeDTType{} - fv2 := &testutil.FakeDTType{} - cids := testutil.GenerateCids(2) - selector := builder.NewSelectorSpecBuilder(basicnode.Style.Any).Matcher().Node() - peers := testutil.GeneratePeers(4) - - t.Run("adding channels", func(t *testing.T) { - chid, err := channelList.CreateNew(tid1, cids[0], selector, fv1, peers[0], peers[0], peers[1]) - require.NoError(t, err) - require.Equal(t, peers[0], chid.Initiator) - require.Equal(t, tid1, chid.ID) - - // cannot add twice for same channel id - _, err = channelList.CreateNew(tid1, cids[1], selector, fv2, peers[0], peers[1], peers[0]) - require.Error(t, err) - - // can add for different id - chid, err = channelList.CreateNew(tid2, cids[1], selector, fv2, peers[3], peers[2], peers[3]) - require.NoError(t, err) - require.Equal(t, peers[3], chid.Initiator) - require.Equal(t, tid2, chid.ID) - }) - - t.Run("in progress channels", func(t *testing.T) { - inProgress := channelList.InProgress() - require.Len(t, inProgress, 2) - require.Contains(t, inProgress, datatransfer.ChannelID{Initiator: peers[0], ID: tid1}) - require.Contains(t, inProgress, datatransfer.ChannelID{Initiator: peers[3], ID: tid2}) - }) - - t.Run("get by id and sender", func(t *testing.T) { - state, err := channelList.GetByID(datatransfer.ChannelID{Initiator: peers[0], ID: tid1}) - require.NoError(t, err) - require.NotEqual(t, channels.EmptyChannelState, state) - require.Equal(t, cids[0], state.BaseCID()) - require.Equal(t, selector, state.Selector()) - require.Equal(t, fv1, state.Voucher()) - require.Equal(t, peers[0], state.Sender()) - require.Equal(t, peers[1], state.Recipient()) - - // empty if channel does not exist - state, err = channelList.GetByID(datatransfer.ChannelID{Initiator: peers[1], ID: tid1}) - require.Equal(t, channels.EmptyChannelState, state) - require.EqualError(t, err, 
channels.ErrNotFound.Error()) - - // works for other channel as well - state, err = channelList.GetByID(datatransfer.ChannelID{Initiator: peers[3], ID: tid2}) - require.NotEqual(t, channels.EmptyChannelState, state) - require.NoError(t, err) - }) - - t.Run("updating send/receive values", func(t *testing.T) { - state, err := channelList.GetByID(datatransfer.ChannelID{Initiator: peers[0], ID: tid1}) - require.NoError(t, err) - require.NotEqual(t, channels.EmptyChannelState, state) - require.Equal(t, uint64(0), state.Received()) - require.Equal(t, uint64(0), state.Sent()) - - received, err := channelList.IncrementReceived(datatransfer.ChannelID{Initiator: peers[0], ID: tid1}, 50) - require.Equal(t, uint64(50), received) - require.NoError(t, err) - sent, err := channelList.IncrementSent(datatransfer.ChannelID{Initiator: peers[0], ID: tid1}, 100) - require.Equal(t, uint64(100), sent) - require.NoError(t, err) - - state, err = channelList.GetByID(datatransfer.ChannelID{Initiator: peers[0], ID: tid1}) - require.NoError(t, err) - require.Equal(t, uint64(50), state.Received()) - require.Equal(t, uint64(100), state.Sent()) - - // errors if channel does not exist - _, err = channelList.IncrementReceived(datatransfer.ChannelID{Initiator: peers[1], ID: tid1}, 200) - require.EqualError(t, err, channels.ErrNotFound.Error()) - _, err = channelList.IncrementSent(datatransfer.ChannelID{Initiator: peers[1], ID: tid1}, 200) - require.EqualError(t, err, channels.ErrNotFound.Error()) - - received, err = channelList.IncrementReceived(datatransfer.ChannelID{Initiator: peers[0], ID: tid1}, 50) - require.Equal(t, uint64(100), received) - require.NoError(t, err) - sent, err = channelList.IncrementSent(datatransfer.ChannelID{Initiator: peers[0], ID: tid1}, 25) - require.Equal(t, uint64(125), sent) - require.NoError(t, err) - }) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/coverage.txt b/vendor/github.com/filecoin-project/go-data-transfer/coverage.txt new file mode 
100644 index 0000000000..e3bb72b7fa --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/coverage.txt @@ -0,0 +1,1250 @@ +mode: set +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:19.46,20.59 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:28.2,28.48 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:36.2,36.31 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:20.59,23.17 3 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:26.3,26.26 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:23.17,25.4 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:28.48,31.17 3 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:34.3,34.26 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:31.17,33.4 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:47.56,49.53 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:54.2,55.45 2 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:59.2,59.55 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:64.2,65.16 2 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:68.2,69.63 2 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:72.2,72.48 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:49.53,51.3 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:55.45,57.3 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:59.55,61.3 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:65.16,67.3 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:69.63,71.3 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:79.79,83.16 4 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:86.2,86.29 1 1 
+github.com/filecoin-project/go-data-transfer/encoding/encoding.go:83.16,85.3 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:93.78,96.45 3 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:99.2,101.16 3 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:104.2,104.21 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:96.45,98.3 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:101.16,103.3 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:111.82,114.45 3 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:117.2,118.16 2 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:121.2,121.21 1 1 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:114.45,116.3 1 0 +github.com/filecoin-project/go-data-transfer/encoding/encoding.go:118.16,120.3 1 0 +github.com/filecoin-project/go-data-transfer/registry/registry.go:33.30,37.2 1 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:40.89,43.16 3 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:46.2,48.40 3 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:51.2,52.12 2 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:43.16,45.3 1 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:48.40,50.3 1 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:56.93,61.2 4 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:64.88,69.2 4 1 +github.com/filecoin-project/go-data-transfer/registry/registry.go:72.109,75.43 3 0 +github.com/filecoin-project/go-data-transfer/registry/registry.go:81.2,81.12 1 0 +github.com/filecoin-project/go-data-transfer/registry/registry.go:75.43,77.17 2 0 +github.com/filecoin-project/go-data-transfer/registry/registry.go:77.17,79.4 1 0 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:20.63,21.14 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:25.2,25.66 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:29.2,32.40 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:36.2,36.111 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:39.2,39.68 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:45.2,45.106 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:50.2,50.38 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:54.2,54.109 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:57.2,57.66 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:62.2,62.38 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:66.2,66.109 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:69.2,69.66 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:75.2,75.63 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:80.2,80.50 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:85.2,85.35 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:89.2,89.106 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:92.2,92.63 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:97.2,97.38 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:101.2,101.109 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:104.2,104.66 1 1 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:110.2,110.105 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:116.2,116.102 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:122.2,122.100 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:128.2,128.104 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:133.2,133.36 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:137.2,137.107 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:140.2,140.64 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:145.2,145.37 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:149.2,149.103 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:152.2,152.31 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:159.2,159.43 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:163.2,163.109 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:166.2,166.37 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:173.2,173.41 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:177.2,177.107 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:180.2,180.35 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:185.2,185.12 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:21.14,24.3 2 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:25.66,27.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:32.40,34.3 1 
0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:36.111,38.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:39.68,41.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:45.106,47.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:50.38,52.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:54.109,56.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:57.66,59.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:62.38,64.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:66.109,68.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:69.66,71.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:75.63,77.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:80.50,82.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:85.35,87.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:89.106,91.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:92.63,94.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:97.38,99.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:101.109,103.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:104.66,106.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:110.105,112.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:116.102,118.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:122.100,124.3 1 0 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:128.104,130.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:133.36,135.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:137.107,139.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:140.64,142.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:145.37,147.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:149.103,151.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:152.31,153.42 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:153.42,155.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:159.43,161.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:163.109,165.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:166.37,167.42 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:167.42,169.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:173.41,175.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:177.107,179.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:180.35,181.56 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:181.56,183.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:188.65,195.16 5 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:198.2,198.25 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:202.2,202.17 1 1 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:360.2,361.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:365.2,365.27 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:369.2,369.25 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:373.2,373.15 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:377.2,377.34 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:389.2,390.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:394.2,394.27 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:398.2,398.25 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:402.2,402.15 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:406.2,406.34 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:418.2,419.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:423.2,423.27 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:427.2,427.25 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:431.2,431.15 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:435.2,435.34 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:444.2,444.12 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:195.16,197.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:198.25,200.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:202.17,204.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:208.2,210.17 2 1 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:214.3,214.32 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:210.17,212.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:218.2,221.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:224.3,224.32 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:227.3,227.48 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:221.17,223.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:224.32,226.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:232.2,234.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:238.3,238.30 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:234.17,236.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:242.2,244.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:248.3,248.30 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:244.17,246.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:252.2,255.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:259.3,259.16 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:255.17,257.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:264.2,268.54 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:268.54,270.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:274.2,276.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:280.3,280.27 1 1 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:276.17,278.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:284.2,286.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:290.3,290.30 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:286.17,288.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:294.2,297.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:300.3,300.32 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:303.3,303.30 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:297.17,299.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:300.32,302.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:308.2,311.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:314.3,314.32 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:317.3,317.40 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:311.17,313.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:314.32,316.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:322.2,325.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:328.3,328.32 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:331.3,331.25 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:325.17,327.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:328.32,330.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:336.2,339.17 2 1 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:342.3,342.32 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:345.3,345.29 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:339.17,341.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:342.32,344.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:350.2,352.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:356.3,356.27 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:352.17,354.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:361.16,363.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:365.27,367.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:369.25,371.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:373.15,375.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:377.34,380.45 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:384.3,384.20 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:380.45,382.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:390.16,392.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:394.27,396.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:398.25,400.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:402.15,404.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:406.34,409.45 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:413.3,413.26 1 
1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:409.45,411.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:419.16,421.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:423.27,425.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:427.25,429.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:431.15,433.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:435.34,438.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:441.3,441.24 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:438.17,440.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:449.57,450.14 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:454.2,454.60 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:458.2,461.33 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:465.2,465.104 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:468.2,468.61 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:473.2,473.49 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:476.2,476.12 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:450.14,453.3 2 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:454.60,456.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:461.33,463.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:465.104,467.3 1 0 
+github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:468.61,470.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:473.49,475.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:479.59,486.16 5 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:489.2,489.25 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:493.2,493.16 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:517.2,517.12 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:486.16,488.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:489.25,491.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:493.16,495.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:499.2,501.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:505.3,505.45 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:501.17,503.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:509.2,513.53 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:513.53,515.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:522.63,523.14 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:527.2,527.66 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:531.2,534.33 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:538.2,538.104 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:541.2,541.61 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:546.2,546.55 
1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:549.2,549.12 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:523.14,526.3 2 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:527.66,529.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:534.33,536.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:538.104,540.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:541.61,543.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:546.55,548.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:552.65,559.16 5 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:562.2,562.25 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:566.2,566.16 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:590.2,590.12 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:559.16,561.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:562.25,564.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:566.16,568.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:572.2,574.17 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:578.3,578.45 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:574.17,576.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:582.2,586.59 2 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel_cbor_gen.go:586.59,588.4 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:56.52,56.71 1 1 
+github.com/filecoin-project/go-data-transfer/channels/channel_state.go:59.37,59.54 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:62.41,62.62 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:65.60,65.83 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:68.41,68.61 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:72.44,76.16 4 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:79.2,79.24 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:76.16,78.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:83.54,87.2 3 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:90.48,92.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:95.40,95.59 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:98.43,98.65 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:101.42,101.64 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:104.37,106.2 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:108.58,109.14 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:109.14,111.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:111.8,113.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:116.40,118.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:120.57,122.37 2 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:127.2,127.17 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:122.37,126.3 3 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:130.58,134.2 3 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:136.70,140.2 3 1 
+github.com/filecoin-project/go-data-transfer/channels/channel_state.go:142.69,144.43 2 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:149.2,149.23 1 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:144.43,148.3 3 1 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:152.61,153.27 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:156.2,156.17 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:153.27,155.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channel_state.go:159.45,161.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:51.45,66.16 3 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:69.2,70.15 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:66.16,68.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:73.77,75.9 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:78.2,79.9 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:82.2,88.87 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:75.9,77.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:79.9,81.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:93.224,95.29 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:100.2,102.16 3 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:105.2,106.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:109.2,129.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:132.2,132.60 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:95.29,97.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:97.8,99.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:102.16,104.3 1 0 
+github.com/filecoin-project/go-data-transfer/channels/channels.go:106.16,108.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:129.16,131.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:136.95,139.16 3 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:142.2,143.51 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:147.2,147.22 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:139.16,141.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:143.51,146.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:152.113,155.16 3 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:158.2,158.86 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:155.16,157.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:162.62,164.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:167.63,169.2 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:171.91,173.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:175.95,177.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:180.70,182.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:185.70,187.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:190.71,192.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:195.71,197.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:200.96,202.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:205.2,205.76 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:202.16,204.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:209.114,211.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:214.2,214.94 1 1 
+github.com/filecoin-project/go-data-transfer/channels/channels.go:211.16,213.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:218.64,220.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:223.70,225.2 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:228.74,230.2 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:233.83,235.2 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:238.71,240.2 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:243.62,245.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:248.72,250.2 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:253.74,255.2 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:257.110,259.16 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:262.2,262.10 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:265.2,265.50 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels.go:259.16,261.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels.go:262.10,264.3 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:30.121,34.3 3 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:43.121,46.3 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:48.124,51.3 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:53.105,56.4 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:58.111,62.4 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:112.101,114.28 2 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:117.2,119.50 3 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:114.28,116.3 1 0 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:130.55,131.42 1 1 
+github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:137.2,137.14 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:131.42,132.14 1 1 +github.com/filecoin-project/go-data-transfer/channels/channels_fsm.go:132.14,134.4 1 1 +github.com/filecoin-project/go-data-transfer/channels/internalchannel.go:61.146,82.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message.go:20.45,22.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message.go:25.65,26.20 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message.go:29.2,29.33 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message.go:26.20,28.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message.go:34.53,36.2 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:17.58,18.14 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:22.2,22.61 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:27.2,27.49 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:32.2,32.49 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:37.2,37.50 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:40.2,40.12 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:18.14,21.3 2 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:22.61,24.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:27.49,29.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:32.49,34.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:37.50,39.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:43.60,50.16 5 1 
+github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:53.2,53.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:57.2,57.16 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:63.2,64.16 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:67.2,67.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:70.2,70.15 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:116.2,116.12 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:50.16,52.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:53.25,55.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:57.16,59.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:64.16,66.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:67.25,69.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:71.10,72.17 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:73.10,74.16 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:75.10,76.88 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:80.2,83.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:86.3,86.27 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:83.17,85.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:86.27,87.42 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:90.4,91.54 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:87.42,89.5 1 0 
+github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:91.54,93.5 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:99.2,102.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:105.3,105.27 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:102.17,104.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:105.27,106.42 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:109.4,110.55 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:106.42,108.5 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go:110.55,112.5 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:37.46,39.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:41.46,43.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:45.68,47.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:49.80,50.44 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:53.2,53.32 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:50.44,52.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:56.42,58.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:60.45,62.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:64.46,66.2 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:68.45,70.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:72.66,74.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:78.43,80.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:83.71,85.2 1 1 
+github.com/filecoin-project/go-data-transfer/message/transfer_request.go:88.91,89.22 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:92.2,92.46 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:89.22,91.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:95.49,97.2 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:100.47,101.21 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:104.2,104.18 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:101.21,103.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:108.59,109.21 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:112.2,115.16 4 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:118.2,118.29 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:109.21,111.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:115.16,117.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:122.45,124.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:127.46,129.2 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request.go:133.54,140.2 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:18.58,19.14 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:23.2,23.61 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:27.2,31.19 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:43.2,43.100 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:48.2,48.49 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:53.2,53.49 1 1 
+github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:58.2,58.49 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:63.2,63.46 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:68.2,68.47 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:73.2,73.33 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:77.2,77.104 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:80.2,80.61 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:86.2,86.102 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:91.2,91.56 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:94.2,94.12 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:19.14,22.3 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:23.61,25.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:31.19,32.50 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:32.50,34.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:35.8,36.62 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:36.62,38.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:43.100,45.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:48.49,50.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:53.49,55.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:58.49,60.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:63.46,65.3 1 0 
+github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:68.47,70.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:73.33,75.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:77.104,79.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:80.61,82.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:86.102,88.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:91.56,93.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:97.60,104.16 5 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:107.2,107.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:111.2,111.17 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:153.2,154.16 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:157.2,157.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:160.2,160.15 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:170.2,171.16 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:174.2,174.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:177.2,177.15 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:187.2,188.16 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:191.2,191.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:194.2,194.15 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:255.2,255.12 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:104.16,106.3 1 0 
+github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:107.25,109.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:111.17,113.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:117.2,120.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:123.3,123.27 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:120.17,122.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:123.27,124.42 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:128.4,129.18 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:133.4,133.15 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:124.42,126.5 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:129.18,131.5 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:139.2,142.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:145.3,145.32 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:148.3,148.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:142.17,144.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:145.32,147.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:154.16,156.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:157.25,159.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:161.10,162.17 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:163.10,164.16 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:165.10,166.88 
1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:171.16,173.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:174.25,176.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:178.10,179.17 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:180.10,181.16 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:182.10,183.88 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:188.16,190.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:191.25,193.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:195.10,196.17 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:197.10,198.16 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:199.10,200.88 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:204.2,208.50 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:208.50,210.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:214.2,218.51 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:218.51,220.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:224.2,226.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:230.3,230.45 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:226.17,228.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:234.2,237.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:240.3,240.32 1 1 
+github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:243.3,243.27 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:237.17,239.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:240.32,242.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:248.2,250.60 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go:250.60,252.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:25.68,27.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:30.48,32.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:35.44,37.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:40.47,42.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:45.47,47.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:50.47,52.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:55.49,57.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:59.54,62.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:65.47,67.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:69.79,71.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:73.99,74.22 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:77.2,77.46 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:74.22,76.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:80.47,82.2 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:84.57,86.2 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response.go:90.56,97.2 2 1 
+github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:18.59,19.14 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:23.2,23.62 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:27.2,31.100 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:36.2,36.49 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:41.2,41.49 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:47.2,47.102 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:52.2,52.46 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:57.2,57.33 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:61.2,61.104 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:64.2,64.61 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:67.2,67.12 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:19.14,22.3 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:23.62,25.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:31.100,33.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:36.49,38.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:41.49,43.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:47.102,49.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:52.46,54.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:57.33,59.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:61.104,63.3 1 0 
+github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:64.61,66.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:70.61,77.16 5 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:80.2,80.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:84.2,84.16 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:104.2,105.16 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:108.2,108.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:111.2,111.15 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:121.2,122.16 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:125.2,125.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:128.2,128.15 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:170.2,170.12 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:77.16,79.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:80.25,82.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:84.16,86.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:90.2,93.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:96.3,96.32 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:99.3,99.25 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:93.17,95.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:96.32,98.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:105.16,107.3 1 0 
+github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:108.25,110.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:112.10,113.17 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:114.10,115.16 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:116.10,117.88 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:122.16,124.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:125.25,127.3 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:129.10,130.17 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:131.10,132.16 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:133.10,134.88 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:138.2,141.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:144.3,144.32 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:147.3,147.27 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:141.17,143.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:144.32,146.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:152.2,156.50 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:156.50,158.4 1 0 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:162.2,164.17 2 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:168.3,168.45 1 1 +github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go:164.17,166.4 1 0 
+github.com/filecoin-project/go-data-transfer/message/message.go:29.204,31.16 2 1 +github.com/filecoin-project/go-data-transfer/message/message.go:34.2,34.26 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:37.2,38.16 2 1 +github.com/filecoin-project/go-data-transfer/message/message.go:42.2,43.15 2 1 +github.com/filecoin-project/go-data-transfer/message/message.go:49.2,57.8 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:31.16,33.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:34.26,36.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:38.16,40.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:43.15,45.3 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:45.8,47.3 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:61.91,65.2 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:68.69,73.2 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:76.84,82.2 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:85.142,87.16 2 0 +github.com/filecoin-project/go-data-transfer/message/message.go:90.2,95.8 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:87.16,89.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:99.192,101.16 2 0 +github.com/filecoin-project/go-data-transfer/message/message.go:104.2,111.8 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:101.16,103.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:115.188,117.16 2 1 +github.com/filecoin-project/go-data-transfer/message/message.go:120.2,127.8 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:117.16,119.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:131.198,133.16 2 0 +github.com/filecoin-project/go-data-transfer/message/message.go:136.2,143.8 1 0 
+github.com/filecoin-project/go-data-transfer/message/message.go:133.16,135.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:147.86,153.2 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:156.71,161.2 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:164.195,166.16 2 1 +github.com/filecoin-project/go-data-transfer/message/message.go:169.2,176.8 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:166.16,168.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:180.57,183.16 3 1 +github.com/filecoin-project/go-data-transfer/message/message.go:186.2,186.23 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:189.2,189.28 1 1 +github.com/filecoin-project/go-data-transfer/message/message.go:183.16,185.3 1 0 +github.com/filecoin-project/go-data-transfer/message/message.go:186.23,188.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:52.74,65.2 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:78.34,79.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:82.2,83.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:86.2,90.54 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:93.2,98.29 5 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:111.2,114.12 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:79.21,81.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:83.16,85.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:90.54,92.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:98.29,100.35 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:103.3,104.17 2 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:107.3,109.36 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:100.35,102.4 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:104.17,106.4 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:117.87,119.6 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:119.6,120.10 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:121.21,122.30 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:123.29,124.11 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:127.4,127.19 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:124.11,126.5 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:132.115,135.67 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:143.2,143.61 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:135.67,137.63 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:140.3,140.9 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:137.63,139.4 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:143.61,145.23 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:148.3,149.17 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:145.23,147.4 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:149.17,151.4 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:155.112,156.6 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:156.6,159.9 3 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:163.3,165.18 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:168.3,168.10 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:159.9,162.4 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:165.18,167.4 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:169.21,170.58 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:171.18,171.18 0 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:179.9,180.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:183.2,184.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:187.2,187.25 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:191.2,193.48 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:196.2,196.53 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:180.21,182.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:184.16,186.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:187.25,189.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:193.48,195.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:203.9,204.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:207.2,208.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:211.2,212.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:219.2,219.25 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:222.2,225.48 3 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:230.2,230.70 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:204.21,206.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:208.16,210.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:212.16,214.17 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:217.3,217.42 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:214.17,216.4 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:219.25,221.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:225.48,229.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:234.90,235.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:238.2,239.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:242.2,242.25 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:252.2,253.48 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:256.2,257.54 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:235.21,237.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:239.16,241.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:242.25,246.10 4 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:249.3,250.13 2 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:246.10,248.4 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:253.48,255.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:262.65,265.8 3 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:268.2,268.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:265.8,267.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:272.78,273.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:276.2,285.12 10 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:273.21,275.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:289.110,291.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:294.2,297.12 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:291.16,293.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:300.135,304.20 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:308.2,310.25 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:317.2,321.16 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:325.2,326.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:330.2,331.8 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:334.2,334.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:304.20,306.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:310.25,313.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:313.8,316.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:321.16,324.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:326.16,329.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:331.8,333.3 1 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:337.160,342.9 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:346.2,347.48 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:352.2,352.34 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:342.9,344.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:347.48,350.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:352.34,354.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:357.158,360.9 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:364.2,367.38 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:370.2,373.48 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:378.2,378.34 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:382.2,382.16 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:360.9,363.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:367.38,369.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:373.48,376.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:378.34,380.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:382.16,384.17 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:388.3,388.43 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:384.17,387.4 2 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:394.127,398.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:404.2,404.16 1 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:408.2,410.21 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:422.2,422.28 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:431.2,431.48 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:436.2,436.34 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:440.2,442.48 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:450.2,453.21 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:458.2,459.8 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:462.2,463.31 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:398.16,401.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:404.16,406.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:410.21,415.3 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:415.8,420.3 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:422.28,424.26 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:428.3,428.43 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:424.26,427.4 2 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:431.48,434.3 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:436.34,438.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:442.48,446.34 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:446.34,448.4 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:453.21,455.3 1 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:455.8,457.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:459.8,461.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:468.128,473.9 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:477.2,477.42 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:473.9,475.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:477.42,480.17 3 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:480.17,482.4 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:486.85,495.8 9 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:501.2,501.24 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:495.8,497.17 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:497.17,499.4 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:504.163,509.9 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:513.2,515.28 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:524.2,524.48 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:509.9,511.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:515.28,517.26 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:521.3,521.43 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:517.26,520.4 2 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:524.48,526.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:531.139,537.9 4 1 
+github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:541.2,543.28 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:552.2,552.16 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:537.9,539.3 1 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:543.28,545.26 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:549.3,549.53 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:545.26,548.4 2 0 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:552.16,554.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:557.136,561.16 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:566.2,566.16 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:570.2,570.21 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:581.2,581.95 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:585.2,586.59 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:561.16,563.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:566.16,568.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:570.21,573.96 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:576.3,577.53 2 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:573.96,575.4 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:581.95,583.3 1 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:589.92,594.8 4 1 +github.com/filecoin-project/go-data-transfer/transport/graphsync/graphsync.go:594.8,596.3 1 1 
+github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:24.60,30.2 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:40.113,42.2 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:47.39,50.16 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:54.2,54.53 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:63.2,64.18 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:50.16,52.3 1 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:54.53,55.37 1 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:59.3,59.13 1 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:55.37,58.4 2 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:68.65,71.2 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:73.89,75.2 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:78.75,81.27 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:86.2,86.6 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:81.27,84.3 2 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:86.6,88.17 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:97.3,101.27 4 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:88.17,89.21 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:94.4,94.10 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:89.21,93.5 3 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:101.27,103.10 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:103.10,104.58 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:104.58,106.6 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:106.11,108.6 1 1 
+github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:110.9,112.10 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:112.10,114.5 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:119.54,121.2 1 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:123.73,125.2 1 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:127.80,129.2 1 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:131.89,132.21 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:136.2,137.34 2 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:140.2,140.53 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:144.2,144.22 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:154.2,154.56 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:157.2,157.12 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:132.21,134.3 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:137.34,139.3 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:140.53,142.3 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:145.28,146.38 1 1 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:150.10,151.73 1 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:146.38,149.4 2 0 +github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go:154.56,156.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:18.70,20.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:23.2,23.10 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:26.2,26.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:20.16,22.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:23.10,25.3 1 0 
+github.com/filecoin-project/go-data-transfer/impl/events.go:29.98,31.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:34.2,34.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:52.2,52.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:31.16,33.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:34.32,37.119 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:42.3,42.34 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:37.119,41.4 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:42.34,44.18 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:49.4,49.14 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:44.18,45.98 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:45.98,47.6 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:55.118,57.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:60.2,60.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:72.2,72.17 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:57.16,59.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:60.32,63.119 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:68.3,68.34 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:63.119,67.4 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:68.34,70.4 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:75.127,76.25 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:80.2,80.21 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:83.2,83.24 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:87.2,87.25 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:90.2,90.24 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:93.2,94.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:97.2,98.16 2 1 
+github.com/filecoin-project/go-data-transfer/impl/events.go:101.2,102.53 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:105.2,105.17 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:76.25,78.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:80.21,82.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:83.24,86.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:87.25,89.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:90.24,92.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:94.16,96.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:98.16,100.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:102.53,104.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:108.105,109.25 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:112.2,112.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:140.2,140.50 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:149.2,149.25 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:152.2,152.28 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:109.25,111.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:112.32,113.37 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:123.3,123.27 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:126.3,126.23 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:133.3,133.27 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:113.37,115.18 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:118.4,119.18 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:115.18,117.5 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:119.18,121.5 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:123.27,125.4 1 1 
+github.com/filecoin-project/go-data-transfer/impl/events.go:126.23,128.18 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:128.18,130.5 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:133.27,135.18 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:135.18,137.5 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:140.50,141.27 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:144.3,145.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:141.27,143.4 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:145.17,147.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:149.25,151.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:155.72,160.2 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:162.87,163.13 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:185.2,185.66 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:163.13,164.33 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:183.3,183.41 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:164.33,166.18 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:169.4,169.18 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:175.4,175.22 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:181.4,181.38 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:166.18,168.5 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:169.18,170.98 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:170.98,173.6 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:175.22,176.23 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:179.5,179.37 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:176.23,178.6 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:188.132,191.19 3 1 
+github.com/filecoin-project/go-data-transfer/impl/events.go:194.2,194.17 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:191.19,193.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:199.64,202.19 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:205.2,205.17 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:202.19,204.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:209.69,212.27 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:216.2,216.98 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:220.2,221.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:225.2,226.48 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:229.2,231.19 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:237.2,237.49 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:240.2,241.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:245.2,246.41 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:252.2,252.27 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:212.27,214.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:216.98,218.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:221.16,223.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:226.48,228.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:231.19,233.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:233.17,235.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:237.49,239.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:241.9,244.3 2 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:246.41,248.17 2 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:248.17,250.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:257.69,260.16 2 1 
+github.com/filecoin-project/go-data-transfer/impl/events.go:264.2,265.48 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:268.2,271.23 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:279.2,280.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:283.2,283.19 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:289.2,289.48 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:292.2,293.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:297.2,298.41 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:304.2,304.27 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:260.16,262.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:265.48,267.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:271.23,274.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:274.8,277.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:280.16,282.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:283.19,285.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:285.17,287.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:289.48,291.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:293.9,296.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:298.41,300.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:300.17,302.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:317.76,319.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:322.2,325.12 4 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:331.2,332.27 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:319.16,321.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:325.12,327.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:327.8,329.3 1 1 
+github.com/filecoin-project/go-data-transfer/impl/events.go:342.91,344.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:347.2,351.27 4 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:344.16,346.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:354.130,356.18 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:362.2,362.62 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:356.18,358.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:358.17,360.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:365.152,367.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:370.2,370.46 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:373.2,373.61 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:367.16,369.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:370.46,372.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:376.157,379.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:382.2,382.19 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:388.2,388.40 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:396.2,396.22 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:403.2,403.31 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:379.16,381.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:382.19,384.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:384.17,386.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:388.40,390.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:393.3,393.44 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:390.17,392.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:396.22,398.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:401.3,401.45 1 1 
+github.com/filecoin-project/go-data-transfer/impl/events.go:398.17,400.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/events.go:406.95,409.118 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:414.2,414.19 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:421.2,421.55 1 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:409.118,413.3 3 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:414.19,416.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/events.go:416.17,418.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:48.75,50.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:53.2,54.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:57.2,58.12 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:50.9,52.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:54.9,56.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:62.203,75.16 3 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:78.2,79.15 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:75.16,77.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:82.100,84.10 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:87.2,87.22 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:84.10,86.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:90.84,92.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:92.16,94.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:98.52,102.2 3 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:105.51,107.16 2 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:111.2,112.33 2 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:118.2,118.15 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:107.16,109.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:112.33,113.63 1 0 
+github.com/filecoin-project/go-data-transfer/impl/impl.go:113.63,115.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:126.120,128.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:131.2,131.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:128.16,130.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:136.178,138.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:142.2,144.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:147.2,148.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:152.2,153.79 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:158.2,158.18 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:138.16,140.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:144.16,146.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:148.9,151.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:153.79,157.3 3 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:163.178,165.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:169.2,171.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:174.2,175.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:179.2,180.118 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:185.2,185.18 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:165.16,167.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:171.16,173.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:175.9,178.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:180.118,184.3 3 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:189.122,191.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:194.2,194.37 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:197.2,198.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:201.2,201.105 1 1 
+github.com/filecoin-project/go-data-transfer/impl/impl.go:206.2,206.50 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:191.16,193.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:194.37,196.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:198.16,200.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:201.105,205.3 3 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:210.100,212.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:215.2,216.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:220.2,220.113 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:226.2,226.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:212.16,214.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:216.16,218.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:220.113,224.3 3 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:230.100,233.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:237.2,238.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:242.2,242.112 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:248.2,248.22 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:233.9,235.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:238.16,240.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:242.112,246.3 3 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:252.101,254.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:258.2,259.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:263.2,263.23 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:254.9,256.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:259.16,261.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:267.111,269.16 2 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:272.2,272.22 1 0 
+github.com/filecoin-project/go-data-transfer/impl/impl.go:269.16,271.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:276.98,278.2 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:281.121,283.2 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:290.117,292.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:295.2,295.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:292.16,294.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:300.90,302.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:305.2,305.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:302.16,304.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:310.132,312.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:315.2,315.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:312.16,314.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:319.102,321.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:327.2,327.52 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:332.2,333.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:344.2,344.12 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:321.16,323.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:327.52,329.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/impl.go:334.30,335.55 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:336.30,337.55 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:338.29,339.48 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:340.29,341.48 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:347.102,349.22 2 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:360.2,360.27 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:365.2,365.31 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:349.22,351.28 1 1 
+github.com/filecoin-project/go-data-transfer/impl/impl.go:356.3,356.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:351.28,353.4 1 1 +github.com/filecoin-project/go-data-transfer/impl/impl.go:360.27,362.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:23.33,25.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:25.16,27.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:30.112,34.42 3 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:45.2,45.21 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:67.2,67.41 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:71.2,71.23 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:76.2,76.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:34.42,36.17 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:39.3,39.61 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:42.3,42.19 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:36.17,38.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:39.61,41.4 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:45.21,46.94 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:46.94,48.28 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:56.4,57.150 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:48.28,50.19 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:53.5,53.43 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:50.19,52.6 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:57.150,59.5 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:60.9,61.94 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:61.94,63.5 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:67.41,69.3 1 1 
+github.com/filecoin-project/go-data-transfer/impl/receiver.go:71.23,74.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:84.34,86.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:86.16,88.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:93.40,96.34 3 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:99.2,99.16 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:103.2,103.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:96.34,98.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:99.16,102.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:106.44,108.2 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:112.33,115.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:121.2,122.34 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:128.2,128.55 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:134.2,134.52 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:140.2,140.52 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:145.2,145.52 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:115.16,118.3 2 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:122.34,125.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:128.55,131.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:134.52,137.3 2 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:140.52,143.3 2 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:146.29,147.72 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:150.29,151.72 1 1 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:154.10,155.54 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:147.72,149.4 1 0 +github.com/filecoin-project/go-data-transfer/impl/receiver.go:151.72,153.4 1 0 
+github.com/filecoin-project/go-data-transfer/impl/restart.go:33.111,34.65 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:39.2,41.82 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:34.65,36.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:44.111,45.64 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:49.2,52.82 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:45.64,47.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:55.96,62.16 3 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:67.2,67.130 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:71.2,71.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:62.16,64.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:67.130,69.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:74.104,82.16 7 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:86.2,87.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:91.2,92.79 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:98.2,98.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:82.16,84.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:87.9,90.3 2 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:92.79,96.3 3 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:101.104,109.16 7 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:113.2,114.9 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:118.2,119.137 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:125.2,125.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:109.16,111.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:114.9,117.3 2 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:119.137,123.3 3 0 
+github.com/filecoin-project/go-data-transfer/impl/restart.go:128.143,131.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:136.2,136.52 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:141.2,141.48 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:146.2,146.40 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:151.2,152.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:155.2,155.51 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:159.2,160.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:163.2,164.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:168.2,168.36 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:172.2,172.12 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:131.16,133.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:136.52,138.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:141.48,143.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:146.40,148.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:152.16,154.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/restart.go:155.51,157.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:160.16,162.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:164.16,166.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/restart.go:168.36,170.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:18.59,19.24 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:24.2,24.14 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:19.24,20.14 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:20.14,22.4 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:34.173,36.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:39.2,40.91 2 1 
+github.com/filecoin-project/go-data-transfer/impl/utils.go:36.16,38.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/utils.go:43.169,47.26 4 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:50.2,50.15 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:54.2,54.11 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:57.2,57.92 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:47.26,49.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:50.15,52.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:54.11,56.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:60.149,64.26 4 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:67.2,67.87 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:64.26,66.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:70.61,71.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:74.2,74.41 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:71.32,73.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:77.60,78.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:81.2,81.40 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:78.32,80.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:84.66,85.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:88.2,88.41 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:85.32,87.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:91.65,92.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:95.2,95.40 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:92.32,94.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:98.83,99.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:102.2,102.47 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:99.32,101.3 1 1 
+github.com/filecoin-project/go-data-transfer/impl/utils.go:105.82,106.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:109.2,109.46 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:106.32,108.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:112.83,113.32 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:116.2,116.40 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:113.32,115.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:119.107,122.10 3 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:125.2,126.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:129.2,129.51 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:122.10,124.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/utils.go:126.16,128.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/utils.go:132.122,135.10 3 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:138.2,139.16 2 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:142.2,142.51 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:135.10,137.3 1 1 +github.com/filecoin-project/go-data-transfer/impl/utils.go:139.16,141.3 1 0 +github.com/filecoin-project/go-data-transfer/impl/environment.go:13.63,15.2 1 0 +github.com/filecoin-project/go-data-transfer/impl/environment.go:17.70,19.2 1 1 +github.com/filecoin-project/go-data-transfer/impl/environment.go:21.44,23.2 1 1 +github.com/filecoin-project/go-data-transfer/impl/environment.go:25.75,27.2 1 1 diff --git a/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding.go b/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding.go index 7cca7d73c0..d2cb84d6e7 100644 --- a/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding.go +++ b/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding.go @@ -47,7 +47,7 @@ type Decoder interface { func NewDecoder(decodeType Encodable) 
(Decoder, error) { // check if type is ipld.Node, if so, just use style if ipldDecodable, ok := decodeType.(ipld.Node); ok { - return &ipldDecoder{ipldDecodable.Style()}, nil + return &ipldDecoder{ipldDecodable.Prototype()}, nil } // check if type is a pointer, as we need that to make new copies // for cborgen types & regular IPLD types @@ -73,7 +73,7 @@ func NewDecoder(decodeType Encodable) (Decoder, error) { } type ipldDecoder struct { - style ipld.NodeStyle + style ipld.NodePrototype } func (decoder *ipldDecoder) DecodeFromCbor(encoded []byte) (Encodable, error) { diff --git a/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding_test.go b/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding_test.go deleted file mode 100644 index 43a66f5d89..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/encoding/encoding_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package encoding_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/filecoin-project/go-data-transfer/encoding/testdata" -) - -func TestRoundTrip(t *testing.T) { - testCases := map[string]struct { - val encoding.Encodable - }{ - "can encode/decode IPLD prime types": { - val: testdata.Prime, - }, - "can encode/decode cbor-gen types": { - val: testdata.Cbg, - }, - "can encode/decode old ipld format types": { - val: testdata.Standard, - }, - } - for testCase, data := range testCases { - t.Run(testCase, func(t *testing.T) { - encoded, err := encoding.Encode(data.val) - require.NoError(t, err) - decoder, err := encoding.NewDecoder(data.val) - require.NoError(t, err) - decoded, err := decoder.DecodeFromCbor(encoded) - require.NoError(t, err) - require.Equal(t, data.val, decoded) - }) - } -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/encoding/testdata/testdata.go b/vendor/github.com/filecoin-project/go-data-transfer/encoding/testdata/testdata.go deleted file 
mode 100644 index c76abc247c..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/encoding/testdata/testdata.go +++ /dev/null @@ -1,37 +0,0 @@ -package testdata - -import ( - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/ipld/go-ipld-prime/fluent" - basicnode "github.com/ipld/go-ipld-prime/node/basic" -) - -// Prime = an instance of an ipld prime piece of data -var Prime = fluent.MustBuildMap(basicnode.Style.Map, 2, func(na fluent.MapAssembler) { - nva := na.AssembleEntry("X") - nva.AssignInt(100) - nva = na.AssembleEntry("Y") - nva.AssignString("appleSauce") -}) - -type standardType struct { - X int - Y string -} - -func init() { - cbor.RegisterCborType(standardType{}) -} - -// Standard = an instance that is neither ipld prime nor cbor -var Standard *standardType = &standardType{X: 100, Y: "appleSauce"} - -//go:generate cbor-gen-for cbgType - -type cbgType struct { - X uint64 - Y string -} - -// Cbg = an instance of a cbor-gen type -var Cbg *cbgType = &cbgType{X: 100, Y: "appleSauce"} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/encoding/testdata/testdata_cbor_gen.go b/vendor/github.com/filecoin-project/go-data-transfer/encoding/testdata/testdata_cbor_gen.go deleted file mode 100644 index 67c6c68878..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/encoding/testdata/testdata_cbor_gen.go +++ /dev/null @@ -1,84 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package testdata - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *cbgType) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.X (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.X))); err != nil { - return err - } - - // t.Y (string) (string) - if len(t.Y) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Y was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Y)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Y)); err != nil { - return err - } - return nil -} - -func (t *cbgType) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.X (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.X = uint64(extra) - - } - // t.Y (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Y = string(sval) - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/errors.go b/vendor/github.com/filecoin-project/go-data-transfer/errors.go new file mode 100644 index 0000000000..be3b546225 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/errors.go @@ -0,0 +1,41 @@ +package datatransfer + +type errorType string + +func (e errorType) Error() string { + return string(e) +} + +// ErrHandlerAlreadySet means an event handler was already set for this 
instance of +// hooks +const ErrHandlerAlreadySet = errorType("already set event handler") + +// ErrHandlerNotSet means you cannot issue commands to this interface because the +// handler has not been set +const ErrHandlerNotSet = errorType("event handler has not been set") + +// ErrChannelNotFound means the channel this command was issued for does not exist +const ErrChannelNotFound = errorType("channel not found") + +// ErrPause is a special error that the DataReceived / DataSent hooks can +// use to pause the channel +const ErrPause = errorType("pause channel") + +// ErrResume is a special error that the RequestReceived / ResponseReceived hooks can +// use to resume the channel +const ErrResume = errorType("resume channel") + +// ErrIncomplete indicates a channel did not finish transferring data successfully +const ErrIncomplete = errorType("incomplete response") + +// ErrRejected indicates a request was not accepted +const ErrRejected = errorType("response rejected") + +// ErrUnsupported indicates an operation is not supported by the transport protocol +const ErrUnsupported = errorType("unsupported") + +// ErrDisconnected indicates the other peer may have hung up and you should try restarting the channel. +const ErrDisconnected = errorType("other peer appears to have hung up. 
restart Channel") + +// ErrRemoved indicates the channel was inactive long enough that it was put in a permaneant error state +const ErrRemoved = errorType("channel removed due to inactivity") diff --git a/vendor/github.com/filecoin-project/go-data-transfer/events.go b/vendor/github.com/filecoin-project/go-data-transfer/events.go new file mode 100644 index 0000000000..6841bcb631 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/events.go @@ -0,0 +1,109 @@ +package datatransfer + +import "time" + +// EventCode is a name for an event that occurs on a data transfer channel +type EventCode int + +const ( + // Open is an event occurs when a channel is first opened + Open EventCode = iota + + // Accept is an event that emits when the data transfer is first accepted + Accept + + // Restart is an event that emits when the data transfer is restarted + Restart + + // DataReceived is emitted when data is received on the channel from a remote peer + DataReceived + + // DataSent is emitted when data is sent on the channel to the remote peer + DataSent + + // Cancel indicates one side has cancelled the transfer + Cancel + + // Error is an event that emits when an error occurs in a data transfer + Error + + // CleanupComplete emits when a request is cleaned up + CleanupComplete + + // NewVoucher means we have a new voucher on this channel + NewVoucher + + // NewVoucherResult means we have a new voucher result on this channel + NewVoucherResult + + // PauseInitiator emits when the data sender pauses transfer + PauseInitiator + + // ResumeInitiator emits when the data sender resumes transfer + ResumeInitiator + + // PauseResponder emits when the data receiver pauses transfer + PauseResponder + + // ResumeResponder emits when the data receiver resumes transfer + ResumeResponder + + // FinishTransfer emits when the initiator has completed sending/receiving data + FinishTransfer + + // ResponderCompletes emits when the initiator receives a message that the 
responder is finished + ResponderCompletes + + // ResponderBeginsFinalization emits when the initiator receives a message that the responder is finilizing + ResponderBeginsFinalization + + // BeginFinalizing emits when the responder completes its operations but awaits a response from the + // initiator + BeginFinalizing + + // Disconnected emits when we are not able to connect to the other party + Disconnected + + // Complete is emitted when a data transfer is complete + Complete + + // CompleteCleanupOnRestart is emitted when a data transfer channel is restarted to signal + // that channels that were cleaning up should finish cleanup + CompleteCleanupOnRestart +) + +// Events are human readable names for data transfer events +var Events = map[EventCode]string{ + Open: "Open", + Accept: "Accept", + DataSent: "DataSent", + DataReceived: "DataReceived", + Cancel: "Cancel", + Error: "Error", + CleanupComplete: "CleanupComplete", + NewVoucher: "NewVoucher", + NewVoucherResult: "NewVoucherResult", + PauseInitiator: "PauseInitiator", + ResumeInitiator: "ResumeInitiator", + PauseResponder: "PauseResponder", + ResumeResponder: "ResumeResponder", + FinishTransfer: "FinishTransfer", + ResponderBeginsFinalization: "ResponderBeginsFinalization", + ResponderCompletes: "ResponderCompletes", + BeginFinalizing: "BeginFinalizing", + Complete: "Complete", + CompleteCleanupOnRestart: "CompleteCleanupOnRestart", +} + +// Event is a struct containing information about a data transfer event +type Event struct { + Code EventCode // What type of event it is + Message string // Any clarifying information about the event + Timestamp time.Time // when the event happened +} + +// Subscriber is a callback that is called when events are emitted +type Subscriber func(event Event, channelState ChannelState) + +// Unsubscribe is a function that gets called to unsubscribe from data transfer events +type Unsubscribe func() diff --git a/vendor/github.com/filecoin-project/go-data-transfer/go.mod 
b/vendor/github.com/filecoin-project/go-data-transfer/go.mod index f3b7036ada..f2340f32a4 100644 --- a/vendor/github.com/filecoin-project/go-data-transfer/go.mod +++ b/vendor/github.com/filecoin-project/go-data-transfer/go.mod @@ -3,30 +3,34 @@ module github.com/filecoin-project/go-data-transfer go 1.13 require ( + github.com/filecoin-project/go-ds-versioning v0.1.0 + github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099 + github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e + github.com/hashicorp/go-multierror v1.1.0 github.com/ipfs/go-block-format v0.0.2 github.com/ipfs/go-blockservice v0.1.3 - github.com/ipfs/go-cid v0.0.5 - github.com/ipfs/go-datastore v0.4.4 - github.com/ipfs/go-graphsync v0.0.6-0.20200527235412-ea9535659648 - github.com/ipfs/go-ipfs-blockstore v0.1.4 + github.com/ipfs/go-cid v0.0.7 + github.com/ipfs/go-datastore v0.4.5 + github.com/ipfs/go-graphsync v0.3.0 + github.com/ipfs/go-ipfs-blockstore v1.0.1 github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 github.com/ipfs/go-ipfs-exchange-offline v0.0.1 github.com/ipfs/go-ipfs-files v0.0.8 github.com/ipfs/go-ipld-cbor v0.0.4 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log v1.0.2 - github.com/ipfs/go-merkledag v0.3.1 + github.com/ipfs/go-log/v2 v2.0.3 + github.com/ipfs/go-merkledag v0.3.2 github.com/ipfs/go-unixfs v0.2.4 - github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e + github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c + github.com/jpillora/backoff v1.0.0 github.com/libp2p/go-libp2p v0.6.0 github.com/libp2p/go-libp2p-core v0.5.0 - github.com/prometheus/common v0.10.0 github.com/stretchr/testify 
v1.5.1 - github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105 - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 + go.uber.org/atomic v1.6.0 + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 ) diff --git a/vendor/github.com/filecoin-project/go-data-transfer/go.sum b/vendor/github.com/filecoin-project/go-data-transfer/go.sum index 2285a1dbbf..ceb12dbac2 100644 --- a/vendor/github.com/filecoin-project/go-data-transfer/go.sum +++ b/vendor/github.com/filecoin-project/go-data-transfer/go.sum @@ -1,21 +1,30 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod 
h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= @@ -31,6 +40,10 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= @@ -57,18 +70,24 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dustin/go-humanize 
v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= +github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= +github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe h1:dF8u+LEWeIcTcfUcCf3WFVlc81Fr2JKg8zPzIbBDKDw= +github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= +github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= +github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= 
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= @@ -76,20 +95,37 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= 
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= @@ -99,10 +135,14 @@ github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvK github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099 h1:vQqOW42RRM5LoM/1K5dK940VipLqpH8lEVGrMz+mNjU= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1 h1:F9k+7wv5OIk1zcq23QpdiL0hfDuXPjuOmMNaC6fgQ0Q= +github.com/hannahhoward/cbor-gen-for v0.0.0-20200817222906-ea96cece81f1/go.mod h1:jvfsLIxk0fY/2BKSQ1xf2406AKA5dwMmKKv0ADcOfN8= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -113,6 +153,7 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= @@ -130,9 +171,14 @@ github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYI github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= 
+github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-cid v0.0.7 h1:ysQJVJA3fNDF1qigJbsSQOdjhVLsOEoPdh0+R97k3jY= +github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= @@ -140,8 +186,11 @@ github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRV github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4 h1:rjvQ9+muFaJ+QZ7dN5B1MSDNQ0JVZKkkES/rMZmA8X8= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= +github.com/ipfs/go-datastore v0.4.5 h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg= +github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -149,12 +198,14 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= 
github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-graphsync v0.0.6-0.20200527235412-ea9535659648 h1:OEyYvBH/4gUd4Hskef1CRBqQ4fpnlwQW3pj8taPt8ko= -github.com/ipfs/go-graphsync v0.0.6-0.20200527235412-ea9535659648/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= +github.com/ipfs/go-graphsync v0.3.0 h1:I6Y20kSuCWkUvPoUWo4V3am704/9QjgDVVkf0zIV8+8= +github.com/ipfs/go-graphsync v0.3.0/go.mod h1:gEBvJUNelzMkaRPJTpg/jaKN4AQW/7wDWu0K92D8o10= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= +github.com/ipfs/go-ipfs-blockstore v1.0.1 h1:fnuVj4XdZp4yExhd0CnUwAiMNJHiPnfInhiuwz4lW1w= +github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw= @@ -168,6 +219,8 @@ github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= +github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= +github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= github.com/ipfs/go-ipfs-exchange-interface v0.0.1 
h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= @@ -196,13 +249,22 @@ github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= +github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= +github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0= github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= +github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= +github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= +github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.2 h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw= github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= +github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= +github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= +github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= 
github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= @@ -214,10 +276,10 @@ github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f h1:XpOuNQ5GbXxUcSukbQcW9jkE7REpaFGJU2/T00fo9kA= +github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6 h1:6Mq+tZGSEMEoJJ1NbJRhddeelkXZcU8yfH/ZRYUo/Es= +github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= @@ -232,14 +294,18 @@ github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod 
github.com/jbenet/goprocess v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr10= github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= +github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= +github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= @@ -251,9 +317,10 @@ github.com/koron/go-ssdp 
v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muM github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d h1:68u9r4wEvL3gYg2jvAOgROwZ3H+Y3hIDk4tbbmIjcYQ= github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -394,11 +461,13 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= @@ -411,8 +480,6 @@ github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKU github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= @@ -421,6 +488,8 @@ github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod 
h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= @@ -445,9 +514,12 @@ github.com/multiformats/go-multiaddr-net v0.1.2 h1:P7zcBH9FRETdPkDrylcXVjQLQ2t1J github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= @@ -459,7 +531,6 @@ github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXS github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod 
h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -474,7 +545,6 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -486,26 +556,16 @@ github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXx github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1 
h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= @@ -514,6 +574,8 @@ github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= @@ -526,6 +588,7 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/src-d/envconfig v1.0.0 h1:/AJi6DtjFhZKNx3OB2qMsq7y4yT5//AeSZIe7rk+PX8= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -544,11 +607,11 @@ github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish 
v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a h1:xc8sbWMwBsvi8OrxFZR8zxw/fdCneHBLFDJJaV14eaE= -github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= +github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105 h1:Sh6UG5dW5xW8Ek2CtRGq4ipdEvvx9hOyBJjEGyTYDl0= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163 h1:TtcUeY2XZSriVWR1pXyfCBWIf/NGC2iUdNw1lofUjUU= +github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= @@ -562,9 +625,12 @@ github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84 github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7 
h1:E9S12nwJwEOXe2d6gT6qxdvqMnNq+VnSsKPgm2ZZNds= github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= +github.com/x-cray/logrus-prefixed-formatter v0.5.2 h1:00txxvfBM9muc0jiLIEAkAcIMJzfthRT6usrui8uGmg= github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -574,14 +640,20 @@ go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= +go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -592,6 +664,7 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= @@ -599,59 +672,107 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d 
h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -659,27 +780,73 @@ golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361 h1:RIIXAeV6GvDBuADKumTODatUqANFZ+5BPMnzsy4hulY= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3 h1:r3P/5xOq/dK1991B65Oy6E1fRF/2d/fSYZJ/fXGVfJc= +golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= @@ -688,6 +855,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/src-d/go-cli.v0 
v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= +gopkg.in/src-d/go-log.v1 v1.0.1 h1:heWvX7J6qbGWbeFS/aRmiy1eYaT+QMV6wNvHDyMjQV4= gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -695,6 +863,14 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/dagservice/dagservice.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/dagservice/dagservice.go 
deleted file mode 100644 index 7f228cc9e4..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/dagservice/dagservice.go +++ /dev/null @@ -1,87 +0,0 @@ -package datatransfer - -import ( - "context" - "time" - - "github.com/ipfs/go-cid" - ipldformat "github.com/ipfs/go-ipld-format" - "github.com/ipfs/go-merkledag" - "github.com/ipld/go-ipld-prime" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/channels" -) - -// This file implements a VERY simple, incomplete version of the data transfer -// module that allows us to make the necessary insertions of data transfer -// functionality into the storage market -// It does not: -// -- actually validate requests -// -- support Push requests -// -- support multiple subscribers -// -- do any actual network coordination or use Graphsync - -type dagserviceImpl struct { - dag ipldformat.DAGService - subscriber datatransfer.Subscriber -} - -// NewDAGServiceDataTransfer returns a data transfer manager based on -// an IPLD DAGService -func NewDAGServiceDataTransfer(dag ipldformat.DAGService) datatransfer.Manager { - return &dagserviceImpl{dag, nil} -} - -// RegisterVoucherType registers a validator for the given voucher type -// will error if voucher type does not implement voucher -// or if there is a voucher type registered with an identical identifier -func (impl *dagserviceImpl) RegisterVoucherType(voucherType datatransfer.Voucher, validator datatransfer.RequestValidator) error { - return nil -} - -// open a data transfer that will send data to the recipient peer and -// transfer parts of the piece that match the selector -func (impl *dagserviceImpl) OpenPushDataChannel(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, Selector ipld.Node) (datatransfer.ChannelID, error) { - return datatransfer.ChannelID{}, xerrors.Errorf("not implemented") -} - -// 
open a data transfer that will request data from the sending peer and -// transfer parts of the piece that match the selector -func (impl *dagserviceImpl) OpenPullDataChannel(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, Selector ipld.Node) (datatransfer.ChannelID, error) { - ctx, cancel := context.WithCancel(ctx) - go func() { - defer cancel() - err := merkledag.FetchGraph(ctx, baseCid, impl.dag) - event := datatransfer.Event{Timestamp: time.Now()} - if err != nil { - event.Code = datatransfer.Error - event.Message = err.Error() - } else { - event.Code = datatransfer.Complete - } - impl.subscriber(event, channels.ChannelState{Channel: channels.NewChannel(0, baseCid, Selector, voucher, to, "", 0)}) - }() - return datatransfer.ChannelID{}, nil -} - -// close an open channel (effectively a cancel) -func (impl *dagserviceImpl) CloseDataTransferChannel(x datatransfer.ChannelID) {} - -// get status of a transfer -func (impl *dagserviceImpl) TransferChannelStatus(x datatransfer.ChannelID) datatransfer.Status { - return datatransfer.ChannelNotFoundError -} - -// get notified when certain types of events happen -func (impl *dagserviceImpl) SubscribeToEvents(subscriber datatransfer.Subscriber) datatransfer.Unsubscribe { - impl.subscriber = subscriber - return func() {} -} - -// get all in progress transfers -func (impl *dagserviceImpl) InProgressChannels() map[datatransfer.ChannelID]datatransfer.ChannelState { - return nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/extension/gsextension.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/extension/gsextension.go deleted file mode 100644 index 658096e8cf..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/extension/gsextension.go +++ /dev/null @@ -1,62 +0,0 @@ -package extension - -import ( - "bytes" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/ipfs/go-graphsync" - 
"github.com/libp2p/go-libp2p-core/peer" -) - -const ( - // ExtensionDataTransfer is the identifier for the data transfer extension to graphsync - ExtensionDataTransfer = graphsync.ExtensionName("fil/data-transfer") -) - -//go:generate cbor-gen-for TransferData - -// TransferData is the extension data for -// the graphsync extension. -type TransferData struct { - TransferID uint64 - Initiator peer.ID - IsPull bool -} - -// GetChannelID gets the channelID for this extension, given the peers on either side -func (e TransferData) GetChannelID() datatransfer.ChannelID { - return datatransfer.ChannelID{Initiator: e.Initiator, ID: datatransfer.TransferID(e.TransferID)} -} - -// NewTransferData returns transfer data to encode in a graphsync request -func NewTransferData(transferID datatransfer.TransferID, initiator peer.ID, isPull bool) TransferData { - return TransferData{ - TransferID: uint64(transferID), - Initiator: initiator, - IsPull: isPull, - } -} - -// GsExtended is a small interface used by getExtensionData -type GsExtended interface { - Extension(name graphsync.ExtensionName) ([]byte, bool) -} - -// GetTransferData unmarshals extension data. 
-// Returns: -// * nil + nil if the extension is not found -// * nil + error if the extendedData fails to unmarshal -// * unmarshaled ExtensionDataTransferData + nil if all goes well -func GetTransferData(extendedData GsExtended) (*TransferData, error) { - data, ok := extendedData.Extension(ExtensionDataTransfer) - if !ok { - return nil, nil - } - var extStruct TransferData - - reader := bytes.NewReader(data) - if err := extStruct.UnmarshalCBOR(reader); err != nil { - return nil, err - } - return &extStruct, nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/extension/gsextension_cbor_gen.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/extension/gsextension_cbor_gen.go deleted file mode 100644 index 8d7aeb1b18..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/extension/gsextension_cbor_gen.go +++ /dev/null @@ -1,107 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package extension - -import ( - "fmt" - "io" - - "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *TransferData) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.TransferID (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.TransferID))); err != nil { - return err - } - - // t.Initiator (peer.ID) (string) - if len(t.Initiator) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Initiator was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Initiator)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Initiator)); err != nil { - return err - } - - // t.IsPull (bool) (bool) - if err := cbg.WriteBool(w, t.IsPull); err != nil { - return err - 
} - return nil -} - -func (t *TransferData) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.TransferID (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TransferID = uint64(extra) - - } - // t.Initiator (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Initiator = peer.ID(sval) - } - // t.IsPull (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.IsPull = false - case 21: - t.IsPull = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_impl.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_impl.go deleted file mode 100644 index 26980852ef..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_impl.go +++ /dev/null @@ -1,307 +0,0 @@ -package graphsyncimpl - -import ( - "bytes" - "context" - "errors" - "fmt" - "time" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - logging "github.com/ipfs/go-log" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - datatransfer "github.com/filecoin-project/go-data-transfer" - 
"github.com/filecoin-project/go-data-transfer/channels" - "github.com/filecoin-project/go-data-transfer/impl/graphsync/extension" - "github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks" - "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/network" - "github.com/filecoin-project/go-data-transfer/registry" - "github.com/filecoin-project/go-storedcounter" - "github.com/hannahhoward/go-pubsub" -) - -var log = logging.Logger("graphsync-impl") - -type graphsyncImpl struct { - dataTransferNetwork network.DataTransferNetwork - validatedTypes *registry.Registry - pubSub *pubsub.PubSub - channels *channels.Channels - gs graphsync.GraphExchange - peerID peer.ID - storedCounter *storedcounter.StoredCounter -} - -type internalEvent struct { - evt datatransfer.Event - state datatransfer.ChannelState -} - -func dispatcher(evt pubsub.Event, subscriberFn pubsub.SubscriberFn) error { - ie, ok := evt.(internalEvent) - if !ok { - return errors.New("wrong type of event") - } - cb, ok := subscriberFn.(datatransfer.Subscriber) - if !ok { - return errors.New("wrong type of event") - } - cb(ie.evt, ie.state) - return nil -} - -// NewGraphSyncDataTransfer initializes a new graphsync based data transfer manager -func NewGraphSyncDataTransfer(host host.Host, gs graphsync.GraphExchange, storedCounter *storedcounter.StoredCounter) datatransfer.Manager { - dataTransferNetwork := network.NewFromLibp2pHost(host) - impl := &graphsyncImpl{ - dataTransferNetwork: dataTransferNetwork, - validatedTypes: registry.NewRegistry(), - pubSub: pubsub.New(dispatcher), - channels: channels.New(), - gs: gs, - peerID: host.ID(), - storedCounter: storedCounter, - } - - dtReceiver := &graphsyncReceiver{impl} - dataTransferNetwork.SetDelegate(dtReceiver) - - hooksManager := hooks.NewManager(host.ID(), impl) - hooksManager.RegisterHooks(gs) - return impl -} - -func (impl *graphsyncImpl) OnRequestSent(chid datatransfer.ChannelID) error { - _, err := 
impl.channels.GetByID(chid) - return err -} - -func (impl *graphsyncImpl) OnDataReceived(chid datatransfer.ChannelID, link ipld.Link, size uint64) error { - _, err := impl.channels.IncrementReceived(chid, size) - if err != nil { - return err - } - chst, err := impl.channels.GetByID(chid) - if err != nil { - return err - } - evt := datatransfer.Event{ - Code: datatransfer.Progress, - Message: fmt.Sprintf("Received %d more bytes", size), - Timestamp: time.Now(), - } - err = impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } - return nil -} - -func (impl *graphsyncImpl) OnDataSent(chid datatransfer.ChannelID, link ipld.Link, size uint64) error { - _, err := impl.channels.IncrementSent(chid, size) - if err != nil { - return err - } - chst, err := impl.channels.GetByID(chid) - if err != nil { - return err - } - evt := datatransfer.Event{ - Code: datatransfer.Progress, - Message: fmt.Sprintf("Sent %d more bytes", size), - Timestamp: time.Now(), - } - err = impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } - return nil -} - -func (impl *graphsyncImpl) OnRequestReceived(chid datatransfer.ChannelID) error { - _, err := impl.channels.GetByID(chid) - return err -} - -func (impl *graphsyncImpl) OnResponseCompleted(chid datatransfer.ChannelID, success bool) error { - chst, err := impl.channels.GetByID(chid) - if err != nil { - return err - } - - evt := datatransfer.Event{ - Code: datatransfer.Error, - Timestamp: time.Now(), - } - if success { - evt.Code = datatransfer.Complete - } - err = impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } - return nil -} - -// RegisterVoucherType registers a validator for the given voucher type -// returns error if: -// * voucher type does not implement voucher -// * there is a voucher type registered with an identical 
identifier -// * voucherType's Kind is not reflect.Ptr -func (impl *graphsyncImpl) RegisterVoucherType(voucherType datatransfer.Voucher, validator datatransfer.RequestValidator) error { - err := impl.validatedTypes.Register(voucherType, validator) - if err != nil { - return xerrors.Errorf("error registering voucher type: %w", err) - } - return nil -} - -// OpenPushDataChannel opens a data transfer that will send data to the recipient peer and -// transfer parts of the piece that match the selector -func (impl *graphsyncImpl) OpenPushDataChannel(ctx context.Context, requestTo peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, error) { - tid, err := impl.sendDtRequest(ctx, selector, false, voucher, baseCid, requestTo) - if err != nil { - return datatransfer.ChannelID{}, err - } - - chid, err := impl.channels.CreateNew(tid, baseCid, selector, voucher, - impl.peerID, impl.peerID, requestTo) // initiator = us, sender = us, receiver = them - if err != nil { - return chid, err - } - evt := datatransfer.Event{ - Code: datatransfer.Open, - Message: "New Request Initiated", - Timestamp: time.Now(), - } - chst, err := impl.channels.GetByID(chid) - if err != nil { - return chid, err - } - err = impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } - return chid, nil -} - -// OpenPullDataChannel opens a data transfer that will request data from the sending peer and -// transfer parts of the piece that match the selector -func (impl *graphsyncImpl) OpenPullDataChannel(ctx context.Context, requestTo peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.ChannelID, error) { - - tid, err := impl.sendDtRequest(ctx, selector, true, voucher, baseCid, requestTo) - if err != nil { - return datatransfer.ChannelID{}, err - } - // initiator = us, sender = them, receiver = us - chid, err := impl.channels.CreateNew(tid, baseCid, selector, 
voucher, - impl.peerID, requestTo, impl.peerID) - if err != nil { - return chid, err - } - evt := datatransfer.Event{ - Code: datatransfer.Open, - Message: "New Request Initiated", - Timestamp: time.Now(), - } - chst, err := impl.channels.GetByID(chid) - if err != nil { - return chid, err - } - err = impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } - return chid, nil -} - -// sendDtRequest encapsulates message creation and posting to the data transfer network with the provided parameters -func (impl *graphsyncImpl) sendDtRequest(ctx context.Context, selector ipld.Node, isPull bool, voucher datatransfer.Voucher, baseCid cid.Cid, to peer.ID) (datatransfer.TransferID, error) { - next, err := impl.storedCounter.Next() - if err != nil { - return 0, err - } - tid := datatransfer.TransferID(next) - req, err := message.NewRequest(tid, isPull, voucher.Type(), voucher, baseCid, selector) - if err != nil { - return 0, err - } - if err := impl.dataTransferNetwork.SendMessage(ctx, to, req); err != nil { - return 0, err - } - return tid, nil -} - -func (impl *graphsyncImpl) sendResponse(ctx context.Context, isAccepted bool, to peer.ID, tid datatransfer.TransferID) { - resp := message.NewResponse(tid, isAccepted) - if err := impl.dataTransferNetwork.SendMessage(ctx, to, resp); err != nil { - log.Error(err) - } -} - -// close an open channel (effectively a cancel) -func (impl *graphsyncImpl) CloseDataTransferChannel(x datatransfer.ChannelID) {} - -// get status of a transfer -func (impl *graphsyncImpl) TransferChannelStatus(x datatransfer.ChannelID) datatransfer.Status { - return datatransfer.ChannelNotFoundError -} - -// get notified when certain types of events happen -func (impl *graphsyncImpl) SubscribeToEvents(subscriber datatransfer.Subscriber) datatransfer.Unsubscribe { - return datatransfer.Unsubscribe(impl.pubSub.Subscribe(subscriber)) -} - -// get all in progress transfers -func (impl 
*graphsyncImpl) InProgressChannels() map[datatransfer.ChannelID]datatransfer.ChannelState { - return impl.channels.InProgress() -} - -// sendGsRequest assembles a graphsync request and determines if the transfer was completed/successful. -// notifies subscribers of final request status. -func (impl *graphsyncImpl) sendGsRequest(ctx context.Context, initiator peer.ID, transferID datatransfer.TransferID, isPull bool, dataSender peer.ID, root cidlink.Link, stor ipld.Node) { - extDtData := extension.NewTransferData(transferID, initiator, isPull) - var buf bytes.Buffer - if err := extDtData.MarshalCBOR(&buf); err != nil { - log.Error(err) - } - extData := buf.Bytes() - _, errChan := impl.gs.Request(ctx, dataSender, root, stor, - graphsync.ExtensionData{ - Name: extension.ExtensionDataTransfer, - Data: extData, - }) - go func() { - var lastError error - for err := range errChan { - lastError = err - } - evt := datatransfer.Event{ - Code: datatransfer.Error, - Timestamp: time.Now(), - } - chid := datatransfer.ChannelID{Initiator: initiator, ID: transferID} - chst, err := impl.channels.GetByID(chid) - if err != nil { - msg := "cannot find a matching channel for this request" - evt.Message = msg - } else { - if lastError == nil { - evt.Code = datatransfer.Complete - } else { - evt.Message = lastError.Error() - } - } - err = impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } - }() -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_impl_test.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_impl_test.go deleted file mode 100644 index c3143908b1..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_impl_test.go +++ /dev/null @@ -1,1066 +0,0 @@ -package graphsyncimpl_test - -import ( - "bytes" - "context" - "errors" - "math/rand" - "testing" - "time" - - "github.com/ipfs/go-cid" - 
"github.com/ipfs/go-graphsync" - gsmsg "github.com/ipfs/go-graphsync/message" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - datatransfer "github.com/filecoin-project/go-data-transfer" - . "github.com/filecoin-project/go-data-transfer/impl/graphsync" - "github.com/filecoin-project/go-data-transfer/impl/graphsync/extension" - "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/network" - "github.com/filecoin-project/go-data-transfer/testutil" -) - -type receivedMessage struct { - message message.DataTransferMessage - sender peer.ID -} - -// Receiver is an interface for receiving messages from the GraphSyncNetwork. 
-type receiver struct { - messageReceived chan receivedMessage -} - -func (r *receiver) ReceiveRequest( - ctx context.Context, - sender peer.ID, - incoming message.DataTransferRequest) { - - select { - case <-ctx.Done(): - case r.messageReceived <- receivedMessage{incoming, sender}: - } -} - -func (r *receiver) ReceiveResponse( - ctx context.Context, - sender peer.ID, - incoming message.DataTransferResponse) { - - select { - case <-ctx.Done(): - case r.messageReceived <- receivedMessage{incoming, sender}: - } -} - -func (r *receiver) ReceiveError(err error) { -} - -func TestDataTransferOneWay(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 - host2 := gsData.Host2 - // setup receiving peer to just record message coming in - dtnet2 := network.NewFromLibp2pHost(host2) - r := &receiver{ - messageReceived: make(chan receivedMessage), - } - dtnet2.SetDelegate(r) - - gs := gsData.SetupGraphsyncHost1() - dt := NewGraphSyncDataTransfer(host1, gs, gsData.StoredCounter1) - - t.Run("OpenPushDataTransfer", func(t *testing.T) { - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - - // this is the selector for "get the whole DAG" - // TODO: support storage deals with custom payload selectors - stor := ssb.ExploreRecursive(selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node() - - voucher := testutil.NewFakeDTType() - baseCid := testutil.GenerateCids(1)[0] - channelID, err := dt.OpenPushDataChannel(ctx, host2.ID(), voucher, baseCid, stor) - require.NoError(t, err) - require.NotNil(t, channelID) - require.Equal(t, channelID.Initiator, host1.ID()) - require.NoError(t, err) - - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - - sender := messageReceived.sender - 
require.Equal(t, sender, host1.ID()) - - received := messageReceived.message - require.True(t, received.IsRequest()) - receivedRequest, ok := received.(message.DataTransferRequest) - require.True(t, ok) - - require.Equal(t, receivedRequest.TransferID(), channelID.ID) - require.Equal(t, receivedRequest.BaseCid(), baseCid) - require.False(t, receivedRequest.IsCancel()) - require.False(t, receivedRequest.IsPull()) - receivedSelector, err := receivedRequest.Selector() - require.NoError(t, err) - require.Equal(t, receivedSelector, stor) - testutil.AssertFakeDTVoucher(t, receivedRequest, voucher) - }) - - t.Run("OpenPullDataTransfer", func(t *testing.T) { - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - - stor := ssb.ExploreRecursive(selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node() - - voucher := testutil.NewFakeDTType() - baseCid := testutil.GenerateCids(1)[0] - channelID, err := dt.OpenPullDataChannel(ctx, host2.ID(), voucher, baseCid, stor) - require.NoError(t, err) - require.NotNil(t, channelID) - require.Equal(t, channelID.Initiator, host1.ID()) - require.NoError(t, err) - - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - - sender := messageReceived.sender - require.Equal(t, sender, host1.ID()) - - received := messageReceived.message - require.True(t, received.IsRequest()) - receivedRequest, ok := received.(message.DataTransferRequest) - require.True(t, ok) - - require.Equal(t, receivedRequest.TransferID(), channelID.ID) - require.Equal(t, receivedRequest.BaseCid(), baseCid) - require.False(t, receivedRequest.IsCancel()) - require.True(t, receivedRequest.IsPull()) - receivedSelector, err := receivedRequest.Selector() - require.NoError(t, err) - require.Equal(t, receivedSelector, stor) - testutil.AssertFakeDTVoucher(t, receivedRequest, voucher) - }) -} - -type receivedValidation struct { - isPull bool - 
other peer.ID - voucher datatransfer.Voucher - baseCid cid.Cid - selector ipld.Node -} - -type fakeValidator struct { - ctx context.Context - validationsReceived chan receivedValidation -} - -func (fv *fakeValidator) ValidatePush( - sender peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - selector ipld.Node) error { - - select { - case <-fv.ctx.Done(): - case fv.validationsReceived <- receivedValidation{false, sender, voucher, baseCid, selector}: - } - return nil -} - -func (fv *fakeValidator) ValidatePull( - receiver peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - selector ipld.Node) error { - - select { - case <-fv.ctx.Done(): - case fv.validationsReceived <- receivedValidation{true, receiver, voucher, baseCid, selector}: - } - return nil -} - -func TestDataTransferValidation(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 - host2 := gsData.Host2 - dtnet1 := network.NewFromLibp2pHost(host1) - r := &receiver{ - messageReceived: make(chan receivedMessage), - } - dtnet1.SetDelegate(r) - - gs2 := testutil.NewFakeGraphSync() - - fv := &fakeValidator{ctx, make(chan receivedValidation)} - - id := datatransfer.TransferID(rand.Int31()) - - t.Run("ValidatePush", func(t *testing.T) { - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - err := dt2.RegisterVoucherType(&testutil.FakeDTType{}, fv) - require.NoError(t, err) - // create push request - voucher, baseCid, request := createDTRequest(t, false, id, gsData.AllSelector) - - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - - var validation receivedValidation - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case validation = <-fv.validationsReceived: - assert.False(t, validation.isPull) - } - - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - 
case _ = <-r.messageReceived: - } - - assert.False(t, validation.isPull) - assert.Equal(t, host1.ID(), validation.other) - assert.Equal(t, &voucher, validation.voucher) - assert.Equal(t, baseCid, validation.baseCid) - assert.Equal(t, gsData.AllSelector, validation.selector) - }) - - t.Run("ValidatePull", func(t *testing.T) { - // create pull request - voucher, baseCid, request := createDTRequest(t, true, id, gsData.AllSelector) - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - - var validation receivedValidation - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case validation = <-fv.validationsReceived: - } - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case _ = <-r.messageReceived: - } - - assert.True(t, validation.isPull) - assert.Equal(t, validation.other, host1.ID()) - assert.Equal(t, &voucher, validation.voucher) - assert.Equal(t, baseCid, validation.baseCid) - assert.Equal(t, gsData.AllSelector, validation.selector) - }) -} - -func createDTRequest(t *testing.T, isPull bool, id datatransfer.TransferID, selector ipld.Node) (testutil.FakeDTType, cid.Cid, message.DataTransferRequest) { - voucher := &testutil.FakeDTType{Data: "applesauce"} - baseCid := testutil.GenerateCids(1)[0] - request, err := message.NewRequest(id, isPull, voucher.Type(), voucher, baseCid, selector) - require.NoError(t, err) - return *voucher, baseCid, request -} - -type stubbedValidator struct { - didPush bool - didPull bool - expectPush bool - expectPull bool - pushError error - pullError error -} - -func newSV() *stubbedValidator { - return &stubbedValidator{false, false, false, false, nil, nil} -} - -func (sv *stubbedValidator) ValidatePush( - sender peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - selector ipld.Node) error { - sv.didPush = true - return sv.pushError -} - -func (sv *stubbedValidator) ValidatePull( - receiver peer.ID, - voucher datatransfer.Voucher, - baseCid cid.Cid, - selector 
ipld.Node) error { - sv.didPull = true - return sv.pullError -} - -func (sv *stubbedValidator) stubErrorPush() { - sv.pushError = errors.New("something went wrong") -} - -func (sv *stubbedValidator) stubSuccessPush() { - sv.pullError = nil -} - -func (sv *stubbedValidator) expectSuccessPush() { - sv.expectPush = true - sv.stubSuccessPush() -} - -func (sv *stubbedValidator) expectErrorPush() { - sv.expectPush = true - sv.stubErrorPush() -} - -func (sv *stubbedValidator) stubErrorPull() { - sv.pullError = errors.New("something went wrong") -} - -func (sv *stubbedValidator) stubSuccessPull() { - sv.pullError = nil -} - -func (sv *stubbedValidator) expectSuccessPull() { - sv.expectPull = true - sv.stubSuccessPull() -} - -func (sv *stubbedValidator) expectErrorPull() { - sv.expectPull = true - sv.stubErrorPull() -} - -func (sv *stubbedValidator) verifyExpectations(t *testing.T) { - if sv.expectPush { - require.True(t, sv.didPush) - } - if sv.expectPull { - require.True(t, sv.didPull) - } -} - -func TestGraphsyncImpl_RegisterVoucherType(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 - - gs1 := testutil.NewFakeGraphSync() - dt := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - fv := &fakeValidator{ctx, make(chan receivedValidation)} - - // a voucher type can be registered - assert.NoError(t, dt.RegisterVoucherType(&testutil.FakeDTType{}, fv)) - - // it cannot be re-registered - assert.EqualError(t, dt.RegisterVoucherType(&testutil.FakeDTType{}, fv), "error registering voucher type: identifier already registered: FakeDTType") - - // it must be registered as a pointer - assert.EqualError(t, dt.RegisterVoucherType(testutil.FakeDTType{}, fv), - "error registering voucher type: registering entry type FakeDTType: type must be a pointer") -} - -func TestDataTransferSubscribing(t *testing.T) { - // create network - 
ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 - host2 := gsData.Host2 - - gs1 := testutil.NewFakeGraphSync() - gs2 := testutil.NewFakeGraphSync() - sv := newSV() - sv.stubErrorPull() - sv.stubErrorPush() - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - require.NoError(t, dt2.RegisterVoucherType(&testutil.FakeDTType{}, sv)) - voucher := testutil.FakeDTType{Data: "applesauce"} - baseCid := testutil.GenerateCids(1)[0] - - dt1 := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - - subscribe1Calls := make(chan struct{}, 1) - subscribe1 := func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Error { - subscribe1Calls <- struct{}{} - } - } - subscribe2Calls := make(chan struct{}, 1) - subscribe2 := func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Error { - subscribe2Calls <- struct{}{} - } - } - unsub1 := dt1.SubscribeToEvents(subscribe1) - unsub2 := dt1.SubscribeToEvents(subscribe2) - _, err := dt1.OpenPushDataChannel(ctx, host2.ID(), &voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - select { - case <-ctx.Done(): - t.Fatal("subscribed events not received") - case <-subscribe1Calls: - } - select { - case <-ctx.Done(): - t.Fatal("subscribed events not received") - case <-subscribe2Calls: - } - unsub1() - unsub2() - - subscribe3Calls := make(chan struct{}, 1) - subscribe3 := func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Error { - subscribe3Calls <- struct{}{} - } - } - subscribe4Calls := make(chan struct{}, 1) - subscribe4 := func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Error { - subscribe4Calls <- struct{}{} - } - } - unsub3 := dt1.SubscribeToEvents(subscribe3) - 
unsub4 := dt1.SubscribeToEvents(subscribe4) - _, err = dt1.OpenPullDataChannel(ctx, host2.ID(), &voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - select { - case <-ctx.Done(): - t.Fatal("subscribed events not received") - case <-subscribe1Calls: - t.Fatal("received channel that should have been unsubscribed") - case <-subscribe2Calls: - t.Fatal("received channel that should have been unsubscribed") - case <-subscribe3Calls: - } - select { - case <-ctx.Done(): - t.Fatal("subscribed events not received") - case <-subscribe1Calls: - t.Fatal("received channel that should have been unsubscribed") - case <-subscribe2Calls: - t.Fatal("received channel that should have been unsubscribed") - case <-subscribe4Calls: - } - unsub3() - unsub4() -} - -func TestDataTransferInitiatingPushGraphsyncRequests(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 - host2 := gsData.Host2 - - gs2 := testutil.NewFakeGraphSync() - - // setup receiving peer to just record message coming in - dtnet1 := network.NewFromLibp2pHost(host1) - r := &receiver{ - messageReceived: make(chan receivedMessage), - } - dtnet1.SetDelegate(r) - - id := datatransfer.TransferID(rand.Int31()) - - _, baseCid, request := createDTRequest(t, false, id, gsData.AllSelector) - - t.Run("with successful validation", func(t *testing.T) { - sv := newSV() - sv.expectSuccessPush() - - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - require.NoError(t, dt2.RegisterVoucherType(&testutil.FakeDTType{}, sv)) - - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case <-r.messageReceived: - } - sv.verifyExpectations(t) - - requestReceived := gs2.AssertRequestReceived(ctx, t) - - sv.verifyExpectations(t) - - receiver := requestReceived.P - 
require.Equal(t, receiver, host1.ID()) - - cl, ok := requestReceived.Root.(cidlink.Link) - require.True(t, ok) - require.Equal(t, baseCid, cl.Cid) - - require.Equal(t, gsData.AllSelector, requestReceived.Selector) - - }) - - t.Run("with error validation", func(t *testing.T) { - sv := newSV() - sv.expectErrorPush() - - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - require.NoError(t, dt2.RegisterVoucherType(&testutil.FakeDTType{}, sv)) - - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case <-r.messageReceived: - } - sv.verifyExpectations(t) - - // no graphsync request should be scheduled - gs2.AssertNoRequestReceived(t) - - }) - -} - -func TestDataTransferInitiatingPullGraphsyncRequests(t *testing.T) { - ctx := context.Background() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 // initiates the pull request - host2 := gsData.Host2 // sends the data - - voucher := testutil.FakeDTType{Data: "applesauce"} - baseCid := testutil.GenerateCids(1)[0] - - t.Run("with successful validation", func(t *testing.T) { - gs1Init := testutil.NewFakeGraphSync() - gs2Sender := testutil.NewFakeGraphSync() - - sv := newSV() - sv.expectSuccessPull() - - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - dtInit := NewGraphSyncDataTransfer(host1, gs1Init, gsData.StoredCounter1) - dtSender := NewGraphSyncDataTransfer(host2, gs2Sender, gsData.StoredCounter2) - err := dtSender.RegisterVoucherType(&testutil.FakeDTType{}, sv) - require.NoError(t, err) - - _, err = dtInit.OpenPullDataChannel(ctx, host2.ID(), &voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - - requestReceived := gs1Init.AssertRequestReceived(ctx, t) - sv.verifyExpectations(t) - - receiver := requestReceived.P - require.Equal(t, receiver, host2.ID()) - - cl, ok := requestReceived.Root.(cidlink.Link) - require.True(t, ok) - require.Equal(t, 
baseCid.String(), cl.Cid.String()) - - require.Equal(t, gsData.AllSelector, requestReceived.Selector) - }) - - t.Run("with error validation", func(t *testing.T) { - gs1 := testutil.NewFakeGraphSync() - gs2 := testutil.NewFakeGraphSync() - - dt1 := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - sv := newSV() - sv.expectErrorPull() - - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - err := dt2.RegisterVoucherType(&testutil.FakeDTType{}, sv) - require.NoError(t, err) - - subscribeCalls := make(chan struct{}, 1) - subscribe := func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Error { - subscribeCalls <- struct{}{} - } - } - unsub := dt1.SubscribeToEvents(subscribe) - _, err = dt1.OpenPullDataChannel(ctx, host2.ID(), &voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - - select { - case <-ctx.Done(): - t.Fatal("subscribed events not received") - case <-subscribeCalls: - } - - sv.verifyExpectations(t) - - // no graphsync request should be scheduled - gs1.AssertNoRequestReceived(t) - unsub() - }) - - t.Run("does not schedule graphsync request if is push request", func(t *testing.T) { - gs1 := testutil.NewFakeGraphSync() - gs2 := testutil.NewFakeGraphSync() - - sv := newSV() - sv.expectSuccessPush() - - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - dt1 := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - err := dt2.RegisterVoucherType(&testutil.FakeDTType{}, sv) - require.NoError(t, err) - - subscribeCalls := make(chan struct{}, 1) - subscribe := func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Progress { - subscribeCalls <- struct{}{} - } - } - unsub := dt1.SubscribeToEvents(subscribe) - _, err = dt1.OpenPushDataChannel(ctx, host2.ID(), &voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - - 
select { - case <-ctx.Done(): - t.Fatal("subscribed events not received") - case <-subscribeCalls: - } - sv.verifyExpectations(t) - - // no graphsync request should be scheduled - gs1.AssertNoRequestReceived(t) - unsub() - }) -} - -type receivedGraphSyncMessage struct { - message gsmsg.GraphSyncMessage - p peer.ID -} - -type fakeGraphSyncReceiver struct { - receivedMessages chan receivedGraphSyncMessage -} - -func (fgsr *fakeGraphSyncReceiver) ReceiveMessage(ctx context.Context, sender peer.ID, incoming gsmsg.GraphSyncMessage) { - select { - case <-ctx.Done(): - case fgsr.receivedMessages <- receivedGraphSyncMessage{incoming, sender}: - } -} - -func (fgsr *fakeGraphSyncReceiver) ReceiveError(_ error) { -} -func (fgsr *fakeGraphSyncReceiver) Connected(p peer.ID) { -} -func (fgsr *fakeGraphSyncReceiver) Disconnected(p peer.ID) { -} - -func (fgsr *fakeGraphSyncReceiver) consumeResponses(ctx context.Context, t *testing.T) graphsync.ResponseStatusCode { - var gsMessageReceived receivedGraphSyncMessage - for { - select { - case <-ctx.Done(): - t.Fail() - case gsMessageReceived = <-fgsr.receivedMessages: - responses := gsMessageReceived.message.Responses() - if (len(responses) > 0) && gsmsg.IsTerminalResponseCode(responses[0].Status()) { - return responses[0].Status() - } - } - } -} - -func TestRespondingToPushGraphsyncRequests(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 // initiator and data sender - host2 := gsData.Host2 // data recipient, makes graphsync request for data - voucher := testutil.FakeDTType{Data: "applesauce"} - link := gsData.LoadUnixFSFile(t, false) - - // setup receiving peer to just record message coming in - dtnet2 := network.NewFromLibp2pHost(host2) - r := &receiver{ - messageReceived: make(chan receivedMessage), - } - dtnet2.SetDelegate(r) - - gsr := &fakeGraphSyncReceiver{ - 
receivedMessages: make(chan receivedGraphSyncMessage), - } - gsData.GsNet2.SetDelegate(gsr) - - gs1 := gsData.SetupGraphsyncHost1() - dt1 := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - - t.Run("when request is initiated", func(t *testing.T) { - _, err := dt1.OpenPushDataChannel(ctx, host2.ID(), &voucher, link.(cidlink.Link).Cid, gsData.AllSelector) - require.NoError(t, err) - - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - requestReceived := messageReceived.message.(message.DataTransferRequest) - - var buf bytes.Buffer - extStruct := &extension.TransferData{TransferID: uint64(requestReceived.TransferID()), Initiator: host1.ID()} - err = extStruct.MarshalCBOR(&buf) - require.NoError(t, err) - extData := buf.Bytes() - - request := gsmsg.NewRequest(graphsync.RequestID(rand.Int31()), link.(cidlink.Link).Cid, gsData.AllSelector, graphsync.Priority(rand.Int31()), graphsync.ExtensionData{ - Name: extension.ExtensionDataTransfer, - Data: extData, - }) - gsmessage := gsmsg.New() - gsmessage.AddRequest(request) - require.NoError(t, gsData.GsNet2.SendMessage(ctx, host1.ID(), gsmessage)) - - status := gsr.consumeResponses(ctx, t) - require.False(t, gsmsg.IsTerminalFailureCode(status)) - }) - - t.Run("when no request is initiated", func(t *testing.T) { - var buf bytes.Buffer - extStruct := &extension.TransferData{TransferID: rand.Uint64(), Initiator: host1.ID()} - err := extStruct.MarshalCBOR(&buf) - require.NoError(t, err) - extData := buf.Bytes() - - request := gsmsg.NewRequest(graphsync.RequestID(rand.Int31()), link.(cidlink.Link).Cid, gsData.AllSelector, graphsync.Priority(rand.Int31()), graphsync.ExtensionData{ - Name: extension.ExtensionDataTransfer, - Data: extData, - }) - gsmessage := gsmsg.New() - gsmessage.AddRequest(request) - require.NoError(t, gsData.GsNet2.SendMessage(ctx, host1.ID(), gsmessage)) - - status := 
gsr.consumeResponses(ctx, t) - require.True(t, gsmsg.IsTerminalFailureCode(status)) - }) -} - -func TestResponseHookWhenExtensionNotFound(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 // initiator and data sender - host2 := gsData.Host2 // data recipient, makes graphsync request for data - voucher := testutil.FakeDTType{Data: "applesauce"} - link := gsData.LoadUnixFSFile(t, false) - - // setup receiving peer to just record message coming in - dtnet2 := network.NewFromLibp2pHost(host2) - r := &receiver{ - messageReceived: make(chan receivedMessage), - } - dtnet2.SetDelegate(r) - - gsr := &fakeGraphSyncReceiver{ - receivedMessages: make(chan receivedGraphSyncMessage), - } - gsData.GsNet2.SetDelegate(gsr) - - gs1 := gsData.SetupGraphsyncHost1() - dt1 := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - - t.Run("when it's not our extension, does not error and does not validate", func(t *testing.T) { - //register a hook that validates the request so we don't fail in gs because the request - //never gets processed - validateHook := func(p peer.ID, req graphsync.RequestData, ha graphsync.IncomingRequestHookActions) { - ha.ValidateRequest() - } - gs1.RegisterIncomingRequestHook(validateHook) - - _, err := dt1.OpenPushDataChannel(ctx, host2.ID(), &voucher, link.(cidlink.Link).Cid, gsData.AllSelector) - require.NoError(t, err) - - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case <-r.messageReceived: - } - - request := gsmsg.NewRequest(graphsync.RequestID(rand.Int31()), link.(cidlink.Link).Cid, gsData.AllSelector, graphsync.Priority(rand.Int31())) - gsmessage := gsmsg.New() - gsmessage.AddRequest(request) - require.NoError(t, gsData.GsNet2.SendMessage(ctx, host1.ID(), gsmessage)) - - status := gsr.consumeResponses(ctx, t) - assert.False(t, 
gsmsg.IsTerminalFailureCode(status)) - }) -} - -func TestRespondingToPullGraphsyncRequests(t *testing.T) { - //create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 // initiator, and recipient, makes graphync request - host2 := gsData.Host2 // data sender - - // setup receiving peer to just record message coming in - dtnet1 := network.NewFromLibp2pHost(host1) - r := &receiver{ - messageReceived: make(chan receivedMessage), - } - dtnet1.SetDelegate(r) - - gsr := &fakeGraphSyncReceiver{ - receivedMessages: make(chan receivedGraphSyncMessage), - } - gsData.GsNet1.SetDelegate(gsr) - - gs2 := gsData.SetupGraphsyncHost2() - - link := gsData.LoadUnixFSFile(t, true) - - id := datatransfer.TransferID(rand.Int31()) - - t.Run("When a pull request is initiated and validated", func(t *testing.T) { - sv := newSV() - sv.expectSuccessPull() - - dt1 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - require.NoError(t, dt1.RegisterVoucherType(&testutil.FakeDTType{}, sv)) - - _, _, request := createDTRequest(t, true, id, gsData.AllSelector) - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - sv.verifyExpectations(t) - receivedResponse, ok := messageReceived.message.(message.DataTransferResponse) - require.True(t, ok) - require.True(t, receivedResponse.Accepted()) - extStruct := &extension.TransferData{ - TransferID: uint64(receivedResponse.TransferID()), - Initiator: host1.ID(), - IsPull: true, - } - - var buf2 = bytes.Buffer{} - err := extStruct.MarshalCBOR(&buf2) - require.NoError(t, err) - extData := buf2.Bytes() - - gsRequest := gsmsg.NewRequest(graphsync.RequestID(rand.Int31()), link.(cidlink.Link).Cid, gsData.AllSelector, 
graphsync.Priority(rand.Int31()), graphsync.ExtensionData{ - Name: extension.ExtensionDataTransfer, - Data: extData, - }) - - // initiator requests data over graphsync network - gsmessage := gsmsg.New() - gsmessage.AddRequest(gsRequest) - require.NoError(t, gsData.GsNet1.SendMessage(ctx, host2.ID(), gsmessage)) - status := gsr.consumeResponses(ctx, t) - require.False(t, gsmsg.IsTerminalFailureCode(status)) - }) - - t.Run("When request is not initiated, graphsync response is error", func(t *testing.T) { - _ = NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - extStruct := &extension.TransferData{TransferID: rand.Uint64(), Initiator: host1.ID()} - - var buf2 bytes.Buffer - err := extStruct.MarshalCBOR(&buf2) - require.NoError(t, err) - extData := buf2.Bytes() - request := gsmsg.NewRequest(graphsync.RequestID(rand.Int31()), link.(cidlink.Link).Cid, gsData.AllSelector, graphsync.Priority(rand.Int31()), graphsync.ExtensionData{ - Name: extension.ExtensionDataTransfer, - Data: extData, - }) - gsmessage := gsmsg.New() - gsmessage.AddRequest(request) - - // non-initiator requests data over graphsync network, but should not get it - // because there was no previous request - require.NoError(t, gsData.GsNet1.SendMessage(ctx, host2.ID(), gsmessage)) - status := gsr.consumeResponses(ctx, t) - require.True(t, gsmsg.IsTerminalFailureCode(status)) - }) -} - -func TestDataTransferPushRoundTrip(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 // initiator, data sender - host2 := gsData.Host2 // data recipient - - root := gsData.LoadUnixFSFile(t, false) - rootCid := root.(cidlink.Link).Cid - gs1 := gsData.SetupGraphsyncHost1() - gs2 := gsData.SetupGraphsyncHost2() - - dt1 := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - - finished := 
make(chan struct{}, 2) - errChan := make(chan struct{}, 2) - opened := make(chan struct{}, 2) - sent := make(chan uint64, 21) - received := make(chan uint64, 21) - var subscriber datatransfer.Subscriber = func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Progress { - if channelState.Received() > 0 { - received <- channelState.Received() - } else if channelState.Sent() > 0 { - sent <- channelState.Sent() - } - } - if event.Code == datatransfer.Complete { - finished <- struct{}{} - } - if event.Code == datatransfer.Error { - errChan <- struct{}{} - } - if event.Code == datatransfer.Open { - opened <- struct{}{} - } - } - dt1.SubscribeToEvents(subscriber) - dt2.SubscribeToEvents(subscriber) - voucher := testutil.FakeDTType{Data: "applesauce"} - sv := newSV() - sv.expectSuccessPull() - require.NoError(t, dt2.RegisterVoucherType(&testutil.FakeDTType{}, sv)) - - chid, err := dt1.OpenPushDataChannel(ctx, host2.ID(), &voucher, rootCid, gsData.AllSelector) - require.NoError(t, err) - opens := 0 - completes := 0 - sentIncrements := make([]uint64, 0, 21) - receivedIncrements := make([]uint64, 0, 21) - for opens < 2 || completes < 2 || len(sentIncrements) < 21 || len(receivedIncrements) < 21 { - select { - case <-ctx.Done(): - t.Fatal("Did not complete succcessful data transfer") - case <-finished: - completes++ - case <-opened: - opens++ - case sentIncrement := <-sent: - sentIncrements = append(sentIncrements, sentIncrement) - case receivedIncrement := <-received: - receivedIncrements = append(receivedIncrements, receivedIncrement) - case <-errChan: - t.Fatal("received error on data transfer") - } - } - require.Equal(t, sentIncrements, receivedIncrements) - gsData.VerifyFileTransferred(t, root, true) - assert.Equal(t, chid.Initiator, host1.ID()) -} - -func TestDataTransferPullRoundTrip(t *testing.T) { - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - gsData := 
testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 - host2 := gsData.Host2 - - root := gsData.LoadUnixFSFile(t, false) - rootCid := root.(cidlink.Link).Cid - gs1 := gsData.SetupGraphsyncHost1() - gs2 := gsData.SetupGraphsyncHost2() - - dt1 := NewGraphSyncDataTransfer(host1, gs1, gsData.StoredCounter1) - dt2 := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - - finished := make(chan struct{}, 2) - errChan := make(chan struct{}, 2) - opened := make(chan struct{}, 2) - sent := make(chan uint64, 21) - received := make(chan uint64, 21) - var subscriber datatransfer.Subscriber = func(event datatransfer.Event, channelState datatransfer.ChannelState) { - if event.Code == datatransfer.Progress { - if channelState.Received() > 0 { - received <- channelState.Received() - } else if channelState.Sent() > 0 { - sent <- channelState.Sent() - } - } - if event.Code == datatransfer.Complete { - finished <- struct{}{} - } - if event.Code == datatransfer.Error { - errChan <- struct{}{} - } - if event.Code == datatransfer.Open { - opened <- struct{}{} - } - } - dt1.SubscribeToEvents(subscriber) - dt2.SubscribeToEvents(subscriber) - voucher := testutil.FakeDTType{Data: "applesauce"} - sv := newSV() - sv.expectSuccessPull() - require.NoError(t, dt1.RegisterVoucherType(&testutil.FakeDTType{}, sv)) - - _, err := dt2.OpenPullDataChannel(ctx, host1.ID(), &voucher, rootCid, gsData.AllSelector) - require.NoError(t, err) - opens := 0 - completes := 0 - sentIncrements := make([]uint64, 0, 21) - receivedIncrements := make([]uint64, 0, 21) - for opens < 2 || completes < 2 || len(sentIncrements) < 21 || len(receivedIncrements) < 21 { - select { - case <-ctx.Done(): - t.Fatal("Did not complete succcessful data transfer") - case <-finished: - completes++ - case <-opened: - opens++ - case sentIncrement := <-sent: - sentIncrements = append(sentIncrements, sentIncrement) - case receivedIncrement := <-received: - receivedIncrements = append(receivedIncrements, 
receivedIncrement) - case <-errChan: - t.Fatal("received error on data transfer") - } - } - require.Equal(t, sentIncrements, receivedIncrements) - gsData.VerifyFileTransferred(t, root, true) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_receiver.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_receiver.go deleted file mode 100644 index 66b5947330..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_receiver.go +++ /dev/null @@ -1,153 +0,0 @@ -package graphsyncimpl - -import ( - "context" - "time" - - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/libp2p/go-libp2p-core/peer" - xerrors "golang.org/x/xerrors" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/message" -) - -type graphsyncReceiver struct { - impl *graphsyncImpl -} - -// ReceiveRequest takes an incoming data transfer request, validates the voucher and -// processes the message. 
-func (receiver *graphsyncReceiver) ReceiveRequest( - ctx context.Context, - initiator peer.ID, - incoming message.DataTransferRequest) { - err := receiver.receiveRequest(initiator, incoming) - if err != nil { - log.Error(err) - } - if err == nil && !incoming.IsPull() { - stor, _ := incoming.Selector() - receiver.impl.sendGsRequest(ctx, initiator, incoming.TransferID(), incoming.IsPull(), initiator, cidlink.Link{Cid: incoming.BaseCid()}, stor) - } - receiver.impl.sendResponse(ctx, err == nil, initiator, incoming.TransferID()) -} - -func (receiver *graphsyncReceiver) receiveRequest( - initiator peer.ID, - incoming message.DataTransferRequest) error { - - voucher, err := receiver.validateVoucher(initiator, incoming) - if err != nil { - return err - } - stor, _ := incoming.Selector() - - var dataSender, dataReceiver peer.ID - if incoming.IsPull() { - dataSender = receiver.impl.peerID - dataReceiver = initiator - } else { - dataSender = initiator - dataReceiver = receiver.impl.peerID - } - - chid, err := receiver.impl.channels.CreateNew(incoming.TransferID(), incoming.BaseCid(), stor, voucher, initiator, dataSender, dataReceiver) - if err != nil { - return err - } - evt := datatransfer.Event{ - Code: datatransfer.Open, - Message: "Incoming request accepted", - Timestamp: time.Now(), - } - chst, err := receiver.impl.channels.GetByID(chid) - if err != nil { - return err - } - err = receiver.impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } - return nil -} - -// validateVoucher converts a voucher in an incoming message to its appropriate -// voucher struct, then runs the validator and returns the results. 
-// returns error if: -// * reading voucher fails -// * deserialization of selector fails -// * validation fails -func (receiver *graphsyncReceiver) validateVoucher(sender peer.ID, incoming message.DataTransferRequest) (datatransfer.Voucher, error) { - - vtypStr := datatransfer.TypeIdentifier(incoming.VoucherType()) - decoder, has := receiver.impl.validatedTypes.Decoder(vtypStr) - if !has { - return nil, xerrors.Errorf("unknown voucher type: %s", vtypStr) - } - encodable, err := incoming.Voucher(decoder) - if err != nil { - return nil, err - } - vouch := encodable.(datatransfer.Registerable) - - var validatorFunc func(peer.ID, datatransfer.Voucher, cid.Cid, ipld.Node) error - processor, _ := receiver.impl.validatedTypes.Processor(vtypStr) - validator := processor.(datatransfer.RequestValidator) - if incoming.IsPull() { - validatorFunc = validator.ValidatePull - } else { - validatorFunc = validator.ValidatePush - } - - stor, err := incoming.Selector() - if err != nil { - return vouch, err - } - - if err = validatorFunc(sender, vouch, incoming.BaseCid(), stor); err != nil { - return nil, err - } - - return vouch, nil -} - -// ReceiveResponse handles responses to our Push or Pull data transfer request. -// It schedules a graphsync transfer only if our Pull Request is accepted. 
-func (receiver *graphsyncReceiver) ReceiveResponse( - ctx context.Context, - sender peer.ID, - incoming message.DataTransferResponse) { - evt := datatransfer.Event{ - Code: datatransfer.Error, - Message: "", - Timestamp: time.Now(), - } - chid := datatransfer.ChannelID{Initiator: receiver.impl.peerID, ID: incoming.TransferID()} - chst, err := receiver.impl.channels.GetByID(chid) - if err != nil { - log.Warnf("received response from unknown peer %s, transfer ID %d", sender, incoming.TransferID) - return - } - - if incoming.Accepted() { - evt.Code = datatransfer.Progress - // if we are handling a response to a pull request then they are sending data and the - // initiator is us - if chst.Sender() == sender { - baseCid := chst.BaseCID() - root := cidlink.Link{Cid: baseCid} - receiver.impl.sendGsRequest(ctx, receiver.impl.peerID, incoming.TransferID(), true, sender, root, chst.Selector()) - } - } - err = receiver.impl.pubSub.Publish(internalEvent{evt, chst}) - if err != nil { - log.Warnf("err publishing DT event: %s", err.Error()) - } -} - -func (receiver *graphsyncReceiver) ReceiveError(err error) { - log.Errorf("received error message on data transfer: %s", err.Error()) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_receiver_test.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_receiver_test.go deleted file mode 100644 index 58d726354b..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/graphsync_receiver_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package graphsyncimpl_test - -import ( - "context" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - datatransfer "github.com/filecoin-project/go-data-transfer" - . 
"github.com/filecoin-project/go-data-transfer/impl/graphsync" - "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/network" - "github.com/filecoin-project/go-data-transfer/testutil" -) - -func TestSendResponseToIncomingRequest(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - gsData := testutil.NewGraphsyncTestingData(ctx, t) - host1 := gsData.Host1 - host2 := gsData.Host2 - - // setup receiving peer to just record message coming in - dtnet1 := network.NewFromLibp2pHost(host1) - r := &receiver{ - messageReceived: make(chan receivedMessage), - } - dtnet1.SetDelegate(r) - - gs2 := testutil.NewFakeGraphSync() - - voucher := testutil.NewFakeDTType() - baseCid := testutil.GenerateCids(1)[0] - - t.Run("Response to push with successful validation", func(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - sv := newSV() - sv.expectSuccessPush() - - dt := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - require.NoError(t, dt.RegisterVoucherType(&testutil.FakeDTType{}, sv)) - - isPull := false - _, err := message.NewRequest(id, isPull, voucher.Type(), voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - request, err := message.NewRequest(id, isPull, voucher.Type(), voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - - sv.verifyExpectations(t) - - sender := messageReceived.sender - require.Equal(t, sender, host2.ID()) - - received := messageReceived.message - require.False(t, received.IsRequest()) - receivedResponse, ok := received.(message.DataTransferResponse) - require.True(t, ok) - - assert.Equal(t, receivedResponse.TransferID(), id) - require.True(t, 
receivedResponse.Accepted()) - - }) - - t.Run("Response to push with error validation", func(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - sv := newSV() - sv.expectErrorPush() - dt := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - err := dt.RegisterVoucherType(&testutil.FakeDTType{}, sv) - require.NoError(t, err) - - isPull := false - - request, err := message.NewRequest(id, isPull, voucher.Type(), voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - - sv.verifyExpectations(t) - - sender := messageReceived.sender - require.Equal(t, sender, host2.ID()) - - received := messageReceived.message - require.False(t, received.IsRequest()) - receivedResponse, ok := received.(message.DataTransferResponse) - require.True(t, ok) - - require.Equal(t, receivedResponse.TransferID(), id) - require.False(t, receivedResponse.Accepted()) - }) - - t.Run("Response to pull with successful validation", func(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - sv := newSV() - sv.expectSuccessPull() - - dt := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - err := dt.RegisterVoucherType(&testutil.FakeDTType{}, sv) - require.NoError(t, err) - - isPull := true - - request, err := message.NewRequest(id, isPull, voucher.Type(), voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - - sv.verifyExpectations(t) - - sender := messageReceived.sender - require.Equal(t, sender, host2.ID()) - - received := messageReceived.message - require.False(t, received.IsRequest()) 
- receivedResponse, ok := received.(message.DataTransferResponse) - require.True(t, ok) - - require.Equal(t, receivedResponse.TransferID(), id) - require.True(t, receivedResponse.Accepted()) - }) - - t.Run("Response to push with error validation", func(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - sv := newSV() - sv.expectErrorPull() - - dt := NewGraphSyncDataTransfer(host2, gs2, gsData.StoredCounter2) - err := dt.RegisterVoucherType(&testutil.FakeDTType{}, sv) - require.NoError(t, err) - - isPull := true - - request, err := message.NewRequest(id, isPull, voucher.Type(), voucher, baseCid, gsData.AllSelector) - require.NoError(t, err) - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - - var messageReceived receivedMessage - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case messageReceived = <-r.messageReceived: - } - - sv.verifyExpectations(t) - - sender := messageReceived.sender - require.Equal(t, sender, host2.ID()) - - received := messageReceived.message - require.False(t, received.IsRequest()) - receivedResponse, ok := received.(message.DataTransferResponse) - require.True(t, ok) - - require.Equal(t, receivedResponse.TransferID(), id) - require.False(t, receivedResponse.Accepted()) - }) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks/hooks.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks/hooks.go deleted file mode 100644 index 6febc4ca96..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks/hooks.go +++ /dev/null @@ -1,174 +0,0 @@ -package hooks - -import ( - "sync" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/impl/graphsync/extension" - "github.com/ipfs/go-graphsync" - ipld "github.com/ipld/go-ipld-prime" - peer "github.com/libp2p/go-libp2p-core/peer" - "github.com/prometheus/common/log" -) - -// Events are semantic data 
transfer events that happen as a result of graphsync hooks -type Events interface { - // OnRequestSent is called when we ask the other peer to send us data on the - // given channel ID - // return values are: - // - nil = this request is recognized - // - error = ignore incoming data for this request - OnRequestSent(chid datatransfer.ChannelID) error - // OnDataReceive is called when we receive data for the given channel ID - // return values are: - // - nil = continue receiving data - // - error = cancel this request - OnDataReceived(chid datatransfer.ChannelID, link ipld.Link, size uint64) error - // OnDataSent is called when we send data for the given channel ID - // return values are: - // - nil = continue sending data - // - error = cancel this request - OnDataSent(chid datatransfer.ChannelID, link ipld.Link, size uint64) error - // OnRequestReceived is called when we receive a new request to send data - // for the given channel ID - // return values are: - // - nil = proceed with sending data - // - error = cancel this request - OnRequestReceived(chid datatransfer.ChannelID) error - // OnResponseCompleted is called when we finish sending data for the given channel ID - // Error returns are logged but otherwise have not effect - OnResponseCompleted(chid datatransfer.ChannelID, success bool) error -} - -type graphsyncKey struct { - requestID graphsync.RequestID - p peer.ID -} - -// Manager manages graphsync hooks for data transfer, translating from -// graphsync hooks to semantic data transfer events -type Manager struct { - events Events - peerID peer.ID - graphsyncRequestMapLk sync.RWMutex - graphsyncRequestMap map[graphsyncKey]datatransfer.ChannelID -} - -// NewManager makes a new hooks manager with the given hook events interface -func NewManager(peerID peer.ID, hookEvents Events) *Manager { - return &Manager{ - events: hookEvents, - peerID: peerID, - graphsyncRequestMap: make(map[graphsyncKey]datatransfer.ChannelID), - } -} - -// RegisterHooks registers 
graphsync hooks for the hooks manager -func (hm *Manager) RegisterHooks(gs graphsync.GraphExchange) { - gs.RegisterIncomingRequestHook(hm.gsReqRecdHook) - gs.RegisterCompletedResponseListener(hm.gsCompletedResponseListener) - gs.RegisterIncomingBlockHook(hm.gsIncomingBlockHook) - gs.RegisterOutgoingBlockHook(hm.gsOutgoingBlockHook) - gs.RegisterOutgoingRequestHook(hm.gsOutgoingRequestHook) -} - -func (hm *Manager) gsOutgoingRequestHook(p peer.ID, request graphsync.RequestData, hookActions graphsync.OutgoingRequestHookActions) { - transferData, _ := extension.GetTransferData(request) - - // extension not found; probably not our request. - if transferData == nil { - return - } - - chid := transferData.GetChannelID() - err := hm.events.OnRequestSent(chid) - if err != nil { - return - } - // record the outgoing graphsync request to map it to channel ID going forward - hm.graphsyncRequestMapLk.Lock() - hm.graphsyncRequestMap[graphsyncKey{request.ID(), hm.peerID}] = chid - hm.graphsyncRequestMapLk.Unlock() -} - -func (hm *Manager) gsIncomingBlockHook(p peer.ID, response graphsync.ResponseData, block graphsync.BlockData, hookActions graphsync.IncomingBlockHookActions) { - hm.graphsyncRequestMapLk.RLock() - chid, ok := hm.graphsyncRequestMap[graphsyncKey{response.RequestID(), hm.peerID}] - hm.graphsyncRequestMapLk.RUnlock() - - if !ok { - return - } - - err := hm.events.OnDataReceived(chid, block.Link(), block.BlockSize()) - if err != nil { - hookActions.TerminateWithError(err) - } -} - -func (hm *Manager) gsOutgoingBlockHook(p peer.ID, request graphsync.RequestData, block graphsync.BlockData, hookActions graphsync.OutgoingBlockHookActions) { - hm.graphsyncRequestMapLk.RLock() - chid, ok := hm.graphsyncRequestMap[graphsyncKey{request.ID(), p}] - hm.graphsyncRequestMapLk.RUnlock() - - if !ok { - return - } - - err := hm.events.OnDataSent(chid, block.Link(), block.BlockSize()) - if err != nil { - hookActions.TerminateWithError(err) - } -} - -// gsReqRecdHook is a 
graphsync.OnRequestReceivedHook hook -// if an incoming request does not match a previous push request, it returns an error. -func (hm *Manager) gsReqRecdHook(p peer.ID, request graphsync.RequestData, hookActions graphsync.IncomingRequestHookActions) { - - // if this is a push request the sender is us. - transferData, err := extension.GetTransferData(request) - if err != nil { - hookActions.TerminateWithError(err) - return - } - - // extension not found; probably not our request. - if transferData == nil { - return - } - - chid := transferData.GetChannelID() - - err = hm.events.OnRequestReceived(chid) - if err != nil { - hookActions.TerminateWithError(err) - return - } - - hm.graphsyncRequestMapLk.Lock() - hm.graphsyncRequestMap[graphsyncKey{request.ID(), p}] = chid - hm.graphsyncRequestMapLk.Unlock() - - raw, _ := request.Extension(extension.ExtensionDataTransfer) - respData := graphsync.ExtensionData{Name: extension.ExtensionDataTransfer, Data: raw} - hookActions.ValidateRequest() - hookActions.SendExtensionData(respData) -} - -// gsCompletedResponseListener is a graphsync.OnCompletedResponseListener. 
We use it learn when the data transfer is complete -// for the side that is responding to a graphsync request -func (hm *Manager) gsCompletedResponseListener(p peer.ID, request graphsync.RequestData, status graphsync.ResponseStatusCode) { - hm.graphsyncRequestMapLk.RLock() - chid, ok := hm.graphsyncRequestMap[graphsyncKey{request.ID(), p}] - hm.graphsyncRequestMapLk.RUnlock() - - if !ok { - return - } - - success := status == graphsync.RequestCompletedFull - err := hm.events.OnResponseCompleted(chid, success) - if err != nil { - log.Error(err) - } -} \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks/hooks_test.go b/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks/hooks_test.go deleted file mode 100644 index 8dfc1f4d14..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks/hooks_test.go +++ /dev/null @@ -1,377 +0,0 @@ -package hooks_test - -import ( - "bytes" - "errors" - "math/rand" - "testing" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/impl/graphsync/extension" - "github.com/filecoin-project/go-data-transfer/impl/graphsync/hooks" - "github.com/filecoin-project/go-data-transfer/testutil" - "github.com/ipfs/go-graphsync" - ipld "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/traversal" - peer "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" -) - -func TestManager(t *testing.T) { - testCases := map[string]struct { - makeRequest func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.RequestData - makeResponse func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.ResponseData - events fakeEvents - action func(gsData *graphsyncTestData) - check func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) - }{ - "recognized outgoing request will record incoming blocks": { - action: func(gsData 
*graphsyncTestData) { - gsData.outgoingRequestHook() - gsData.incomingBlockHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestSentCalled) - require.True(t, events.OnDataReceivedCalled) - require.NoError(t, gsData.incomingBlockHookActions.TerminationError) - }, - }, - "non-data-transfer outgoing request will not record incoming blocks": { - makeRequest: func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.RequestData { - return testutil.NewFakeRequest(id, map[graphsync.ExtensionName][]byte{}) - }, - action: func(gsData *graphsyncTestData) { - gsData.outgoingRequestHook() - gsData.incomingBlockHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.False(t, events.OnRequestSentCalled) - require.False(t, events.OnDataReceivedCalled) - require.NoError(t, gsData.incomingBlockHookActions.TerminationError) - }, - }, - "unrecognized outgoing request will not record incoming blocks": { - events: fakeEvents{ - OnRequestSentError: errors.New("Not recognized"), - }, - action: func(gsData *graphsyncTestData) { - gsData.outgoingRequestHook() - gsData.incomingBlockHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestSentCalled) - require.False(t, events.OnDataReceivedCalled) - require.NoError(t, gsData.incomingBlockHookActions.TerminationError) - }, - }, - "incoming block error will halt request": { - events: fakeEvents{ - OnDataReceivedError: errors.New("something went wrong"), - }, - action: func(gsData *graphsyncTestData) { - gsData.outgoingRequestHook() - gsData.incomingBlockHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestSentCalled) - require.True(t, events.OnDataReceivedCalled) - require.Error(t, gsData.incomingBlockHookActions.TerminationError) - }, - }, - "recognized incoming request will validate 
request": { - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestReceivedCalled) - require.True(t, gsData.incomingRequestHookActions.Validated) - require.Equal(t, extension.ExtensionDataTransfer, gsData.incomingRequestHookActions.SentExtension.Name) - require.NoError(t, gsData.incomingRequestHookActions.TerminationError) - }, - }, - "malformed data transfer extension on incoming request will terminate": { - makeRequest: func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.RequestData { - return testutil.NewFakeRequest(id, map[graphsync.ExtensionName][]byte{ - extension.ExtensionDataTransfer: testutil.RandomBytes(100), - }) - }, - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.False(t, events.OnRequestReceivedCalled) - require.False(t, gsData.incomingRequestHookActions.Validated) - require.Error(t, gsData.incomingRequestHookActions.TerminationError) - }, - }, - "unrecognized incoming data transfer request will terminate": { - events: fakeEvents{ - OnRequestReceivedError: errors.New("something went wrong"), - }, - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestReceivedCalled) - require.False(t, gsData.incomingRequestHookActions.Validated) - require.Error(t, gsData.incomingRequestHookActions.TerminationError) - }, - }, - "recognized incoming request will record outgoing blocks": { - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - gsData.outgoingBlockHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestReceivedCalled) - require.True(t, events.OnDataSentCalled) - 
require.NoError(t, gsData.outgoingBlockHookActions.TerminationError) - }, - }, - "non-data-transfer request will not record outgoing blocks": { - makeRequest: func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.RequestData { - return testutil.NewFakeRequest(id, map[graphsync.ExtensionName][]byte{}) - }, - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - gsData.outgoingBlockHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.False(t, events.OnRequestReceivedCalled) - require.False(t, events.OnDataSentCalled) - }, - }, - "outgoing data send error will terminate request": { - events: fakeEvents{ - OnDataSentError: errors.New("something went wrong"), - }, - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - gsData.outgoingBlockHook() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestReceivedCalled) - require.True(t, events.OnDataSentCalled) - require.Error(t, gsData.outgoingBlockHookActions.TerminationError) - }, - }, - "recognized incoming request will record successful request completion": { - makeResponse: func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.ResponseData { - return testutil.NewFakeResponse(id, map[graphsync.ExtensionName][]byte{}, graphsync.RequestCompletedFull) - }, - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - gsData.responseCompletedListener() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestReceivedCalled) - require.True(t, events.OnResponseCompletedCalled) - require.True(t, events.ResponseSuccess) - }, - }, - "recognized incoming request will record unsuccessful request completion": { - makeResponse: func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.ResponseData { - return testutil.NewFakeResponse(id, map[graphsync.ExtensionName][]byte{}, 
graphsync.RequestCompletedPartial) - }, - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - gsData.responseCompletedListener() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.True(t, events.OnRequestReceivedCalled) - require.True(t, events.OnResponseCompletedCalled) - require.False(t, events.ResponseSuccess) - }, - }, - "non-data-transfer request will not record request completed": { - makeRequest: func(id graphsync.RequestID, chid datatransfer.ChannelID) graphsync.RequestData { - return testutil.NewFakeRequest(id, map[graphsync.ExtensionName][]byte{}) - }, - action: func(gsData *graphsyncTestData) { - gsData.incomingRequestHook() - gsData.responseCompletedListener() - }, - check: func(t *testing.T, events *fakeEvents, gsData *graphsyncTestData) { - require.False(t, events.OnRequestReceivedCalled) - require.False(t, events.OnResponseCompletedCalled) - }, - }, - } - for testCase, data := range testCases { - t.Run(testCase, func(t *testing.T) { - peers := testutil.GeneratePeers(2) - transferID := datatransfer.TransferID(rand.Uint64()) - channelID := datatransfer.ChannelID{Initiator: peers[0], ID: transferID} - requestID := graphsync.RequestID(rand.Int31()) - var request graphsync.RequestData - if data.makeRequest != nil { - request = data.makeRequest(requestID, channelID) - } else { - ext := &extension.TransferData{ - TransferID: uint64(transferID), - Initiator: peers[0], - IsPull: false, - } - buf := new(bytes.Buffer) - err := ext.MarshalCBOR(buf) - require.NoError(t, err) - request = testutil.NewFakeRequest(requestID, map[graphsync.ExtensionName][]byte{ - extension.ExtensionDataTransfer: buf.Bytes(), - }) - } - var response graphsync.ResponseData - if data.makeResponse != nil { - response = data.makeResponse(requestID, channelID) - } else { - ext := &extension.TransferData{ - TransferID: uint64(transferID), - Initiator: peers[0], - IsPull: false, - } - buf := new(bytes.Buffer) - err := 
ext.MarshalCBOR(buf) - require.NoError(t, err) - response = testutil.NewFakeResponse(requestID, map[graphsync.ExtensionName][]byte{ - extension.ExtensionDataTransfer: buf.Bytes(), - }, graphsync.PartialResponse) - } - block := testutil.NewFakeBlockData() - fgs := testutil.NewFakeGraphSync() - gsData := &graphsyncTestData{ - fgs: fgs, - p: peers[1], - request: request, - response: response, - block: block, - outgoingRequestHookActions: &fakeOutgoingRequestHookActions{}, - outgoingBlockHookActions: &fakeOutgoingBlockHookActions{}, - incomingBlockHookActions: &fakeIncomingBlockHookActions{}, - incomingRequestHookActions: &fakeIncomingRequestHookActions{}, - } - manager := hooks.NewManager(peers[0], &data.events) - manager.RegisterHooks(fgs) - data.action(gsData) - data.check(t, &data.events, gsData) - }) - } -} - -type fakeEvents struct { - OnRequestSentCalled bool - OnRequestSentError error - OnDataReceivedCalled bool - OnDataReceivedError error - OnDataSentCalled bool - OnDataSentError error - OnRequestReceivedCalled bool - OnRequestReceivedError error - OnResponseCompletedCalled bool - OnResponseCompletedErr error - ResponseSuccess bool -} - -func (fe *fakeEvents) OnRequestSent(chid datatransfer.ChannelID) error { - fe.OnRequestSentCalled = true - return fe.OnRequestSentError -} - -func (fe *fakeEvents) OnDataReceived(chid datatransfer.ChannelID, link ipld.Link, size uint64) error { - fe.OnDataReceivedCalled = true - return fe.OnDataReceivedError -} - -func (fe *fakeEvents) OnDataSent(chid datatransfer.ChannelID, link ipld.Link, size uint64) error { - fe.OnDataSentCalled = true - return fe.OnDataSentError -} - -func (fe *fakeEvents) OnRequestReceived(chid datatransfer.ChannelID) error { - fe.OnRequestReceivedCalled = true - return fe.OnRequestReceivedError -} - -func (fe *fakeEvents) OnResponseCompleted(chid datatransfer.ChannelID, success bool) error { - fe.OnResponseCompletedCalled = true - fe.ResponseSuccess = success - return fe.OnResponseCompletedErr -} - 
-type fakeOutgoingRequestHookActions struct{} - -func (fa *fakeOutgoingRequestHookActions) UsePersistenceOption(name string) {} -func (fa *fakeOutgoingRequestHookActions) UseLinkTargetNodeStyleChooser(_ traversal.LinkTargetNodeStyleChooser) { -} - -type fakeIncomingBlockHookActions struct { - TerminationError error -} - -func (fa *fakeIncomingBlockHookActions) TerminateWithError(err error) { - fa.TerminationError = err -} - -func (fa *fakeIncomingBlockHookActions) UpdateRequestWithExtensions(_ ...graphsync.ExtensionData) {} - -type fakeOutgoingBlockHookActions struct { - TerminationError error -} - -func (fa *fakeOutgoingBlockHookActions) SendExtensionData(_ graphsync.ExtensionData) {} - -func (fa *fakeOutgoingBlockHookActions) TerminateWithError(err error) { - fa.TerminationError = err -} - -func (fa *fakeOutgoingBlockHookActions) PauseResponse() {} - -type fakeIncomingRequestHookActions struct { - TerminationError error - Validated bool - SentExtension graphsync.ExtensionData -} - -func (fa *fakeIncomingRequestHookActions) SendExtensionData(ext graphsync.ExtensionData) { - fa.SentExtension = ext -} - -func (fa *fakeIncomingRequestHookActions) UsePersistenceOption(name string) {} - -func (fa *fakeIncomingRequestHookActions) UseLinkTargetNodeStyleChooser(_ traversal.LinkTargetNodeStyleChooser) { -} - -func (fa *fakeIncomingRequestHookActions) TerminateWithError(err error) { - fa.TerminationError = err -} - -func (fa *fakeIncomingRequestHookActions) ValidateRequest() { - fa.Validated = true -} - -type graphsyncTestData struct { - fgs *testutil.FakeGraphSync - p peer.ID - block graphsync.BlockData - request graphsync.RequestData - response graphsync.ResponseData - outgoingRequestHookActions *fakeOutgoingRequestHookActions - incomingBlockHookActions *fakeIncomingBlockHookActions - outgoingBlockHookActions *fakeOutgoingBlockHookActions - incomingRequestHookActions *fakeIncomingRequestHookActions -} - -func (gs *graphsyncTestData) outgoingRequestHook() { - 
gs.fgs.OutgoingRequestHook(gs.p, gs.request, gs.outgoingRequestHookActions) -} -func (gs *graphsyncTestData) incomingBlockHook() { - gs.fgs.IncomingBlockHook(gs.p, gs.response, gs.block, gs.incomingBlockHookActions) -} -func (gs *graphsyncTestData) outgoingBlockHook() { - gs.fgs.OutgoingBlockHook(gs.p, gs.request, gs.block, gs.outgoingBlockHookActions) -} -func (gs *graphsyncTestData) incomingRequestHook() { - gs.fgs.IncomingRequestHook(gs.p, gs.request, gs.incomingRequestHookActions) -} - -func (gs *graphsyncTestData) responseCompletedListener() { - gs.fgs.ResponseCompletedListener(gs.p, gs.request, gs.response.Status()) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/manager.go b/vendor/github.com/filecoin-project/go-data-transfer/manager.go new file mode 100644 index 0000000000..820542f32e --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/manager.go @@ -0,0 +1,130 @@ +package datatransfer + +import ( + "context" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/peer" +) + +// RequestValidator is an interface implemented by the client of the +// data transfer module to validate requests +type RequestValidator interface { + // ValidatePush validates a push request received from the peer that will send data + ValidatePush( + sender peer.ID, + voucher Voucher, + baseCid cid.Cid, + selector ipld.Node) (VoucherResult, error) + // ValidatePull validates a pull request received from the peer that will receive data + ValidatePull( + receiver peer.ID, + voucher Voucher, + baseCid cid.Cid, + selector ipld.Node) (VoucherResult, error) +} + +// Revalidator is a request validator revalidates in progress requests +// by requesting request additional vouchers, and resuming when it receives them +type Revalidator interface { + // Revalidate revalidates a request with a new voucher + Revalidate(channelID ChannelID, voucher Voucher) (VoucherResult, error) + // OnPullDataSent is called on the 
responder side when more bytes are sent + // for a given pull request. The first value indicates whether the request was + // recognized by this revalidator and should be considered 'handled'. If true, + // the remaining two values are interpreted. If 'false' the request is passed on + // to the next revalidators. + // It should return a VoucherResult + ErrPause to + // request revalidation or nil to continue uninterrupted, + // other errors will terminate the request. + OnPullDataSent(chid ChannelID, additionalBytesSent uint64) (bool, VoucherResult, error) + // OnPushDataReceived is called on the responder side when more bytes are received + // for a given push request. The first value indicates whether the request was + // recognized by this revalidator and should be considered 'handled'. If true, + // the remaining two values are interpreted. If 'false' the request is passed on + // to the next revalidators. It should return a VoucherResult + ErrPause to + // request revalidation or nil to continue uninterrupted, + // other errors will terminate the request + OnPushDataReceived(chid ChannelID, additionalBytesReceived uint64) (bool, VoucherResult, error) + // OnComplete is called to make a final request for revalidation -- often for the + // purpose of settlement. The first value indicates whether the request was + // recognized by this revalidator and should be considered 'handled'. If true, + // the remaining two values are interpreted. If 'false' the request is passed on + // to the next revalidators. 
+ // if VoucherResult is non nil, the request will enter a settlement phase awaiting + // a final update + OnComplete(chid ChannelID) (bool, VoucherResult, error) +} + +// TransportConfigurer provides a mechanism to provide transport specific configuration for a given voucher type +type TransportConfigurer func(chid ChannelID, voucher Voucher, transport Transport) + +// ReadyFunc is function that gets called once when the data transfer module is ready +type ReadyFunc func(error) + +// Manager is the core interface presented by all implementations of +// of the data transfer sub system +type Manager interface { + + // Start initializes data transfer processing + Start(ctx context.Context) error + + // OnReady registers a listener for when the data transfer comes on line + OnReady(ReadyFunc) + + // Stop terminates all data transfers and ends processing + Stop(ctx context.Context) error + + // RegisterVoucherType registers a validator for the given voucher type + // will error if voucher type does not implement voucher + // or if there is a voucher type registered with an identical identifier + RegisterVoucherType(voucherType Voucher, validator RequestValidator) error + + // RegisterRevalidator registers a revalidator for the given voucher type + // Note: this is the voucher type used to revalidate. It can share a name + // with the initial validator type and CAN be the same type, or a different type. + // The revalidator can simply be the sampe as the original request validator, + // or a different validator that satisfies the revalidator interface. 
+ RegisterRevalidator(voucherType Voucher, revalidator Revalidator) error + + // RegisterVoucherResultType allows deserialization of a voucher result, + // so that a listener can read the metadata + RegisterVoucherResultType(resultType VoucherResult) error + + // RegisterTransportConfigurer registers the given transport configurer to be run on requests with the given voucher + // type + RegisterTransportConfigurer(voucherType Voucher, configurer TransportConfigurer) error + + // open a data transfer that will send data to the recipient peer and + // transfer parts of the piece that match the selector + OpenPushDataChannel(ctx context.Context, to peer.ID, voucher Voucher, baseCid cid.Cid, selector ipld.Node) (ChannelID, error) + + // open a data transfer that will request data from the sending peer and + // transfer parts of the piece that match the selector + OpenPullDataChannel(ctx context.Context, to peer.ID, voucher Voucher, baseCid cid.Cid, selector ipld.Node) (ChannelID, error) + + // send an intermediate voucher as needed when the receiver sends a request for revalidation + SendVoucher(ctx context.Context, chid ChannelID, voucher Voucher) error + + // close an open channel (effectively a cancel) + CloseDataTransferChannel(ctx context.Context, chid ChannelID) error + + // pause a data transfer channel (only allowed if transport supports it) + PauseDataTransferChannel(ctx context.Context, chid ChannelID) error + + // resume a data transfer channel (only allowed if transport supports it) + ResumeDataTransferChannel(ctx context.Context, chid ChannelID) error + + // get status of a transfer + TransferChannelStatus(ctx context.Context, x ChannelID) Status + + // get notified when certain types of events happen + SubscribeToEvents(subscriber Subscriber) Unsubscribe + + // get all in progress transfers + InProgressChannels(ctx context.Context) (map[ChannelID]ChannelState, error) + + // RestartDataTransferChannel restarts an existing data transfer channel + 
RestartDataTransferChannel(ctx context.Context, chid ChannelID) error +} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message.go b/vendor/github.com/filecoin-project/go-data-transfer/message.go new file mode 100644 index 0000000000..f033968362 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/message.go @@ -0,0 +1,61 @@ +package datatransfer + +import ( + "io" + + "github.com/ipfs/go-cid" + "github.com/ipld/go-ipld-prime" + "github.com/libp2p/go-libp2p-core/protocol" + cborgen "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-data-transfer/encoding" +) + +var ( + // ProtocolDataTransfer1_1 is the protocol identifier for graphsync messages + ProtocolDataTransfer1_1 protocol.ID = "/fil/datatransfer/1.1.0" + + // ProtocolDataTransfer1_0 is the protocol identifier for legacy graphsync messages + // This protocol does NOT support the `Restart` functionality for data transfer channels. + ProtocolDataTransfer1_0 protocol.ID = "/fil/datatransfer/1.0.0" +) + +// Message is a message for the data transfer protocol +// (either request or response) that can serialize to a protobuf +type Message interface { + IsRequest() bool + IsRestart() bool + IsNew() bool + IsUpdate() bool + IsPaused() bool + IsCancel() bool + TransferID() TransferID + cborgen.CBORMarshaler + cborgen.CBORUnmarshaler + ToNet(w io.Writer) error + MessageForProtocol(targetProtocol protocol.ID) (newMsg Message, err error) +} + +// Request is a response message for the data transfer protocol +type Request interface { + Message + IsPull() bool + IsVoucher() bool + VoucherType() TypeIdentifier + Voucher(decoder encoding.Decoder) (encoding.Encodable, error) + BaseCid() cid.Cid + Selector() (ipld.Node, error) + IsRestartExistingChannelRequest() bool + RestartChannelId() (ChannelID, error) +} + +// Response is a response message for the data transfer protocol +type Response interface { + Message + IsVoucherResult() bool + IsComplete() bool + Accepted() 
bool + VoucherResultType() TypeIdentifier + VoucherResult(decoder encoding.Decoder) (encoding.Encodable, error) + EmptyVoucherResult() bool +} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/message.go b/vendor/github.com/filecoin-project/go-data-transfer/message/message.go deleted file mode 100644 index d9b07dded4..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/message.go +++ /dev/null @@ -1,90 +0,0 @@ -package message - -import ( - "io" - - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - cborgen "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/encoding" -) - -// Reference file: https://github.com/ipfs/go-graphsync/blob/master/message/message.go -// though here we have a simpler message type that serializes/deserializes to two -// different types that share an interface, and we serialize to CBOR and not Protobuf. 
- -// DataTransferMessage is a message for the data transfer protocol -// (either request or response) that can serialize to a protobuf -type DataTransferMessage interface { - IsRequest() bool - TransferID() datatransfer.TransferID - cborgen.CBORMarshaler - cborgen.CBORUnmarshaler - ToNet(w io.Writer) error -} - -// DataTransferRequest is a response message for the data transfer protocol -type DataTransferRequest interface { - DataTransferMessage - IsPull() bool - VoucherType() datatransfer.TypeIdentifier - Voucher(decoder encoding.Decoder) (encoding.Encodable, error) - BaseCid() cid.Cid - Selector() (ipld.Node, error) - IsCancel() bool -} - -// DataTransferResponse is a response message for the data transfer protocol -type DataTransferResponse interface { - DataTransferMessage - Accepted() bool -} - -// NewRequest generates a new request for the data transfer protocol -func NewRequest(id datatransfer.TransferID, isPull bool, vtype datatransfer.TypeIdentifier, voucher encoding.Encodable, baseCid cid.Cid, selector ipld.Node) (DataTransferRequest, error) { - vbytes, err := encoding.Encode(voucher) - if err != nil { - return nil, xerrors.Errorf("Creating request: %w", err) - } - if baseCid == cid.Undef { - return nil, xerrors.Errorf("base CID must be defined") - } - selBytes, err := encoding.Encode(selector) - if err != nil { - return nil, xerrors.Errorf("Error encoding selector") - } - return &transferRequest{ - Pull: isPull, - Vouch: &cborgen.Deferred{Raw: vbytes}, - Stor: &cborgen.Deferred{Raw: selBytes}, - BCid: &baseCid, - VTyp: vtype, - XferID: uint64(id), - }, nil -} - -// CancelRequest request generates a request to cancel an in progress request -func CancelRequest(id datatransfer.TransferID) DataTransferRequest { - return &transferRequest{ - Canc: true, - XferID: uint64(id), - } -} - -// NewResponse builds a new Data Transfer response -func NewResponse(id datatransfer.TransferID, accepted bool) DataTransferResponse { - return &transferResponse{Acpt: accepted, 
XferID: uint64(id)} -} - -// FromNet can read a network stream to deserialize a GraphSyncMessage -func FromNet(r io.Reader) (DataTransferMessage, error) { - tresp := transferMessage{} - err := tresp.UnmarshalCBOR(r) - if tresp.IsRequest() { - return tresp.Request, nil - } - return tresp.Response, err -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/message_test.go b/vendor/github.com/filecoin-project/go-data-transfer/message/message_test.go deleted file mode 100644 index 2f023f3ad9..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/message_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package message_test - -import ( - "bytes" - "math/rand" - "testing" - - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - datatransfer "github.com/filecoin-project/go-data-transfer" - . "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/testutil" -) - -func TestNewRequest(t *testing.T) { - baseCid := testutil.GenerateCids(1)[0] - selector := builder.NewSelectorSpecBuilder(basicnode.Style.Any).Matcher().Node() - isPull := true - id := datatransfer.TransferID(rand.Int31()) - voucher := testutil.NewFakeDTType() - request, err := NewRequest(id, isPull, voucher.Type(), voucher, baseCid, selector) - require.NoError(t, err) - assert.Equal(t, id, request.TransferID()) - assert.False(t, request.IsCancel()) - assert.True(t, request.IsPull()) - assert.True(t, request.IsRequest()) - assert.Equal(t, baseCid.String(), request.BaseCid().String()) - testutil.AssertFakeDTVoucher(t, request, voucher) - receivedSelector, err := request.Selector() - require.NoError(t, err) - require.Equal(t, selector, receivedSelector) - // Sanity check to make sure we can cast to DataTransferMessage - msg, ok := request.(DataTransferMessage) - require.True(t, ok) - - 
assert.True(t, msg.IsRequest()) - assert.Equal(t, request.TransferID(), msg.TransferID()) -} -func TestTransferRequest_MarshalCBOR(t *testing.T) { - // sanity check MarshalCBOR does its thing w/o error - req, err := NewTestTransferRequest() - require.NoError(t, err) - wbuf := new(bytes.Buffer) - require.NoError(t, req.MarshalCBOR(wbuf)) - assert.Greater(t, wbuf.Len(), 0) -} -func TestTransferRequest_UnmarshalCBOR(t *testing.T) { - req, err := NewTestTransferRequest() - require.NoError(t, err) - wbuf := new(bytes.Buffer) - // use ToNet / FromNet - require.NoError(t, req.ToNet(wbuf)) - - desMsg, err := FromNet(wbuf) - require.NoError(t, err) - - // Verify round-trip - assert.Equal(t, req.TransferID(), desMsg.TransferID()) - assert.Equal(t, req.IsRequest(), desMsg.IsRequest()) - - desReq := desMsg.(DataTransferRequest) - assert.Equal(t, req.IsPull(), desReq.IsPull()) - assert.Equal(t, req.IsCancel(), desReq.IsCancel()) - assert.Equal(t, req.BaseCid(), desReq.BaseCid()) - testutil.AssertEqualFakeDTVoucher(t, req, desReq) - testutil.AssertEqualSelector(t, req, desReq) -} - -func TestResponses(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - response := NewResponse(id, false) // not accepted - assert.Equal(t, response.TransferID(), id) - assert.False(t, response.Accepted()) - assert.False(t, response.IsRequest()) - - // Sanity check to make sure we can cast to DataTransferMessage - msg, ok := response.(DataTransferMessage) - require.True(t, ok) - - assert.False(t, msg.IsRequest()) - assert.Equal(t, response.TransferID(), msg.TransferID()) -} - -func TestTransferResponse_MarshalCBOR(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - response := NewResponse(id, true) // accepted - - // sanity check that we can marshal data - wbuf := new(bytes.Buffer) - require.NoError(t, response.ToNet(wbuf)) - assert.Greater(t, wbuf.Len(), 0) -} - -func TestTransferResponse_UnmarshalCBOR(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - response 
:= NewResponse(id, true) // accepted - - wbuf := new(bytes.Buffer) - require.NoError(t, response.ToNet(wbuf)) - - // verify round trip - desMsg, err := FromNet(wbuf) - require.NoError(t, err) - assert.False(t, desMsg.IsRequest()) - assert.Equal(t, id, desMsg.TransferID()) - - desResp, ok := desMsg.(DataTransferResponse) - require.True(t, ok) - assert.True(t, desResp.Accepted()) -} - -func TestRequestCancel(t *testing.T) { - id := datatransfer.TransferID(rand.Int31()) - req := CancelRequest(id) - require.Equal(t, req.TransferID(), id) - require.True(t, req.IsRequest()) - require.True(t, req.IsCancel()) - - wbuf := new(bytes.Buffer) - require.NoError(t, req.ToNet(wbuf)) - - deserialized, err := FromNet(wbuf) - require.NoError(t, err) - - deserializedRequest, ok := deserialized.(DataTransferRequest) - require.True(t, ok) - require.Equal(t, deserializedRequest.TransferID(), req.TransferID()) - require.Equal(t, deserializedRequest.IsCancel(), req.IsCancel()) - require.Equal(t, deserializedRequest.IsRequest(), req.IsRequest()) -} - -func TestToNetFromNetEquivalency(t *testing.T) { - baseCid := testutil.GenerateCids(1)[0] - selector := builder.NewSelectorSpecBuilder(basicnode.Style.Any).Matcher().Node() - isPull := false - id := datatransfer.TransferID(rand.Int31()) - accepted := false - voucher := testutil.NewFakeDTType() - request, err := NewRequest(id, isPull, voucher.Type(), voucher, baseCid, selector) - require.NoError(t, err) - buf := new(bytes.Buffer) - err = request.ToNet(buf) - require.NoError(t, err) - require.Greater(t, buf.Len(), 0) - deserialized, err := FromNet(buf) - require.NoError(t, err) - - deserializedRequest, ok := deserialized.(DataTransferRequest) - require.True(t, ok) - - require.Equal(t, deserializedRequest.TransferID(), request.TransferID()) - require.Equal(t, deserializedRequest.IsCancel(), request.IsCancel()) - require.Equal(t, deserializedRequest.IsPull(), request.IsPull()) - require.Equal(t, deserializedRequest.IsRequest(), 
request.IsRequest()) - require.Equal(t, deserializedRequest.BaseCid(), request.BaseCid()) - testutil.AssertEqualFakeDTVoucher(t, request, deserializedRequest) - testutil.AssertEqualSelector(t, request, deserializedRequest) - - response := NewResponse(id, accepted) - err = response.ToNet(buf) - require.NoError(t, err) - deserialized, err = FromNet(buf) - require.NoError(t, err) - - deserializedResponse, ok := deserialized.(DataTransferResponse) - require.True(t, ok) - - require.Equal(t, deserializedResponse.TransferID(), response.TransferID()) - require.Equal(t, deserializedResponse.Accepted(), response.Accepted()) - require.Equal(t, deserializedResponse.IsRequest(), response.IsRequest()) - - request = CancelRequest(id) - err = request.ToNet(buf) - require.NoError(t, err) - deserialized, err = FromNet(buf) - require.NoError(t, err) - - deserializedRequest, ok = deserialized.(DataTransferRequest) - require.True(t, ok) - - require.Equal(t, deserializedRequest.TransferID(), request.TransferID()) - require.Equal(t, deserializedRequest.IsCancel(), request.IsCancel()) - require.Equal(t, deserializedRequest.IsRequest(), request.IsRequest()) -} - -func NewTestTransferRequest() (DataTransferRequest, error) { - bcid := testutil.GenerateCids(1)[0] - selector := builder.NewSelectorSpecBuilder(basicnode.Style.Any).Matcher().Node() - isPull := false - id := datatransfer.TransferID(rand.Int31()) - voucher := testutil.NewFakeDTType() - return NewRequest(id, isPull, voucher.Type(), voucher, bcid, selector) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_message.go b/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_message.go deleted file mode 100644 index dc5c116ab0..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_message.go +++ /dev/null @@ -1,36 +0,0 @@ -package message - -import ( - "io" - - "github.com/filecoin-project/go-data-transfer" -) - -//go:generate cbor-gen-for transferMessage 
-type transferMessage struct { - IsRq bool - - Request *transferRequest - Response *transferResponse -} - -// ========= DataTransferMessage interface - -// IsRequest returns true if this message is a data request -func (tm *transferMessage) IsRequest() bool { - return tm.IsRq -} - -// TransferID returns the TransferID of this message -func (tm *transferMessage) TransferID() datatransfer.TransferID { - if tm.IsRequest() { - return tm.Request.TransferID() - } - return tm.Response.TransferID() -} - -// ToNet serializes a transfer message type. It is simply a wrapper for MarshalCBOR, to provide -// symmetry with FromNet -func (tm *transferMessage) ToNet(w io.Writer) error { - return tm.MarshalCBOR(w) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go b/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go deleted file mode 100644 index c49b260de0..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_message_cbor_gen.go +++ /dev/null @@ -1,116 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package message - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *transferMessage) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.IsRq (bool) (bool) - if err := cbg.WriteBool(w, t.IsRq); err != nil { - return err - } - - // t.Request (message.transferRequest) (struct) - if err := t.Request.MarshalCBOR(w); err != nil { - return err - } - - // t.Response (message.transferResponse) (struct) - if err := t.Response.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *transferMessage) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.IsRq (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.IsRq = false - case 21: - t.IsRq = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Request (message.transferRequest) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Request = new(transferRequest) - if err := t.Request.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Request pointer: %w", err) - } - } - - } - // t.Response (message.transferResponse) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - 
if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Response = new(transferResponse) - if err := t.Response.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Response pointer: %w", err) - } - } - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_request.go b/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_request.go deleted file mode 100644 index 765e6fc1e5..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_request.go +++ /dev/null @@ -1,108 +0,0 @@ -package message - -import ( - "bytes" - "io" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/codec/dagcbor" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -//go:generate cbor-gen-for transferRequest - -// transferRequest is a struct that fulfills the DataTransferRequest interface. 
-// its members are exported to be used by cbor-gen -type transferRequest struct { - BCid *cid.Cid - Canc bool - Part bool - Pull bool - Stor *cbg.Deferred - Vouch *cbg.Deferred - VTyp datatransfer.TypeIdentifier - XferID uint64 -} - -// IsRequest always returns true in this case because this is a transfer request -func (trq *transferRequest) IsRequest() bool { - return true -} - -func (trq *transferRequest) TransferID() datatransfer.TransferID { - return datatransfer.TransferID(trq.XferID) -} - -// ========= DataTransferRequest interface -// IsPull returns true if this is a data pull request -func (trq *transferRequest) IsPull() bool { - return trq.Pull -} - -// VoucherType returns the Voucher ID -func (trq *transferRequest) VoucherType() datatransfer.TypeIdentifier { - return trq.VTyp -} - -// Voucher returns the Voucher bytes -func (trq *transferRequest) Voucher(decoder encoding.Decoder) (encoding.Encodable, error) { - if trq.Vouch == nil { - return nil, xerrors.New("No voucher present to read") - } - return decoder.DecodeFromCbor(trq.Vouch.Raw) -} - -// BaseCid returns the Base CID -func (trq *transferRequest) BaseCid() cid.Cid { - if trq.BCid == nil { - return cid.Undef - } - return *trq.BCid -} - -// Selector returns the message Selector bytes -func (trq *transferRequest) Selector() (ipld.Node, error) { - if trq.Stor == nil { - return nil, xerrors.New("No selector present to read") - } - builder := basicnode.Style.Any.NewBuilder() - reader := bytes.NewReader(trq.Stor.Raw) - err := dagcbor.Decoder(builder, reader) - if err != nil { - return nil, xerrors.Errorf("Error decoding selector: %w", err) - } - return builder.Build(), nil -} - -// IsCancel returns true if this is a cancel request -func (trq *transferRequest) IsCancel() bool { - return trq.Canc -} - -// IsPartial returns true if this is a partial request -func (trq *transferRequest) IsPartial() bool { - return trq.Part -} - -// Cancel cancels a transfer request -func (trq *transferRequest) Cancel() error 
{ - // do other stuff ? - trq.Canc = true - return nil -} - -// ToNet serializes a transfer request. It's a wrapper for MarshalCBOR to provide -// symmetry with FromNet -func (trq *transferRequest) ToNet(w io.Writer) error { - msg := transferMessage{ - IsRq: true, - Request: trq, - Response: nil, - } - return msg.MarshalCBOR(w) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go b/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go deleted file mode 100644 index 8407ee0f49..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_request_cbor_gen.go +++ /dev/null @@ -1,240 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package message - -import ( - "fmt" - "io" - - datatransfer "github.com/filecoin-project/go-data-transfer" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *transferRequest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{136}); err != nil { - return err - } - - // t.BCid (cid.Cid) (struct) - - if t.BCid == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(w, *t.BCid); err != nil { - return xerrors.Errorf("failed to write cid field t.BCid: %w", err) - } - } - - // t.Canc (bool) (bool) - if err := cbg.WriteBool(w, t.Canc); err != nil { - return err - } - - // t.Part (bool) (bool) - if err := cbg.WriteBool(w, t.Part); err != nil { - return err - } - - // t.Pull (bool) (bool) - if err := cbg.WriteBool(w, t.Pull); err != nil { - return err - } - - // t.Stor (typegen.Deferred) (struct) - if err := t.Stor.MarshalCBOR(w); err != nil { - return err - } - - // t.Vouch (typegen.Deferred) (struct) - if err := t.Vouch.MarshalCBOR(w); err != nil { - return err - } - - // t.VTyp (datatransfer.TypeIdentifier) 
(string) - if len(t.VTyp) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.VTyp was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.VTyp)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.VTyp)); err != nil { - return err - } - - // t.XferID (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.XferID))); err != nil { - return err - } - - return nil -} - -func (t *transferRequest) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 8 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.BCid (cid.Cid) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.BCid: %w", err) - } - - t.BCid = &c - } - - } - // t.Canc (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Canc = false - case 21: - t.Canc = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Part (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Part = false - case 21: - t.Part = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Pull (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != 
nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Pull = false - case 21: - t.Pull = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Stor (typegen.Deferred) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Stor = new(cbg.Deferred) - if err := t.Stor.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Stor pointer: %w", err) - } - } - - } - // t.Vouch (typegen.Deferred) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Vouch = new(cbg.Deferred) - if err := t.Vouch.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Vouch pointer: %w", err) - } - } - - } - // t.VTyp (datatransfer.TypeIdentifier) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.VTyp = datatransfer.TypeIdentifier(sval) - } - // t.XferID (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.XferID = uint64(extra) - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_response.go b/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_response.go deleted file mode 100644 index b51cd0fb04..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_response.go +++ /dev/null @@ -1,40 +0,0 @@ -package message - -import ( - "io" - - "github.com/filecoin-project/go-data-transfer" -) - -//go:generate cbor-gen-for transferResponse - -// 
transferResponse is a private struct that satisfies the DataTransferResponse interface -type transferResponse struct { - Acpt bool - XferID uint64 -} - -func (trsp *transferResponse) TransferID() datatransfer.TransferID { - return datatransfer.TransferID(trsp.XferID) -} - -// IsRequest always returns false in this case because this is a transfer response -func (trsp *transferResponse) IsRequest() bool { - return false -} - -// Accepted returns true if the request is accepted in the response -func (trsp *transferResponse) Accepted() bool { - return trsp.Acpt -} - -// ToNet serializes a transfer response. It's a wrapper for MarshalCBOR to provide -// symmetry with FromNet -func (trsp *transferResponse) ToNet(w io.Writer) error { - msg := transferMessage{ - IsRq: false, - Request: nil, - Response: trsp, - } - return msg.MarshalCBOR(w) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go b/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go deleted file mode 100644 index 3947af917d..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/message/transfer_response_cbor_gen.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package message - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *transferResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Acpt (bool) (bool) - if err := cbg.WriteBool(w, t.Acpt); err != nil { - return err - } - - // t.XferID (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.XferID))); err != nil { - return err - } - - return nil -} - -func (t *transferResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Acpt (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.Acpt = false - case 21: - t.Acpt = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.XferID (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.XferID = uint64(extra) - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/network/interface.go b/vendor/github.com/filecoin-project/go-data-transfer/network/interface.go deleted file mode 100644 index 8cd05ad3ad..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/network/interface.go +++ /dev/null @@ -1,56 +0,0 @@ -package network - -import ( - "context" - - "github.com/libp2p/go-libp2p-core/peer" - 
"github.com/libp2p/go-libp2p-core/protocol" - - "github.com/filecoin-project/go-data-transfer/message" -) - -var ( - // ProtocolDataTransfer is the protocol identifier for graphsync messages - ProtocolDataTransfer protocol.ID = "/fil/datatransfer/1.0.0" -) - -// DataTransferNetwork provides network connectivity for GraphSync. -type DataTransferNetwork interface { - - // SendMessage sends a GraphSync message to a peer. - SendMessage( - context.Context, - peer.ID, - message.DataTransferMessage) error - - // SetDelegate registers the Reciver to handle messages received from the - // network. - SetDelegate(Receiver) - - // ConnectTo establishes a connection to the given peer - ConnectTo(context.Context, peer.ID) error - - NewMessageSender(context.Context, peer.ID) (MessageSender, error) -} - -// MessageSender is an interface to send messages to a peer -type MessageSender interface { - SendMsg(context.Context, message.DataTransferMessage) error - Close() error - Reset() error -} - -// Receiver is an interface for receiving messages from the GraphSyncNetwork. 
-type Receiver interface { - ReceiveRequest( - ctx context.Context, - sender peer.ID, - incoming message.DataTransferRequest) - - ReceiveResponse( - ctx context.Context, - sender peer.ID, - incoming message.DataTransferResponse) - - ReceiveError(error) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go b/vendor/github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go deleted file mode 100644 index 8815444517..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/network/libp2p_impl.go +++ /dev/null @@ -1,165 +0,0 @@ -package network - -import ( - "context" - "fmt" - "io" - "time" - - logging "github.com/ipfs/go-log" - "github.com/libp2p/go-libp2p-core/helpers" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-data-transfer/message" -) - -var log = logging.Logger("data_transfer_network") - -var sendMessageTimeout = time.Minute * 10 - -// NewFromLibp2pHost returns a GraphSyncNetwork supported by underlying Libp2p host. -func NewFromLibp2pHost(host host.Host) DataTransferNetwork { - dataTransferNetwork := libp2pDataTransferNetwork{ - host: host, - } - - return &dataTransferNetwork -} - -// libp2pDataTransferNetwork transforms the libp2p host interface, which sends and receives -// NetMessage objects, into the graphsync network interface. 
-type libp2pDataTransferNetwork struct { - host host.Host - // inbound messages from the network are forwarded to the receiver - receiver Receiver -} - -type streamMessageSender struct { - s network.Stream -} - -func (s *streamMessageSender) Close() error { - return helpers.FullClose(s.s) -} - -func (s *streamMessageSender) Reset() error { - return s.s.Reset() -} - -func (s *streamMessageSender) SendMsg(ctx context.Context, msg message.DataTransferMessage) error { - return msgToStream(ctx, s.s, msg) -} - -func msgToStream(ctx context.Context, s network.Stream, msg message.DataTransferMessage) error { - if msg.IsRequest() { - log.Debugf("Outgoing request message for transfer ID: %d", msg.TransferID()) - } - - deadline := time.Now().Add(sendMessageTimeout) - if dl, ok := ctx.Deadline(); ok { - deadline = dl - } - if err := s.SetWriteDeadline(deadline); err != nil { - log.Warnf("error setting deadline: %s", err) - } - - switch s.Protocol() { - case ProtocolDataTransfer: - if err := msg.ToNet(s); err != nil { - log.Debugf("error: %s", err) - return err - } - default: - return fmt.Errorf("unrecognized protocol on remote: %s", s.Protocol()) - } - - if err := s.SetWriteDeadline(time.Time{}); err != nil { - log.Warnf("error resetting deadline: %s", err) - } - return nil -} - -func (dtnet *libp2pDataTransferNetwork) NewMessageSender(ctx context.Context, p peer.ID) (MessageSender, error) { - s, err := dtnet.newStreamToPeer(ctx, p) - if err != nil { - return nil, err - } - - return &streamMessageSender{s: s}, nil -} - -func (dtnet *libp2pDataTransferNetwork) newStreamToPeer(ctx context.Context, p peer.ID) (network.Stream, error) { - return dtnet.host.NewStream(ctx, p, ProtocolDataTransfer) -} - -func (dtnet *libp2pDataTransferNetwork) SendMessage( - ctx context.Context, - p peer.ID, - outgoing message.DataTransferMessage) error { - - s, err := dtnet.newStreamToPeer(ctx, p) - if err != nil { - return err - } - - if err = msgToStream(ctx, s, outgoing); err != nil { - if err2 := 
s.Reset(); err2 != nil { - log.Error(err) - return err2 - } - return err - } - - // TODO(https://github.com/libp2p/go-libp2p-net/issues/28): Avoid this goroutine. - go helpers.AwaitEOF(s) // nolint: errcheck,gosec - return s.Close() - -} - -func (dtnet *libp2pDataTransferNetwork) SetDelegate(r Receiver) { - dtnet.receiver = r - dtnet.host.SetStreamHandler(ProtocolDataTransfer, dtnet.handleNewStream) -} - -func (dtnet *libp2pDataTransferNetwork) ConnectTo(ctx context.Context, p peer.ID) error { - return dtnet.host.Connect(ctx, peer.AddrInfo{ID: p}) -} - -// handleNewStream receives a new stream from the network. -func (dtnet *libp2pDataTransferNetwork) handleNewStream(s network.Stream) { - defer s.Close() // nolint: errcheck,gosec - - if dtnet.receiver == nil { - s.Reset() // nolint: errcheck,gosec - return - } - - for { - received, err := message.FromNet(s) - if err != nil { - if err != io.EOF { - s.Reset() // nolint: errcheck,gosec - go dtnet.receiver.ReceiveError(err) - log.Debugf("graphsync net handleNewStream from %s error: %s", s.Conn().RemotePeer(), err) - } - return - } - - p := s.Conn().RemotePeer() - ctx := context.Background() - log.Debugf("graphsync net handleNewStream from %s", s.Conn().RemotePeer()) - if received.IsRequest() { - receivedRequest, ok := received.(message.DataTransferRequest) - if ok { - dtnet.receiver.ReceiveRequest(ctx, p, receivedRequest) - } - } else { - receivedResponse, ok := received.(message.DataTransferResponse) - if ok { - dtnet.receiver.ReceiveResponse(ctx, p, receivedResponse) - } - } - } -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/network/libp2p_impl_test.go b/vendor/github.com/filecoin-project/go-data-transfer/network/libp2p_impl_test.go deleted file mode 100644 index 9c78f88e4f..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/network/libp2p_impl_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package network_test - -import ( - "context" - "math/rand" - "testing" - "time" - - basicnode 
"github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/message" - "github.com/filecoin-project/go-data-transfer/network" - "github.com/filecoin-project/go-data-transfer/testutil" -) - -// Receiver is an interface for receiving messages from the DataTransferNetwork. -type receiver struct { - messageReceived chan struct{} - lastRequest message.DataTransferRequest - lastResponse message.DataTransferResponse - lastSender peer.ID - connectedPeers chan peer.ID -} - -func (r *receiver) ReceiveRequest( - ctx context.Context, - sender peer.ID, - incoming message.DataTransferRequest) { - r.lastSender = sender - r.lastRequest = incoming - select { - case <-ctx.Done(): - case r.messageReceived <- struct{}{}: - } -} - -func (r *receiver) ReceiveResponse( - ctx context.Context, - sender peer.ID, - incoming message.DataTransferResponse) { - r.lastSender = sender - r.lastResponse = incoming - select { - case <-ctx.Done(): - case r.messageReceived <- struct{}{}: - } -} - -func (r *receiver) ReceiveError(err error) { -} - -func TestMessageSendAndReceive(t *testing.T) { - // create network - ctx := context.Background() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - mn := mocknet.New(ctx) - - host1, err := mn.GenPeer() - require.NoError(t, err) - host2, err := mn.GenPeer() - require.NoError(t, err) - err = mn.LinkAll() - require.NoError(t, err) - - dtnet1 := network.NewFromLibp2pHost(host1) - dtnet2 := network.NewFromLibp2pHost(host2) - r := &receiver{ - messageReceived: make(chan struct{}), - connectedPeers: make(chan peer.ID, 2), - } - dtnet1.SetDelegate(r) - dtnet2.SetDelegate(r) - - err = dtnet1.ConnectTo(ctx, host2.ID()) 
- require.NoError(t, err) - - t.Run("Send Request", func(t *testing.T) { - baseCid := testutil.GenerateCids(1)[0] - selector := builder.NewSelectorSpecBuilder(basicnode.Style.Any).Matcher().Node() - isPull := false - id := datatransfer.TransferID(rand.Int31()) - voucher := testutil.NewFakeDTType() - request, err := message.NewRequest(id, isPull, voucher.Type(), voucher, baseCid, selector) - require.NoError(t, err) - require.NoError(t, dtnet1.SendMessage(ctx, host2.ID(), request)) - - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case <-r.messageReceived: - } - - sender := r.lastSender - require.Equal(t, sender, host1.ID()) - - receivedRequest := r.lastRequest - require.NotNil(t, receivedRequest) - - assert.Equal(t, request.TransferID(), receivedRequest.TransferID()) - assert.Equal(t, request.IsCancel(), receivedRequest.IsCancel()) - assert.Equal(t, request.IsPull(), receivedRequest.IsPull()) - assert.Equal(t, request.IsRequest(), receivedRequest.IsRequest()) - assert.True(t, receivedRequest.BaseCid().Equals(request.BaseCid())) - testutil.AssertEqualFakeDTVoucher(t, request, receivedRequest) - testutil.AssertEqualSelector(t, request, receivedRequest) - }) - - t.Run("Send Response", func(t *testing.T) { - accepted := false - id := datatransfer.TransferID(rand.Int31()) - response := message.NewResponse(id, accepted) - require.NoError(t, dtnet2.SendMessage(ctx, host1.ID(), response)) - - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case <-r.messageReceived: - } - - sender := r.lastSender - require.NotNil(t, sender) - assert.Equal(t, sender, host2.ID()) - - receivedResponse := r.lastResponse - - assert.Equal(t, response.TransferID(), receivedResponse.TransferID()) - assert.Equal(t, response.Accepted(), receivedResponse.Accepted()) - assert.Equal(t, response.IsRequest(), receivedResponse.IsRequest()) - - }) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/registry/registry.go 
b/vendor/github.com/filecoin-project/go-data-transfer/registry/registry.go deleted file mode 100644 index 4dc037079c..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/registry/registry.go +++ /dev/null @@ -1,68 +0,0 @@ -package registry - -import ( - "sync" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/encoding" - "golang.org/x/xerrors" -) - -// Processor is an interface that processes a certain type of encodable objects -// in a registry. The actual specifics of the interface that must be satisfied are -// left to the user of the registry -type Processor interface{} - -type registryEntry struct { - decoder encoding.Decoder - processor Processor -} - -// Registry maintans a register of types of encodable objects and a corresponding -// processor for those objects -// The encodable types must have a method Type() that specifies and identifier -// so they correct decoding function and processor can be identified based -// on this unique identifier -type Registry struct { - registryLk sync.RWMutex - entries map[datatransfer.TypeIdentifier]registryEntry -} - -// NewRegistry initialzes a new registy -func NewRegistry() *Registry { - return &Registry{ - entries: make(map[datatransfer.TypeIdentifier]registryEntry), - } -} - -// Register registers the given processor for the given entry type -func (r *Registry) Register(entry datatransfer.Registerable, processor Processor) error { - identifier := entry.Type() - decoder, err := encoding.NewDecoder(entry) - if err != nil { - return xerrors.Errorf("registering entry type %s: %w", identifier, err) - } - r.registryLk.Lock() - defer r.registryLk.Unlock() - if _, ok := r.entries[identifier]; ok { - return xerrors.Errorf("identifier already registered: %s", identifier) - } - r.entries[identifier] = registryEntry{decoder, processor} - return nil -} - -// Decoder gets a decoder for the given identifier -func (r *Registry) Decoder(identifier 
datatransfer.TypeIdentifier) (encoding.Decoder, bool) { - r.registryLk.RLock() - entry, has := r.entries[identifier] - r.registryLk.RUnlock() - return entry.decoder, has -} - -// Processor gets the processing interface for the given identifer -func (r *Registry) Processor(identifier datatransfer.TypeIdentifier) (Processor, bool) { - r.registryLk.RLock() - entry, has := r.entries[identifier] - r.registryLk.RUnlock() - return entry.processor, has -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/registry/registry_test.go b/vendor/github.com/filecoin-project/go-data-transfer/registry/registry_test.go deleted file mode 100644 index 87fbf4a7ce..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/registry/registry_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package registry_test - -import ( - "testing" - - "github.com/filecoin-project/go-data-transfer/registry" - "github.com/filecoin-project/go-data-transfer/testutil" - "github.com/stretchr/testify/require" -) - -func TestRegistry(t *testing.T) { - r := registry.NewRegistry() - t.Run("it registers", func(t *testing.T) { - err := r.Register(&testutil.FakeDTType{}, func() {}) - require.NoError(t, err) - }) - t.Run("it errors when registred again", func(t *testing.T) { - err := r.Register(&testutil.FakeDTType{}, func() {}) - require.EqualError(t, err, "identifier already registered: FakeDTType") - }) - t.Run("it errors when decoder setup fails", func(t *testing.T) { - err := r.Register(testutil.FakeDTType{}, func() {}) - require.EqualError(t, err, "registering entry type FakeDTType: type must be a pointer") - }) - t.Run("it reads decoders", func(t *testing.T) { - decoder, has := r.Decoder("FakeDTType") - require.True(t, has) - require.NotNil(t, decoder) - decoder, has = r.Decoder("OtherType") - require.False(t, has) - require.Nil(t, decoder) - }) - t.Run("it reads processors", func(t *testing.T) { - processor, has := r.Processor("FakeDTType") - require.True(t, has) - require.NotNil(t, 
processor) - processor, has = r.Processor("OtherType") - require.False(t, has) - require.Nil(t, processor) - }) - -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/statuses.go b/vendor/github.com/filecoin-project/go-data-transfer/statuses.go new file mode 100644 index 0000000000..6a4c89bed4 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/statuses.go @@ -0,0 +1,83 @@ +package datatransfer + +// Status is the status of transfer for a given channel +type Status uint64 + +const ( + // Requested means a data transfer was requested by has not yet been approved + Requested Status = iota + + // Ongoing means the data transfer is in progress + Ongoing + + // TransferFinished indicates the initiator is done sending/receiving + // data but is awaiting confirmation from the responder + TransferFinished + + // ResponderCompleted indicates the initiator received a message from the + // responder that it's completed + ResponderCompleted + + // Finalizing means the responder is awaiting a final message from the initator to + // consider the transfer done + Finalizing + + // Completing just means we have some final cleanup for a completed request + Completing + + // Completed means the data transfer is completed successfully + Completed + + // Failing just means we have some final cleanup for a failed request + Failing + + // Failed means the data transfer failed + Failed + + // Cancelling just means we have some final cleanup for a cancelled request + Cancelling + + // Cancelled means the data transfer ended prematurely + Cancelled + + // InitiatorPaused means the data sender has paused the channel (only the sender can unpause this) + InitiatorPaused + + // ResponderPaused means the data receiver has paused the channel (only the receiver can unpause this) + ResponderPaused + + // BothPaused means both sender and receiver have paused the channel seperately (both must unpause) + BothPaused + + // ResponderFinalizing is a unique state where 
the responder is awaiting a final voucher + ResponderFinalizing + + // ResponderFinalizingTransferFinished is a unique state where the responder is awaiting a final voucher + // and we have received all data + ResponderFinalizingTransferFinished + + // ChannelNotFoundError means the searched for data transfer does not exist + ChannelNotFoundError +) + +// Statuses are human readable names for data transfer states +var Statuses = map[Status]string{ + // Requested means a data transfer was requested by has not yet been approved + Requested: "Requested", + Ongoing: "Ongoing", + TransferFinished: "TransferFinished", + ResponderCompleted: "ResponderCompleted", + Finalizing: "Finalizing", + Completing: "Completing", + Completed: "Completed", + Failing: "Failing", + Failed: "Failed", + Cancelling: "Cancelling", + Cancelled: "Cancelled", + InitiatorPaused: "InitiatorPaused", + ResponderPaused: "ResponderPaused", + BothPaused: "BothPaused", + ResponderFinalizing: "ResponderFinalizing", + ResponderFinalizingTransferFinished: "ResponderFinalizingTransferFinished", + ChannelNotFoundError: "ChannelNotFoundError", +} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakedttype.go b/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakedttype.go deleted file mode 100644 index 19071e5845..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakedttype.go +++ /dev/null @@ -1,52 +0,0 @@ -package testutil - -import ( - "testing" - - "github.com/stretchr/testify/require" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-data-transfer/encoding" - "github.com/filecoin-project/go-data-transfer/message" -) - -//go:generate cbor-gen-for FakeDTType - -// FakeDTType simple fake type for using with registries -type FakeDTType struct { - Data string -} - -// Type satisfies registry.Entry -func (ft FakeDTType) Type() datatransfer.TypeIdentifier { - return "FakeDTType" -} - -// 
AssertFakeDTVoucher asserts that a data transfer requests contains the expected fake data transfer voucher type -func AssertFakeDTVoucher(t *testing.T, request message.DataTransferRequest, expected *FakeDTType) { - require.Equal(t, datatransfer.TypeIdentifier("FakeDTType"), request.VoucherType()) - fakeDTDecoder, err := encoding.NewDecoder(&FakeDTType{}) - require.NoError(t, err) - decoded, err := request.Voucher(fakeDTDecoder) - require.NoError(t, err) - require.Equal(t, expected, decoded) -} - -// AssertEqualFakeDTVoucher asserts that two requests have the same fake data transfer voucher -func AssertEqualFakeDTVoucher(t *testing.T, expectedRequest message.DataTransferRequest, request message.DataTransferRequest) { - require.Equal(t, expectedRequest.VoucherType(), request.VoucherType()) - fakeDTDecoder, err := encoding.NewDecoder(&FakeDTType{}) - require.NoError(t, err) - expectedDecoded, err := request.Voucher(fakeDTDecoder) - require.NoError(t, err) - decoded, err := request.Voucher(fakeDTDecoder) - require.NoError(t, err) - require.Equal(t, expectedDecoded, decoded) -} - -// NewFakeDTType returns a fake dt type with random data -func NewFakeDTType() *FakeDTType { - return &FakeDTType{Data: string(RandomBytes(100))} -} - -var _ datatransfer.Registerable = &FakeDTType{} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakedttype_cbor_gen.go b/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakedttype_cbor_gen.go deleted file mode 100644 index f9a35e37d8..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakedttype_cbor_gen.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package testutil - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *FakeDTType) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Data (string) (string) - if len(t.Data) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Data was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Data)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Data)); err != nil { - return err - } - return nil -} - -func (t *FakeDTType) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Data (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Data = string(sval) - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakegraphsync.go b/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakegraphsync.go deleted file mode 100644 index b2722b5d52..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fakegraphsync.go +++ /dev/null @@ -1,228 +0,0 @@ -package testutil - -import ( - "context" - "math/rand" - "testing" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-graphsync" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" -) - -// ReceivedGraphSyncRequest contains data about a received graphsync request -type ReceivedGraphSyncRequest struct { - P peer.ID - Root ipld.Link - Selector ipld.Node - 
Extensions []graphsync.ExtensionData -} - -// FakeGraphSync implements a GraphExchange but does nothing -type FakeGraphSync struct { - requests chan ReceivedGraphSyncRequest // records calls to fakeGraphSync.Request - OutgoingRequestHook graphsync.OnOutgoingRequestHook - IncomingBlockHook graphsync.OnIncomingBlockHook - OutgoingBlockHook graphsync.OnOutgoingBlockHook - IncomingRequestHook graphsync.OnIncomingRequestHook - ResponseCompletedListener graphsync.OnResponseCompletedListener -} - -// NewFakeGraphSync returns a new fake graphsync implementation -func NewFakeGraphSync() *FakeGraphSync { - return &FakeGraphSync{ - requests: make(chan ReceivedGraphSyncRequest, 1), - } -} - -// AssertNoRequestReceived asserts that no requests should ahve been received by this graphsync implementation -func (fgs *FakeGraphSync) AssertNoRequestReceived(t *testing.T) { - require.Empty(t, fgs.requests, "should not receive request") -} - -// AssertRequestReceived asserts a request should be received before the context closes (and returns said request) -func (fgs *FakeGraphSync) AssertRequestReceived(ctx context.Context, t *testing.T) ReceivedGraphSyncRequest { - var requestReceived ReceivedGraphSyncRequest - select { - case <-ctx.Done(): - t.Fatal("did not receive message sent") - case requestReceived = <-fgs.requests: - } - return requestReceived -} - -// Request initiates a new GraphSync request to the given peer using the given selector spec. 
-func (fgs *FakeGraphSync) Request(ctx context.Context, p peer.ID, root ipld.Link, selector ipld.Node, extensions ...graphsync.ExtensionData) (<-chan graphsync.ResponseProgress, <-chan error) { - - fgs.requests <- ReceivedGraphSyncRequest{p, root, selector, extensions} - responses := make(chan graphsync.ResponseProgress) - errors := make(chan error) - close(responses) - close(errors) - return responses, errors -} - -// RegisterPersistenceOption registers an alternate loader/storer combo that can be substituted for the default -func (fgs *FakeGraphSync) RegisterPersistenceOption(name string, loader ipld.Loader, storer ipld.Storer) error { - return nil -} - -// RegisterIncomingRequestHook adds a hook that runs when a request is received -func (fgs *FakeGraphSync) RegisterIncomingRequestHook(hook graphsync.OnIncomingRequestHook) graphsync.UnregisterHookFunc { - fgs.IncomingRequestHook = hook - return nil -} - -// RegisterIncomingResponseHook adds a hook that runs when a response is received -func (fgs *FakeGraphSync) RegisterIncomingResponseHook(_ graphsync.OnIncomingResponseHook) graphsync.UnregisterHookFunc { - return nil -} - -// RegisterOutgoingRequestHook adds a hook that runs immediately prior to sending a new request -func (fgs *FakeGraphSync) RegisterOutgoingRequestHook(hook graphsync.OnOutgoingRequestHook) graphsync.UnregisterHookFunc { - fgs.OutgoingRequestHook = hook - return nil -} - -// RegisterOutgoingBlockHook adds a hook that runs every time a block is sent from a responder -func (fgs *FakeGraphSync) RegisterOutgoingBlockHook(hook graphsync.OnOutgoingBlockHook) graphsync.UnregisterHookFunc { - fgs.OutgoingBlockHook = hook - return nil -} - -// RegisterIncomingBlockHook adds a hook that runs every time a block is received by the requestor -func (fgs *FakeGraphSync) RegisterIncomingBlockHook(hook graphsync.OnIncomingBlockHook) graphsync.UnregisterHookFunc { - fgs.IncomingBlockHook = hook - return nil -} - -// RegisterRequestUpdatedHook adds a hook that 
runs every time an update to a request is received -func (fgs *FakeGraphSync) RegisterRequestUpdatedHook(hook graphsync.OnRequestUpdatedHook) graphsync.UnregisterHookFunc { - return nil -} - -// RegisterCompletedResponseListener adds a listener on the responder for completed responses -func (fgs *FakeGraphSync) RegisterCompletedResponseListener(listener graphsync.OnResponseCompletedListener) graphsync.UnregisterHookFunc { - fgs.ResponseCompletedListener = listener - return nil -} - -// UnpauseResponse unpauses a response that was paused in a block hook based on peer ID and request ID -func (fgs *FakeGraphSync) UnpauseResponse(_ peer.ID, _ graphsync.RequestID) error { - return nil -} - -var _ graphsync.GraphExchange = &FakeGraphSync{} - -type fakeBlkData struct { - link ipld.Link - size uint64 -} - -func (fbd fakeBlkData) Link() ipld.Link { - return fbd.link -} - -func (fbd fakeBlkData) BlockSize() uint64 { - return fbd.size -} - -func (fbd fakeBlkData) BlockSizeOnWire() uint64 { - return fbd.size -} - -// NewFakeBlockData returns a fake block that matches the block data interface -func NewFakeBlockData() graphsync.BlockData { - return &fakeBlkData{ - link: cidlink.Link{Cid: GenerateCids(1)[0]}, - size: rand.Uint64(), - } -} - -type fakeRequest struct { - id graphsync.RequestID - root cid.Cid - selector ipld.Node - priority graphsync.Priority - isCancel bool - extensions map[graphsync.ExtensionName][]byte -} - -// ID Returns the request ID for this Request -func (fr *fakeRequest) ID() graphsync.RequestID { - return fr.id -} - -// Root returns the CID to the root block of this request -func (fr *fakeRequest) Root() cid.Cid { - return fr.root -} - -// Selector returns the byte representation of the selector for this request -func (fr *fakeRequest) Selector() ipld.Node { - return fr.selector -} - -// Priority returns the priority of this request -func (fr *fakeRequest) Priority() graphsync.Priority { - return fr.priority -} - -// Extension returns the content for an 
extension on a response, or errors -// if extension is not present -func (fr *fakeRequest) Extension(name graphsync.ExtensionName) ([]byte, bool) { - data, has := fr.extensions[name] - return data, has -} - -// IsCancel returns true if this particular request is being cancelled -func (fr *fakeRequest) IsCancel() bool { - return fr.isCancel -} - -// NewFakeRequest returns a fake request that matches the request data interface -func NewFakeRequest(id graphsync.RequestID, extensions map[graphsync.ExtensionName][]byte) graphsync.RequestData { - return &fakeRequest{ - id: id, - root: GenerateCids(1)[0], - selector: allSelector, - priority: graphsync.Priority(rand.Int()), - isCancel: false, - extensions: extensions, - } -} - -type fakeResponse struct { - id graphsync.RequestID - status graphsync.ResponseStatusCode - extensions map[graphsync.ExtensionName][]byte -} - -// RequestID returns the request ID for this response -func (fr *fakeResponse) RequestID() graphsync.RequestID { - return fr.id -} - -// Status returns the status for a response -func (fr *fakeResponse) Status() graphsync.ResponseStatusCode { - return fr.status -} - -// Extension returns the content for an extension on a response, or errors -// if extension is not present -func (fr *fakeResponse) Extension(name graphsync.ExtensionName) ([]byte, bool) { - data, has := fr.extensions[name] - return data, has -} - -// NewFakeResponse returns a fake response that matches the response data interface -func NewFakeResponse(id graphsync.RequestID, extensions map[graphsync.ExtensionName][]byte, status graphsync.ResponseStatusCode) graphsync.ResponseData { - return &fakeResponse{ - id: id, - status: status, - extensions: extensions, - } -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fixtures/lorem.txt b/vendor/github.com/filecoin-project/go-data-transfer/testutil/fixtures/lorem.txt deleted file mode 100644 index c558923539..0000000000 --- 
a/vendor/github.com/filecoin-project/go-data-transfer/testutil/fixtures/lorem.txt +++ /dev/null @@ -1,49 +0,0 @@ -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. - -Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. - -Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. 
Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. - -Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus. - -Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. 
Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. - -Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. - -Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. - -Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. 
Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. - -Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. - -Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. 
Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem. - -Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. - -Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. - -Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. 
Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. - -Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. - -Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. 
Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. - -Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. - -Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. - -Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. 
Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. - -Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. - -Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. - -Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. 
Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. - -Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. - -Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. 
Quisque sagittis purus sit amet volutpat. - -Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum. - -Eros in cursus turpis massa tincidunt dui ut. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed felis. Aliquam ultrices sagittis orci a. Id interdum velit laoreet id donec ultrices tincidunt arcu non. Dictum fusce ut placerat orci nulla pellentesque dignissim. At erat pellentesque adipiscing commodo elit. Ut venenatis tellus in metus vulputate eu scelerisque felis imperdiet. Lectus sit amet est placerat in. Montes nascetur ridiculus mus mauris. Libero volutpat sed cras ornare arcu. Mi sit amet mauris commodo quis imperdiet massa. Sed id semper risus in hendrerit. 
\ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-data-transfer/testutil/gstestdata.go b/vendor/github.com/filecoin-project/go-data-transfer/testutil/gstestdata.go deleted file mode 100644 index e4eb2a2f32..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/testutil/gstestdata.go +++ /dev/null @@ -1,242 +0,0 @@ -package testutil - -import ( - "bytes" - "context" - "errors" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "testing" - - "github.com/filecoin-project/go-storedcounter" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - dss "github.com/ipfs/go-datastore/sync" - "github.com/ipfs/go-graphsync" - gsimpl "github.com/ipfs/go-graphsync/impl" - gsnet "github.com/ipfs/go-graphsync/network" - bstore "github.com/ipfs/go-ipfs-blockstore" - chunker "github.com/ipfs/go-ipfs-chunker" - offline "github.com/ipfs/go-ipfs-exchange-offline" - files "github.com/ipfs/go-ipfs-files" - ipldformat "github.com/ipfs/go-ipld-format" - "github.com/ipfs/go-merkledag" - unixfile "github.com/ipfs/go-unixfs/file" - "github.com/ipfs/go-unixfs/importer/balanced" - ihelper "github.com/ipfs/go-unixfs/importer/helpers" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/libp2p/go-libp2p-core/host" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/require" -) - -var allSelector ipld.Node - -func init() { - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - allSelector = ssb.ExploreRecursive(selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge())).Node() -} - -const unixfsChunkSize uint64 = 1 << 10 -const unixfsLinksPerLevel = 1024 - -// 
GraphsyncTestingData is a test harness for testing data transfer on top of -// graphsync -type GraphsyncTestingData struct { - Ctx context.Context - StoredCounter1 *storedcounter.StoredCounter - StoredCounter2 *storedcounter.StoredCounter - Bs1 bstore.Blockstore - Bs2 bstore.Blockstore - DagService1 ipldformat.DAGService - DagService2 ipldformat.DAGService - Loader1 ipld.Loader - Loader2 ipld.Loader - Storer1 ipld.Storer - Storer2 ipld.Storer - Host1 host.Host - Host2 host.Host - GsNet1 gsnet.GraphSyncNetwork - GsNet2 gsnet.GraphSyncNetwork - AllSelector ipld.Node - OrigBytes []byte -} - -// NewGraphsyncTestingData returns a new GraphsyncTestingData instance -func NewGraphsyncTestingData(ctx context.Context, t *testing.T) *GraphsyncTestingData { - - gsData := &GraphsyncTestingData{} - gsData.Ctx = ctx - makeLoader := func(bs bstore.Blockstore) ipld.Loader { - return func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { - c, ok := lnk.(cidlink.Link) - if !ok { - return nil, errors.New("Incorrect Link Type") - } - // read block from one store - block, err := bs.Get(c.Cid) - if err != nil { - return nil, err - } - return bytes.NewReader(block.RawData()), nil - } - } - - makeStorer := func(bs bstore.Blockstore) ipld.Storer { - return func(lnkCtx ipld.LinkContext) (io.Writer, ipld.StoreCommitter, error) { - var buf bytes.Buffer - var committer ipld.StoreCommitter = func(lnk ipld.Link) error { - c, ok := lnk.(cidlink.Link) - if !ok { - return errors.New("Incorrect Link Type") - } - block, err := blocks.NewBlockWithCid(buf.Bytes(), c.Cid) - if err != nil { - return err - } - return bs.Put(block) - } - return &buf, committer, nil - } - } - ds1 := dss.MutexWrap(datastore.NewMapDatastore()) - ds2 := dss.MutexWrap(datastore.NewMapDatastore()) - // make a blockstore and dag service - gsData.Bs1 = bstore.NewBlockstore(namespace.Wrap(ds1, datastore.NewKey("blockstore"))) - gsData.Bs2 = bstore.NewBlockstore(namespace.Wrap(ds2, datastore.NewKey("blockstore"))) - - // 
make stored counters - gsData.StoredCounter1 = storedcounter.New(ds1, datastore.NewKey("counter")) - gsData.StoredCounter2 = storedcounter.New(ds2, datastore.NewKey("counter")) - - gsData.DagService1 = merkledag.NewDAGService(blockservice.New(gsData.Bs1, offline.Exchange(gsData.Bs1))) - gsData.DagService2 = merkledag.NewDAGService(blockservice.New(gsData.Bs2, offline.Exchange(gsData.Bs2))) - - // setup an IPLD loader/storer for blockstore 1 - gsData.Loader1 = makeLoader(gsData.Bs1) - gsData.Storer1 = makeStorer(gsData.Bs1) - - // setup an IPLD loader/storer for blockstore 2 - gsData.Loader2 = makeLoader(gsData.Bs2) - gsData.Storer2 = makeStorer(gsData.Bs2) - - mn := mocknet.New(ctx) - - // setup network - var err error - gsData.Host1, err = mn.GenPeer() - require.NoError(t, err) - - gsData.Host2, err = mn.GenPeer() - require.NoError(t, err) - - err = mn.LinkAll() - require.NoError(t, err) - - gsData.GsNet1 = gsnet.NewFromLibp2pHost(gsData.Host1) - gsData.GsNet2 = gsnet.NewFromLibp2pHost(gsData.Host2) - - // create a selector for the whole UnixFS dag - gsData.AllSelector = allSelector - - return gsData -} - -// SetupGraphsyncHost1 sets up a new, real graphsync instance on top of the first host -func (gsData *GraphsyncTestingData) SetupGraphsyncHost1() graphsync.GraphExchange { - // setup graphsync - return gsimpl.New(gsData.Ctx, gsData.GsNet1, gsData.Loader1, gsData.Storer1) -} - -// SetupGraphsyncHost2 sets up a new, real graphsync instance on top of the second host -func (gsData *GraphsyncTestingData) SetupGraphsyncHost2() graphsync.GraphExchange { - // setup graphsync - return gsimpl.New(gsData.Ctx, gsData.GsNet2, gsData.Loader2, gsData.Storer2) -} - -// LoadUnixFSFile loads a fixtures file we can test dag transfer with -func (gsData *GraphsyncTestingData) LoadUnixFSFile(t *testing.T, useSecondNode bool) ipld.Link { - _, curFile, _, ok := runtime.Caller(0) - require.True(t, ok) - - // read in a fixture file - path := filepath.Join(path.Dir(curFile), "fixtures", 
"lorem.txt") - - f, err := os.Open(path) - require.NoError(t, err) - - var buf bytes.Buffer - tr := io.TeeReader(f, &buf) - file := files.NewReaderFile(tr) - - // import to UnixFS - var dagService ipldformat.DAGService - if useSecondNode { - dagService = gsData.DagService2 - } else { - dagService = gsData.DagService1 - } - bufferedDS := ipldformat.NewBufferedDAG(gsData.Ctx, dagService) - - params := ihelper.DagBuilderParams{ - Maxlinks: unixfsLinksPerLevel, - RawLeaves: true, - CidBuilder: nil, - Dagserv: bufferedDS, - } - - db, err := params.New(chunker.NewSizeSplitter(file, int64(unixfsChunkSize))) - require.NoError(t, err) - - nd, err := balanced.Layout(db) - require.NoError(t, err) - - err = bufferedDS.Commit() - require.NoError(t, err) - - // save the original files bytes - gsData.OrigBytes = buf.Bytes() - - return cidlink.Link{Cid: nd.Cid()} -} - -// VerifyFileTransferred verifies all of the file was transfer to the given node -func (gsData *GraphsyncTestingData) VerifyFileTransferred(t *testing.T, link ipld.Link, useSecondNode bool) { - var dagService ipldformat.DAGService - if useSecondNode { - dagService = gsData.DagService2 - } else { - dagService = gsData.DagService1 - } - - c := link.(cidlink.Link).Cid - - // load the root of the UnixFS DAG from the new blockstore - otherNode, err := dagService.Get(gsData.Ctx, c) - require.NoError(t, err) - - // Setup a UnixFS file reader - n, err := unixfile.NewUnixfsFile(gsData.Ctx, dagService, otherNode) - require.NoError(t, err) - - fn, ok := n.(files.File) - require.True(t, ok) - - // Read the bytes for the UnixFS File - finalBytes, err := ioutil.ReadAll(fn) - require.NoError(t, err) - - // verify original bytes match final bytes! 
- require.EqualValues(t, gsData.OrigBytes, finalBytes) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/testutil/testutil.go b/vendor/github.com/filecoin-project/go-data-transfer/testutil/testutil.go deleted file mode 100644 index da2d0f1646..0000000000 --- a/vendor/github.com/filecoin-project/go-data-transfer/testutil/testutil.go +++ /dev/null @@ -1,95 +0,0 @@ -package testutil - -import ( - "bytes" - "testing" - - "github.com/filecoin-project/go-data-transfer/message" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - "github.com/jbenet/go-random" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" -) - -var blockGenerator = blocksutil.NewBlockGenerator() - -//var prioritySeq int -var seedSeq int64 - -// RandomBytes returns a byte array of the given size with random values. -func RandomBytes(n int64) []byte { - data := new(bytes.Buffer) - random.WritePseudoRandomBytes(n, data, seedSeq) // nolint: gosec,errcheck - seedSeq++ - return data.Bytes() -} - -// GenerateBlocksOfSize generates a series of blocks of the given byte size -func GenerateBlocksOfSize(n int, size int64) []blocks.Block { - generatedBlocks := make([]blocks.Block, 0, n) - for i := 0; i < n; i++ { - b := blocks.NewBlock(RandomBytes(size)) - generatedBlocks = append(generatedBlocks, b) - - } - return generatedBlocks -} - -// GenerateCids produces n content identifiers. -func GenerateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) - } - return cids -} - -var peerSeq int - -// GeneratePeers creates n peer ids. -func GeneratePeers(n int) []peer.ID { - peerIds := make([]peer.ID, 0, n) - for i := 0; i < n; i++ { - peerSeq++ - p := peer.ID(peerSeq) - peerIds = append(peerIds, p) - } - return peerIds -} - -// ContainsPeer returns true if a peer is found n a list of peers. 
-func ContainsPeer(peers []peer.ID, p peer.ID) bool { - for _, n := range peers { - if p == n { - return true - } - } - return false -} - -// IndexOf returns the index of a given cid in an array of blocks -func IndexOf(blks []blocks.Block, c cid.Cid) int { - for i, n := range blks { - if n.Cid() == c { - return i - } - } - return -1 -} - -// ContainsBlock returns true if a block is found n a list of blocks -func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { - return IndexOf(blks, block.Cid()) != -1 -} - -// AssertEqualSelector asserts two requests have the same valid selector -func AssertEqualSelector(t *testing.T, expectedRequest message.DataTransferRequest, request message.DataTransferRequest) { - expectedSelector, err := expectedRequest.Selector() - require.NoError(t, err) - selector, err := request.Selector() - require.NoError(t, err) - require.Equal(t, expectedSelector, selector) -} diff --git a/vendor/github.com/filecoin-project/go-data-transfer/transport.go b/vendor/github.com/filecoin-project/go-data-transfer/transport.go new file mode 100644 index 0000000000..3ca94ba6cd --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/transport.go @@ -0,0 +1,109 @@ +package datatransfer + +import ( + "context" + + "github.com/ipfs/go-cid" + ipld "github.com/ipld/go-ipld-prime" + peer "github.com/libp2p/go-libp2p-core/peer" +) + +// EventsHandler are semantic data transfer events that happen as a result of graphsync hooks +type EventsHandler interface { + // OnChannelOpened is called when we ask the other peer to send us data on the + // given channel ID + // return values are: + // - nil = this channel is recognized + // - error = ignore incoming data for this channel + OnChannelOpened(chid ChannelID) error + // OnResponseReceived is called when we receive a response to a request + // - nil = continue receiving data + // - error = cancel this request + OnResponseReceived(chid ChannelID, msg Response) error + // OnDataReceive is called 
when we receive data for the given channel ID + // return values are: + // - nil = proceed with sending data + // - error = cancel this request + // - err == ErrPause - pause this request + OnDataReceived(chid ChannelID, link ipld.Link, size uint64) error + // OnDataSent is called when we send data for the given channel ID + // return values are: + // message = data transfer message along with data + // err = error + // - nil = proceed with sending data + // - error = cancel this request + // - err == ErrPause - pause this request + OnDataSent(chid ChannelID, link ipld.Link, size uint64) (Message, error) + // OnRequestReceived is called when we receive a new request to send data + // for the given channel ID + // return values are: + // message = data transfer message along with reply + // err = error + // - nil = proceed with sending data + // - error = cancel this request + // - err == ErrPause - pause this request (only for new requests) + // - err == ErrResume - resume this request (only for update requests) + OnRequestReceived(chid ChannelID, msg Request) (Response, error) + // OnResponseCompleted is called when we finish sending data for the given channel ID + // Error returns are logged but otherwise have not effect + OnChannelCompleted(chid ChannelID, success bool) error + + // OnRequestTimedOut is called when a request we opened (with the given channel Id) to receive data times out. + // Error returns are logged but otherwise have no effect + OnRequestTimedOut(ctx context.Context, chid ChannelID) error + + // OnRequestDisconnected is called when a network error occurs in a graphsync request + // or we appear to stall while receiving data + OnRequestDisconnected(ctx context.Context, chid ChannelID) error +} + +/* +Transport defines the interface for a transport layer for data +transfer. 
Where the data transfer manager will coordinate setting up push and +pull requests, validation, etc, the transport layer is responsible for moving +data back and forth, and may be medium specific. For example, some transports +may have the ability to pause and resume requests, while others may not. +Some may support individual data events, while others may only support message +events. Some transport layers may opt to use the actual data transfer network +protocols directly while others may be able to encode messages in their own +data protocol. + +Transport is the minimum interface that must be satisfied to serve as a datatransfer +transport layer. Transports must be able to open (open is always called by the receiving peer) +and close channels, and set at an event handler */ +type Transport interface { + // OpenChannel initiates an outgoing request for the other peer to send data + // to us on this channel + // Note: from a data transfer symantic standpoint, it doesn't matter if the + // request is push or pull -- OpenChannel is called by the party that is + // intending to receive data + OpenChannel(ctx context.Context, + dataSender peer.ID, + channelID ChannelID, + root ipld.Link, + stor ipld.Node, + doNotSendCids []cid.Cid, + msg Message) error + + // CloseChannel closes the given channel + CloseChannel(ctx context.Context, chid ChannelID) error + // SetEventHandler sets the handler for events on channels + SetEventHandler(events EventsHandler) error + // CleanupChannel is called on the otherside of a cancel - removes any associated + // data for the channel + CleanupChannel(chid ChannelID) +} + +// PauseableTransport is a transport that can also pause and resume channels +type PauseableTransport interface { + Transport + // PauseChannel paused the given channel ID + PauseChannel(ctx context.Context, + chid ChannelID, + ) error + // ResumeChannel resumes the given channel + ResumeChannel(ctx context.Context, + msg Message, + chid ChannelID, + ) error +} diff 
--git a/vendor/github.com/filecoin-project/go-data-transfer/types.go b/vendor/github.com/filecoin-project/go-data-transfer/types.go index 552e56edcc..f2de437d3d 100644 --- a/vendor/github.com/filecoin-project/go-data-transfer/types.go +++ b/vendor/github.com/filecoin-project/go-data-transfer/types.go @@ -1,19 +1,24 @@ package datatransfer import ( - "context" - "time" + "fmt" - "github.com/filecoin-project/go-data-transfer/encoding" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" "github.com/libp2p/go-libp2p-core/peer" + + "github.com/filecoin-project/go-data-transfer/encoding" ) +//go:generate cbor-gen-for ChannelID + // TypeIdentifier is a unique string identifier for a type of encodable object in a // registry type TypeIdentifier string +// EmptyTypeIdentifier means there is no voucher present +const EmptyTypeIdentifier = TypeIdentifier("") + // Registerable is a type of object in a registry. It must be encodable and must // have a single method that uniquely identifies its type type Registerable interface { @@ -28,22 +33,9 @@ type Registerable interface { // from bytes, and has a string identifier type type Voucher Registerable -// Status is the status of transfer for a given channel -type Status int - -const ( - // Ongoing means the data transfer is in progress - Ongoing Status = iota - - // Completed means the data transfer is completed successfully - Completed - - // Failed means the data transfer failed - Failed - - // ChannelNotFoundError means the searched for data transfer does not exist - ChannelNotFoundError -) +// VoucherResult is used to provide option additional information about a +// voucher being rejected or accepted +type VoucherResult Registerable // TransferID is an identifier for a data transfer, shared between // request/responder and unique to the requester @@ -53,9 +45,23 @@ type TransferID uint64 // party's peer ID + the transfer ID type ChannelID struct { Initiator peer.ID + Responder peer.ID ID TransferID } +func (c ChannelID) 
String() string { + return fmt.Sprintf("%s-%s-%d", c.Initiator, c.Responder, c.ID) +} + +// OtherParty returns the peer on the other side of the request, depending +// on whether this peer is the initiator or responder +func (c ChannelID) OtherParty(thisPeer peer.ID) peer.ID { + if thisPeer == c.Initiator { + return c.Responder + } + return c.Initiator +} + // Channel represents all the parameters for a single data transfer type Channel interface { // TransferID returns the transfer id for this channel @@ -79,91 +85,48 @@ type Channel interface { // TotalSize returns the total size for the data being transferred TotalSize() uint64 + + // IsPull returns whether this is a pull request + IsPull() bool + + // ChannelID returns the ChannelID for this request + ChannelID() ChannelID + + // OtherPeer returns the counter party peer for this channel + OtherPeer() peer.ID + + // ReceivedCids returns the cids received so far on the channel + ReceivedCids() []cid.Cid } // ChannelState is channel parameters plus it's current state type ChannelState interface { Channel + // SelfPeer returns the peer this channel belongs to + SelfPeer() peer.ID + + // Status is the current status of this channel + Status() Status + // Sent returns the number of bytes sent Sent() uint64 // Received returns the number of bytes received Received() uint64 -} - -// EventCode is a name for an event that occurs on a data transfer channel -type EventCode int - -const ( - // Open is an event occurs when a channel is first opened - Open EventCode = iota - - // Progress is an event that gets emitted every time more data is transferred - Progress - - // Error is an event that emits when an error occurs in a data transfer - Error - - // Complete is emitted when a data transfer is complete - Complete -) - -// Event is a struct containing information about a data transfer event -type Event struct { - Code EventCode // What type of event it is - Message string // Any clarifying information about the event - 
Timestamp time.Time // when the event happened -} - -// Subscriber is a callback that is called when events are emitted -type Subscriber func(event Event, channelState ChannelState) - -// Unsubscribe is a function that gets called to unsubscribe from data transfer events -type Unsubscribe func() - -// RequestValidator is an interface implemented by the client of the -// data transfer module to validate requests -type RequestValidator interface { - // ValidatePush validates a push request received from the peer that will send data - ValidatePush( - sender peer.ID, - voucher Voucher, - baseCid cid.Cid, - selector ipld.Node) error - // ValidatePull validates a pull request received from the peer that will receive data - ValidatePull( - receiver peer.ID, - voucher Voucher, - baseCid cid.Cid, - selector ipld.Node) error -} - -// Manager is the core interface presented by all implementations of -// of the data transfer sub system -type Manager interface { - // RegisterVoucherType registers a validator for the given voucher type - // will error if voucher type does not implement voucher - // or if there is a voucher type registered with an identical identifier - RegisterVoucherType(voucherType Voucher, validator RequestValidator) error - - // open a data transfer that will send data to the recipient peer and - // transfer parts of the piece that match the selector - OpenPushDataChannel(ctx context.Context, to peer.ID, voucher Voucher, baseCid cid.Cid, selector ipld.Node) (ChannelID, error) - // open a data transfer that will request data from the sending peer and - // transfer parts of the piece that match the selector - OpenPullDataChannel(ctx context.Context, to peer.ID, voucher Voucher, baseCid cid.Cid, selector ipld.Node) (ChannelID, error) + // Message offers additional information about the current status + Message() string - // close an open channel (effectively a cancel) - CloseDataTransferChannel(x ChannelID) + // Vouchers returns all vouchers sent on this 
channel + Vouchers() []Voucher - // get status of a transfer - TransferChannelStatus(x ChannelID) Status + // VoucherResults are results of vouchers sent on the channel + VoucherResults() []VoucherResult - // get notified when certain types of events happen - SubscribeToEvents(subscriber Subscriber) Unsubscribe + // LastVoucher returns the last voucher sent on the channel + LastVoucher() Voucher - // get all in progress transfers - InProgressChannels() map[ChannelID]ChannelState + // LastVoucherResult returns the last voucher result sent on the channel + LastVoucherResult() VoucherResult } diff --git a/vendor/github.com/filecoin-project/go-data-transfer/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-data-transfer/types_cbor_gen.go new file mode 100644 index 0000000000..ede6903cdf --- /dev/null +++ b/vendor/github.com/filecoin-project/go-data-transfer/types_cbor_gen.go @@ -0,0 +1,115 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package datatransfer + +import ( + "fmt" + "io" + + peer "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufChannelID = []byte{131} + +func (t *ChannelID) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufChannelID); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Initiator (peer.ID) (string) + if len(t.Initiator) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Initiator was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Initiator))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Initiator)); err != nil { + return err + } + + // t.Responder (peer.ID) (string) + if len(t.Responder) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Responder was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Responder))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Responder)); err != nil { + return err + } + + // t.ID (datatransfer.TransferID) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.ID)); err != nil { + return err + } + + return nil +} + +func (t *ChannelID) UnmarshalCBOR(r io.Reader) error { + *t = ChannelID{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Initiator (peer.ID) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Initiator = peer.ID(sval) + } + // t.Responder (peer.ID) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Responder = peer.ID(sval) + } + // t.ID (datatransfer.TransferID) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.ID = TransferID(extra) + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/.circleci/config.yml b/vendor/github.com/filecoin-project/go-fil-markets/.circleci/config.yml deleted file mode 100644 index 1e0afe2124..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/.circleci/config.yml +++ /dev/null @@ -1,198 +0,0 @@ -version: 2.1 -orbs: - go: gotest/tools@0.0.9 - codecov: codecov/codecov@1.0.2 - node: circleci/node@3.0.0 - -executors: - golang: - docker: - - image: circleci/golang:1.13-node - resource_class: large - -commands: - install-deps: - steps: - - go/install-ssh - - go/install: {package: git} - 
prepare: - parameters: - linux: - default: true - description: is a linux build environment? - type: boolean - steps: - - checkout - - when: - condition: << parameters.linux >> - steps: - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev - - run: git submodule sync - - run: git submodule update --init - build-all: - - -jobs: - mod-tidy-check: - executor: golang - steps: - - install-deps - - prepare - - go/mod-download - - go/mod-tidy-check - - cbor-gen-check: - executor: golang - steps: - - install-deps - - prepare - - run: go install golang.org/x/tools/cmd/goimports - - run: go install github.com/hannahhoward/cbor-gen-for - - run: go generate ./... - - run: git --no-pager diff - - run: git --no-pager diff --quiet - - - docs-check: - executor: golang - steps: - - install-deps - - prepare - - run: - name: Install Headless Chrome Dependencies - command: | - sudo apt-get install -yq \ - gconf-service libasound2 libatk1.0-0 libatk-bridge2.0-0 libc6 libcairo2 libcups2 libdbus-1-3 \ - libexpat1 libfontconfig1 libgcc1 libgconf-2-4 libgdk-pixbuf2.0-0 libglib2.0-0 libgtk-3-0 libnspr4 \ - libpango-1.0-0 libpangocairo-1.0-0 libstdc++6 libx11-6 libx11-xcb1 libxcb1 libxcomposite1 libxcursor1 \ - libxdamage1 libxext6 libxfixes3 libxi6 libxrandr2 libxrender1 libxss1 libxtst6 ca-certificates \ - fonts-liberation libappindicator1 libnss3 lsb-release xdg-utils wget - - node/install-packages - - run: make diagrams - - run: git --no-pager diff - - run: git --no-pager diff --quiet - - build-all: - executor: golang - steps: - - install-deps - - prepare - - go/mod-download - - run: sudo apt-get update - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go-fil-markets/go.mod" }} - - run: - command: make build - - store_artifacts: - path: go-fil-markets - - store_artifacts: - path: go-fil-markets - - test: &test - description: | - Run tests with gotestsum. 
- parameters: - executor: - type: executor - default: golang - test-suite-name: - type: string - default: unit - description: Test suite name to report to CircleCI. - codecov-upload: - type: boolean - default: true - description: | - Upload coverage report to https://codecov.io/. Requires the codecov API token to be - set as an environment variable for private projects. - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - go/install-gotestsum: - gobin: $HOME/.local/bin - - run: - name: make test - environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - make test - no_output_timeout: 30m - - store_test_results: - path: /tmp/test-reports - - when: - condition: << parameters.codecov-upload >> - steps: - - go/install: {package: bash} - - go/install: {package: curl} - - run: - shell: /bin/bash -eo pipefail - command: | - bash <(curl -s https://codecov.io/bash) - - save_cache: - name: save go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - paths: - - "~/go/pkg" - - "~/go/src/github.com" - - "~/go/src/golang.org" - - lint: &lint - description: | - Run golangci-lint. - parameters: - executor: - type: executor - default: golang - golangci-lint-version: - type: string - default: 1.21.0 - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. 
- args: - type: string - default: '' - description: | - Arguments to pass to golangci-lint - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - run: - command: make build - - go/install-golangci-lint: - gobin: $HOME/.local/bin - version: << parameters.golangci-lint-version >> - - run: - name: Lint - command: | - $HOME/.local/bin/golangci-lint run -v \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - - lint-all: - <<: *lint - -workflows: - version: 2.1 - ci: - jobs: - - lint-changes: - args: "--new-from-rev origin/master" - - test - - mod-tidy-check - - cbor-gen-check - - docs-check - - build-all diff --git a/vendor/github.com/filecoin-project/go-fil-markets/CHANGELOG.md b/vendor/github.com/filecoin-project/go-fil-markets/CHANGELOG.md deleted file mode 100644 index 4129891195..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/CHANGELOG.md +++ /dev/null @@ -1,440 +0,0 @@ -# go-fil-markets changelog - -# go-fil-markets 0.1.0 - -Initial tagged release for Filecoin Testnet Phase 2 - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(release): document release process (#206) ([filecoin-project/go-fil-markets#206](https://github.com/filecoin-project/go-fil-markets/pull/206)) - - update types_cbor_gen (#203) ([filecoin-project/go-fil-markets#203](https://github.com/filecoin-project/go-fil-markets/pull/203)) - - Upgrade to specs-actors v0.2.0 (#204) ([filecoin-project/go-fil-markets#204](https://github.com/filecoin-project/go-fil-markets/pull/204)) - - Storagemarket/provider allows subscription to events (#202) ([filecoin-project/go-fil-markets#202](https://github.com/filecoin-project/go-fil-markets/pull/202)) - - Add a test rule to Makefile, use in CI config (#200) ([filecoin-project/go-fil-markets#200](https://github.com/filecoin-project/go-fil-markets/pull/200)) - - Update to specs-actors v1.0.0 (#198) 
([filecoin-project/go-fil-markets#198](https://github.com/filecoin-project/go-fil-markets/pull/198)) - - add multiple peers per payloadCID (#197) ([filecoin-project/go-fil-markets#197](https://github.com/filecoin-project/go-fil-markets/pull/197)) - - refactor(storedcounter): use extracted package (#196) ([filecoin-project/go-fil-markets#196](https://github.com/filecoin-project/go-fil-markets/pull/196)) - - Feat/no block chain ops (#190) ([filecoin-project/go-fil-markets#190](https://github.com/filecoin-project/go-fil-markets/pull/190)) - - Add a max piece size to storage asks (#188) ([filecoin-project/go-fil-markets#188](https://github.com/filecoin-project/go-fil-markets/pull/188)) - - Update proofs to v25 params (#189) ([filecoin-project/go-fil-markets#189](https://github.com/filecoin-project/go-fil-markets/pull/189)) - - Update Graphsync (#184) ([filecoin-project/go-fil-markets#184](https://github.com/filecoin-project/go-fil-markets/pull/184)) - - Support selectors on retrieval (#187) ([filecoin-project/go-fil-markets#187](https://github.com/filecoin-project/go-fil-markets/pull/187)) - - Add optional PieceCID to block unsealing (#186) ([filecoin-project/go-fil-markets#186](https://github.com/filecoin-project/go-fil-markets/pull/186)) - - Add Selector to retrieval params (#175) ([filecoin-project/go-fil-markets#175](https://github.com/filecoin-project/go-fil-markets/pull/175)) - - use PieceCID if provided in QueryParams (#181) ([filecoin-project/go-fil-markets#181](https://github.com/filecoin-project/go-fil-markets/pull/181)) - - include rejection reason in client response (#182) ([filecoin-project/go-fil-markets#182](https://github.com/filecoin-project/go-fil-markets/pull/182)) - - Do not create CAR file when propsing a storage deal using Manual Transfer (#183) ([filecoin-project/go-fil-markets#183](https://github.com/filecoin-project/go-fil-markets/pull/183)) - - add selector to BlockIO classes (#178) 
([filecoin-project/go-fil-markets#178](https://github.com/filecoin-project/go-fil-markets/pull/178)) - - rename list deals interface & impls (#174) ([filecoin-project/go-fil-markets#174](https://github.com/filecoin-project/go-fil-markets/pull/174)) - - Feat/configure start epoch buffer (#171) ([filecoin-project/go-fil-markets#171](https://github.com/filecoin-project/go-fil-markets/pull/171)) - - send tipset identifier to node when interacting with chain (#172) ([filecoin-project/go-fil-markets#172](https://github.com/filecoin-project/go-fil-markets/pull/172)) - - Support Retrieval By Any CID, Not Just Root (#166) ([filecoin-project/go-fil-markets#166](https://github.com/filecoin-project/go-fil-markets/pull/166)) - - v24 groth parameters (#167) ([filecoin-project/go-fil-markets#167](https://github.com/filecoin-project/go-fil-markets/pull/167)) - - Add TipSetToken to SavePaymentVoucher (#165) ([filecoin-project/go-fil-markets#165](https://github.com/filecoin-project/go-fil-markets/pull/165)) - - retrieval client node interface passes tipset identifier to node (#164) ([filecoin-project/go-fil-markets#164](https://github.com/filecoin-project/go-fil-markets/pull/164)) - - send state identifiery when getting miner worker address (#153) ([filecoin-project/go-fil-markets#153](https://github.com/filecoin-project/go-fil-markets/pull/153)) - - chore(deps): update to ipld/go-car (#152) ([filecoin-project/go-fil-markets#152](https://github.com/filecoin-project/go-fil-markets/pull/152)) - - add TipSet identity-producing method to various Node interfaces (#149) ([filecoin-project/go-fil-markets#149](https://github.com/filecoin-project/go-fil-markets/pull/149)) - - conform imports to schema defined in CONTRIBUTING.md (#150) ([filecoin-project/go-fil-markets#150](https://github.com/filecoin-project/go-fil-markets/pull/150)) - - Refactor Storage Provider to FSM Module (#145) ([filecoin-project/go-fil-markets#145](https://github.com/filecoin-project/go-fil-markets/pull/145)) - - 
Feat/update to fix 32gib verification (#147) ([filecoin-project/go-fil-markets#147](https://github.com/filecoin-project/go-fil-markets/pull/147)) - - ci(codecov): remove cbor gen files from coverage - - ci(codecov): ignore cbor gen files (#146) ([filecoin-project/go-fil-markets#146](https://github.com/filecoin-project/go-fil-markets/pull/146)) - - Storage Client Statemachine Refactor (#136) ([filecoin-project/go-fil-markets#136](https://github.com/filecoin-project/go-fil-markets/pull/136)) - - upgrade to libfilecoin version that supports cache clearing (#138) ([filecoin-project/go-fil-markets#138](https://github.com/filecoin-project/go-fil-markets/pull/138)) - - fix(cborgen): update cbor gen for dataref (#137) ([filecoin-project/go-fil-markets#137](https://github.com/filecoin-project/go-fil-markets/pull/137)) - - allow manual piece commitment (#135) ([filecoin-project/go-fil-markets#135](https://github.com/filecoin-project/go-fil-markets/pull/135)) - - fix(retrievalmarket): handle self-retrieval correctly (#134) ([filecoin-project/go-fil-markets#134](https://github.com/filecoin-project/go-fil-markets/pull/134)) - - feat(retrievalmarket): support wallet address (#130) ([filecoin-project/go-fil-markets#130](https://github.com/filecoin-project/go-fil-markets/pull/130)) - - allow specification of 'wallet' for ensure funds calls (#129) ([filecoin-project/go-fil-markets#129](https://github.com/filecoin-project/go-fil-markets/pull/129)) - - update to filecoin-ffi with shared types (#127) ([filecoin-project/go-fil-markets#127](https://github.com/filecoin-project/go-fil-markets/pull/127)) - - feat(sharedcounter): persist counter to disk (#125) ([filecoin-project/go-fil-markets#125](https://github.com/filecoin-project/go-fil-markets/pull/125)) - - Use go-statemachine + FSMs in retrieval market (#124) ([filecoin-project/go-fil-markets#124](https://github.com/filecoin-project/go-fil-markets/pull/124)) - - storage client: Call EnsureFunds more correctly (#123) 
([filecoin-project/go-fil-markets#123](https://github.com/filecoin-project/go-fil-markets/pull/123)) - - use latest specs-actors with uint64 lane and nonce from paych.Actor (#122) ([filecoin-project/go-fil-markets#122](https://github.com/filecoin-project/go-fil-markets/pull/122)) - - Update go-sectorbuilder to latest that uses specs-actors types (#121) ([filecoin-project/go-fil-markets#121](https://github.com/filecoin-project/go-fil-markets/pull/121)) - - Import spec actor types (#118) ([filecoin-project/go-fil-markets#118](https://github.com/filecoin-project/go-fil-markets/pull/118)) - - Update README (#120) ([filecoin-project/go-fil-markets#120](https://github.com/filecoin-project/go-fil-markets/pull/120)) - - chore(cborgen): update cborgen - - Merge branch 'head/lotus' into lotus/merge-02-10-2020 - - Storage Market integration test (#119) ([filecoin-project/go-fil-markets#119](https://github.com/filecoin-project/go-fil-markets/pull/119)) - - fix(storagemarket): add back in cid recording (#115) ([filecoin-project/go-fil-markets#115](https://github.com/filecoin-project/go-fil-markets/pull/115)) - - fix(storagemarket): assign net member (#114) ([filecoin-project/go-fil-markets#114](https://github.com/filecoin-project/go-fil-markets/pull/114)) - - Fix/flaky tests (#113) ([filecoin-project/go-fil-markets#113](https://github.com/filecoin-project/go-fil-markets/pull/113)) - - Storage market network abstraction (#109) ([filecoin-project/go-fil-markets#109](https://github.com/filecoin-project/go-fil-markets/pull/109)) - - Remove Sector ID from MinerDeal (merge from head/lotus -- PLEASE USE MERGE COMMIT) ([filecoin-project/go-fil-markets#112](https://github.com/filecoin-project/go-fil-markets/pull/112)) - - No Filestore On Storage Client (#107) ([filecoin-project/go-fil-markets#107](https://github.com/filecoin-project/go-fil-markets/pull/107)) - - take miner address as parameter (#108) 
([filecoin-project/go-fil-markets#108](https://github.com/filecoin-project/go-fil-markets/pull/108)) - - skip flaky 1 block tests (#104) ([filecoin-project/go-fil-markets#104](https://github.com/filecoin-project/go-fil-markets/pull/104)) - - use go-padreader instead of local copy (#103) ([filecoin-project/go-fil-markets#103](https://github.com/filecoin-project/go-fil-markets/pull/103)) - - Handle sector id in the `OnDealSectorCommitted` callback (#58) ([filecoin-project/go-fil-markets#58](https://github.com/filecoin-project/go-fil-markets/pull/58)) - - Properly Implement Retrieval Lookups Based on CIDs (#57) ([filecoin-project/go-fil-markets#57](https://github.com/filecoin-project/go-fil-markets/pull/57)) - - Add Stop funcs to retrieval providers (#56) ([filecoin-project/go-fil-markets#56](https://github.com/filecoin-project/go-fil-markets/pull/56)) - - refactor(retrievalmarket): switch to payload CIDs (#55) ([filecoin-project/go-fil-markets#55](https://github.com/filecoin-project/go-fil-markets/pull/55)) - - Move to an explicit piecestore and explicit unsealing. 
(#54) ([filecoin-project/go-fil-markets#54](https://github.com/filecoin-project/go-fil-markets/pull/54)) - - Improve test coverage, fix any bugs (#53) ([filecoin-project/go-fil-markets#53](https://github.com/filecoin-project/go-fil-markets/pull/53)) - - Techdebt/1 block file retrieval test (#51) ([filecoin-project/go-fil-markets#51](https://github.com/filecoin-project/go-fil-markets/pull/51)) - - ci(config): use large resource_class (#52) ([filecoin-project/go-fil-markets#52](https://github.com/filecoin-project/go-fil-markets/pull/52)) - - Sync up DealState to match spec (#50) ([filecoin-project/go-fil-markets#50](https://github.com/filecoin-project/go-fil-markets/pull/50)) - - Support arbitrary dag retrieval (#46) ([filecoin-project/go-fil-markets#46](https://github.com/filecoin-project/go-fil-markets/pull/46)) - - RetrievalMarket: Query + Deal integration test, + bug fixes uncovered during writing the test (#36) ([filecoin-project/go-fil-markets#36](https://github.com/filecoin-project/go-fil-markets/pull/36)) - - Remove filestore as a go between with StorageMiner, pass direct io.reader to StorageMiner (#49) ([filecoin-project/go-fil-markets#49](https://github.com/filecoin-project/go-fil-markets/pull/49)) - - Feat/find providers (#43) ([filecoin-project/go-fil-markets#43](https://github.com/filecoin-project/go-fil-markets/pull/43)) - - Retrieval Deals, Spec V0 (#37) ([filecoin-project/go-fil-markets#37](https://github.com/filecoin-project/go-fil-markets/pull/37)) - - Lotus updates ([filecoin-project/go-fil-markets#45](https://github.com/filecoin-project/go-fil-markets/pull/45)) - - storagemarket: close channel on return (#42) ([filecoin-project/go-fil-markets#42](https://github.com/filecoin-project/go-fil-markets/pull/42)) - - Feat/verify data before publishing deal (#40) ([filecoin-project/go-fil-markets#40](https://github.com/filecoin-project/go-fil-markets/pull/40)) - - Use CAR and padding for piece data (#27) 
([filecoin-project/go-fil-markets#27](https://github.com/filecoin-project/go-fil-markets/pull/27)) - - Upgrade Query Protocol to Spec V0 (#25) ([filecoin-project/go-fil-markets#25](https://github.com/filecoin-project/go-fil-markets/pull/25)) - - Merge branch 'lotus-updates' - - fix(retrievalmarket): add mutex around subscribers (#32) (#33) ([filecoin-project/go-fil-markets#33](https://github.com/filecoin-project/go-fil-markets/pull/33)) - - ci(codecov): disable status, display report (#31) ([filecoin-project/go-fil-markets#31](https://github.com/filecoin-project/go-fil-markets/pull/31)) - - Flaky test fix (#28) ([filecoin-project/go-fil-markets#28](https://github.com/filecoin-project/go-fil-markets/pull/28)) - - skip flaky test (#30) ([filecoin-project/go-fil-markets#30](https://github.com/filecoin-project/go-fil-markets/pull/30)) - - Network Abstraction For Retrieval Market (#17) ([filecoin-project/go-fil-markets#17](https://github.com/filecoin-project/go-fil-markets/pull/17)) - - Use CAR file in generation of CommP (#26) ([filecoin-project/go-fil-markets#26](https://github.com/filecoin-project/go-fil-markets/pull/26)) - - filestore: track close err, lints (#20) ([filecoin-project/go-fil-markets#20](https://github.com/filecoin-project/go-fil-markets/pull/20)) - - Deleting datatransfer files (#19) ([filecoin-project/go-fil-markets#19](https://github.com/filecoin-project/go-fil-markets/pull/19)) - - Use shared go-filecoin packages go-cbor-util, go-address, go-crypto, (#22) ([filecoin-project/go-fil-markets#22](https://github.com/filecoin-project/go-fil-markets/pull/22)) - - Storage Market Extraction (#15) ([filecoin-project/go-fil-markets#15](https://github.com/filecoin-project/go-fil-markets/pull/15)) - - Retrieval Market Extraction (#13) ([filecoin-project/go-fil-markets#13](https://github.com/filecoin-project/go-fil-markets/pull/13)) - - PieceIO improvements (#12) ([filecoin-project/go-fil-markets#12](https://github.com/filecoin-project/go-fil-markets/pull/12)) - 
- fix links in datatransfer README (#11) ([filecoin-project/go-fil-markets#11](https://github.com/filecoin-project/go-fil-markets/pull/11)) - - fix(build): fix tools build error (#14) ([filecoin-project/go-fil-markets#14](https://github.com/filecoin-project/go-fil-markets/pull/14)) - - fix(tokenamount): fix naming (#10) ([filecoin-project/go-fil-markets#10](https://github.com/filecoin-project/go-fil-markets/pull/10)) - - feat(shared): add shared tools and types (#9) ([filecoin-project/go-fil-markets#9](https://github.com/filecoin-project/go-fil-markets/pull/9)) - - add circle config, let's ci ([filecoin-project/go-fil-markets#7](https://github.com/filecoin-project/go-fil-markets/pull/7)) - - Skeleton readme ([filecoin-project/go-fil-markets#5](https://github.com/filecoin-project/go-fil-markets/pull/5)) - - Feat/datatransfer readme, contributing, design doc (rename) - - Piece IO ([filecoin-project/go-fil-markets#2](https://github.com/filecoin-project/go-fil-markets/pull/2)) - - Feat/datatransfer graphsync movein ([filecoin-project/go-fil-markets#1](https://github.com/filecoin-project/go-fil-markets/pull/1)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hannah Howard | 38 | +27080/-10375 | 455 | -| Ingar Shu | 10 | +1315/-6870 | 127 | -| shannonwells | 12 | +5500/-70 | 48 | -| Shannon Wells | 20 | +2671/-940 | 109 | -| ergastic | 4 | +1835/-501 | 47 | -| Erin Swenson-Healey | 9 | +516/-408 | 112 | -| hannahhoward | 10 | +497/-150 | 79 | -| Łukasz Magiera | 4 | +379/-139 | 19 | -| whyrusleeping | 3 | +239/-87 | 19 | -| Whyrusleeping | 4 | +192/-96 | 26 | -| Aayush Rajasekaran | 3 | +93/-13 | 14 | -| Mosh | 2 | +37/-8 | 2 | -| Ignacio Hagopian | 2 | +9/-11 | 2 | -| Alex North | 2 | +11/-7 | 4 | -| Alex Cruikshank | 1 | +1/-9 | 1 | - -# go-fil-markets 0.1.1 - -Hotfix release for spec actors update - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - chore(changelog): 
update changelog for tagged release - - Upgrade to specs-actors v0.3.0 (#207) ([filecoin-project/go-fil-markets#207](https://github.com/filecoin-project/go-fil-markets/pull/207)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| hannahhoward | 1 | +9/-1 | 1 | -| Alex North | 1 | +3/-3 | 2 | - -# go-fil-markets 0.1.2 - -Hotfix release for transitive dependencies to use new go-ipld-prime - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): update changelog - - Upgrade IPLD-prime to latest (#215) ([filecoin-project/go-fil-markets#215](https://github.com/filecoin-project/go-fil-markets/pull/215)) -- github.com/filecoin-project/go-data-transfer (v0.0.0-20200408061858-82c58b423ca6 -> v0.2.0): - - Upgrade graphsync + ipld-prime (#49) ([filecoin-project/go-data-transfer#49](https://github.com/filecoin-project/go-data-transfer/pull/49)) - - Use extracted generic pubsub (#48) ([filecoin-project/go-data-transfer#48](https://github.com/filecoin-project/go-data-transfer/pull/48)) - - Refactor & Cleanup In Preparation For Added Complexity (#47) ([filecoin-project/go-data-transfer#47](https://github.com/filecoin-project/go-data-transfer/pull/47)) - - feat(graphsync): complete notifications for responder (#46) ([filecoin-project/go-data-transfer#46](https://github.com/filecoin-project/go-data-transfer/pull/46)) -- github.com/ipfs/go-graphsync (v0.0.6-0.20200408061628-e1a98fc64c42 -> v0.0.6-0.20200428204348-97a8cf76a482): - - refactor(hooks): use external pubsub (#65) ([ipfs/go-graphsync#65](https://github.com/ipfs/go-graphsync/pull/65)) - - Update of IPLD Prime (#66) ([ipfs/go-graphsync#66](https://github.com/ipfs/go-graphsync/pull/66)) - - Add standard issue template - - feat(responsemanager): add listener for completed responses (#64) ([ipfs/go-graphsync#64](https://github.com/ipfs/go-graphsync/pull/64)) - - Update Requests (#63) 
([ipfs/go-graphsync#63](https://github.com/ipfs/go-graphsync/pull/63)) - - Add pausing and unpausing of requests (#62) ([ipfs/go-graphsync#62](https://github.com/ipfs/go-graphsync/pull/62)) - - ci(circle): remove benchmark task for now - - ci(circle): update orb - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hannah Howard | 10 | +5409/-4023 | 151 | -| Hector Sanjuan | 1 | +27/-0 | 2 | -| hannahhoward | 3 | +16/-8 | 5 | - - -# go-fil-markets 0.1.3 - -Hotfix release for critical graphsync bug fix - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): add release documentation - - fix(deps): update to tagged data transfer - - chore(deps): update data transfer + graphsync -- github.com/filecoin-project/go-data-transfer (v0.2.0 -> v0.2.1): - - chore(deps): update graphsync -- github.com/ipfs/go-graphsync (v0.0.6-0.20200428204348-97a8cf76a482 -> v0.0.6-0.20200504202014-9d5f2c26a103): - - fix(responsemanager): add nil check (#67) ([ipfs/go-graphsync#67](https://github.com/ipfs/go-graphsync/pull/67)) - - Add autocomment configuration - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hector Sanjuan | 1 | +68/-0 | 1 | -| hannahhoward | 4 | +20/-12 | 7 | -| Hannah Howard | 1 | +4/-0 | 1 | - -# go-fil-markets 0.2.0 - -Asynchronous operations release -- we no longer synchronously wait for chain messages to push - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): update changelog for 0.2.0 release - - Storage Market Changes Based On Lotus Integration (#223) ([filecoin-project/go-fil-markets#223](https://github.com/filecoin-project/go-fil-markets/pull/223)) - - Merge in hotfix 0.1.3 ([filecoin-project/go-fil-markets#225](https://github.com/filecoin-project/go-fil-markets/pull/225)) - - ppl can sub to storage client evts (#217) 
([filecoin-project/go-fil-markets#217](https://github.com/filecoin-project/go-fil-markets/pull/217)) - - fix(storagemarket): set miner peer id on deals (#216) ([filecoin-project/go-fil-markets#216](https://github.com/filecoin-project/go-fil-markets/pull/216)) - - chore(release): merge hotfix 0.1.2 branch back - - docs(release): update release process (#212) ([filecoin-project/go-fil-markets#212](https://github.com/filecoin-project/go-fil-markets/pull/212)) - - Nonblocking storage deals [#80] (#194) ([filecoin-project/go-fil-markets#194](https://github.com/filecoin-project/go-fil-markets/pull/194)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Ingar Shu | 1 | +993/-608 | 13 | -| Hannah Howard | 3 | +101/-59 | 14 | -| Shannon Wells | 1 | +106/-31 | 5 | -| hannahhoward | 1 | +8/-0 | 1 | - -# go-fil-markets 0.2.1 - -Hotfix release -- updates to try to solve deal stream problems attempt #1 - -### Changelog -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): update for 0.2.1 release - - update to v26 proofs (#232) ([filecoin-project/go-fil-markets#232](https://github.com/filecoin-project/go-fil-markets/pull/232)) - - Don't Keep Streams Open (#230) ([filecoin-project/go-fil-markets#230](https://github.com/filecoin-project/go-fil-markets/pull/230)) - - Round-trip storage/retrieval test (#229) ([filecoin-project/go-fil-markets#229](https://github.com/filecoin-project/go-fil-markets/pull/229)) - - feat(storagemarket): improve human readable constant maps (#228) ([filecoin-project/go-fil-markets#228](https://github.com/filecoin-project/go-fil-markets/pull/228)) - - fix(deps): update data-transfer 0.3.0 (#227) ([filecoin-project/go-fil-markets#227](https://github.com/filecoin-project/go-fil-markets/pull/227)) - - docs(CHANGELOG): update changelog for 0.2.0 release ([filecoin-project/go-fil-markets#226](https://github.com/filecoin-project/go-fil-markets/pull/226)) -- 
github.com/filecoin-project/go-data-transfer (v0.2.1 -> v0.3.0): - - feat(graphsyncimpl): fix open/close events (#52) ([filecoin-project/go-data-transfer#52](https://github.com/filecoin-project/go-data-transfer/pull/52)) - - chore(deps): update graphsync ([filecoin-project/go-data-transfer#51](https://github.com/filecoin-project/go-data-transfer/pull/51)) - - Refactor registry and encoding (#50) ([filecoin-project/go-data-transfer#50](https://github.com/filecoin-project/go-data-transfer/pull/50)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hannah Howard | 5 | +1841/-1303 | 59 | -| Shannon Wells | 1 | +511/-141 | 19 | -| hannahhoward | 1 | +11/-1 | 1 | -| Erin Swenson-Healey | 1 | +1/-1 | 1 | - -# go-fil-markets 0.2.2 - -Hotfix release -- updates to try to solve deal stream problems attempt #2 & v26 params update - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): docs for 0.2.2 release - - feat(storagemarket): revert protocol changes (#236) ([filecoin-project/go-fil-markets#236](https://github.com/filecoin-project/go-fil-markets/pull/236)) - - Feat/cbor gen check ci #231 (#234) ([filecoin-project/go-fil-markets#234](https://github.com/filecoin-project/go-fil-markets/pull/234)) - - update sector-storage and break transitive dependency on lotus (#235) ([filecoin-project/go-fil-markets#235](https://github.com/filecoin-project/go-fil-markets/pull/235)) - - docs(CHANGELOG): update for 0.2.1 release ([filecoin-project/go-fil-markets#233](https://github.com/filecoin-project/go-fil-markets/pull/233)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hannah Howard | 1 | +701/-614 | 22 | -| Erin Swenson-Healey | 1 | +5/-265 | 2 | -| Shannon Wells | 1 | +11/-0 | 1 | -| hannahhoward | 1 | +8/-1 | 1 | - -# go-fil-markets 0.2.3 - -Hotfix release -- final fix for issues with deal 
streams held open - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - feat(CHANGELOG): update changelog for v0.2.3 - - feat(network): tag connections to preserve them (#246) ([filecoin-project/go-fil-markets#246](https://github.com/filecoin-project/go-fil-markets/pull/246)) - - docs(CHANGELOG): docs for 0.2.2 release ([filecoin-project/go-fil-markets#243](https://github.com/filecoin-project/go-fil-markets/pull/243)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hannah Howard | 1 | +112/-7 | 10 | -| hannahhoward | 1 | +7/-1 | 1 | - -# go-fil-markets 0.2.4 - -go-filecoin compatibility release - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): update change log - - Buffer the done channel when adding storage collateral (#249) ([filecoin-project/go-fil-markets#249](https://github.com/filecoin-project/go-fil-markets/pull/249)) - - feat(CHANGELOG): update changelog for v0.2.3 ([filecoin-project/go-fil-markets#248](https://github.com/filecoin-project/go-fil-markets/pull/248)) - - Unified request validator (#247) ([filecoin-project/go-fil-markets#247](https://github.com/filecoin-project/go-fil-markets/pull/247)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Ingar Shu | 2 | +221/-230 | 7 | -| hannahhoward | 1 | +8/-0 | 1 | - -# go-fil-markets 0.2.5 - -go-filecoin compatibility release - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): update for 0.2.5 release - - Fixes from filecoin integration work (#253) ([filecoin-project/go-fil-markets#253](https://github.com/filecoin-project/go-fil-markets/pull/253)) - - docs(CHANGELOG): update change log ([filecoin-project/go-fil-markets#250](https://github.com/filecoin-project/go-fil-markets/pull/250)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | 
-|-------------|---------|---------|---------------| -| Hannah Howard | 1 | +138/-68 | 7 | -| hannahhoward | 1 | +8/-3 | 3 | - -# go-fil-markets 0.2.6 - -Remove data store wrapping - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - Feat/change prefixes 256 (#257) ([filecoin-project/go-fil-markets#257](https://github.com/filecoin-project/go-fil-markets/pull/257)) - - docs(CHANGELOG): update for 0.2.5 release ([filecoin-project/go-fil-markets#254](https://github.com/filecoin-project/go-fil-markets/pull/254)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Shannon Wells | 1 | +6/-15 | 5 | - -# go-fil-markets 0.2.7 - -Custom Deal Decision Logic and cleanups of 0.2.6 - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - docs(CHANGELOG): update changelog for 0.2.7 - - refactor(storagemarket): remove storedask from provider (#263) ([filecoin-project/go-fil-markets#263](https://github.com/filecoin-project/go-fil-markets/pull/263)) - - Deal Decision Custom Function (#262) ([filecoin-project/go-fil-markets#262](https://github.com/filecoin-project/go-fil-markets/pull/262)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hannah Howard | 2 | +142/-27 | 11 | -| shannonwells | 1 | +19/-6 | 1 | - -# go-fil-markets 0.3.0 - -Deal Resumability release. We now attempt to resume storage deals when the application is shut down and restart, and we support a more flexible deal acceptance protocol. 
- -### Changelog -- github.com/filecoin-project/go-fil-markets: - - fix(storagemarket): fix validator, add to test - - docs(CHANGELOG): update changelog and add detail script - - both StoredAsk and storage Provider are scoped to a single miner (#276) ([filecoin-project/go-fil-markets#276](https://github.com/filecoin-project/go-fil-markets/pull/276)) - - specs actors v0.6 (#274) ([filecoin-project/go-fil-markets#274](https://github.com/filecoin-project/go-fil-markets/pull/274)) - - Restartable storage deals (#270) ([filecoin-project/go-fil-markets#270](https://github.com/filecoin-project/go-fil-markets/pull/270)) - - replace AddAsk with SetAsk, to convey intent (#275) ([filecoin-project/go-fil-markets#275](https://github.com/filecoin-project/go-fil-markets/pull/275)) - - Allow custom decisioning for a provider to decide retrieval deals. (#269) ([filecoin-project/go-fil-markets#269](https://github.com/filecoin-project/go-fil-markets/pull/269)) - - Feat/module docs #83 (#267) ([filecoin-project/go-fil-markets#267](https://github.com/filecoin-project/go-fil-markets/pull/267)) - - Tentative acceptance protocol (#244) ([filecoin-project/go-fil-markets#244](https://github.com/filecoin-project/go-fil-markets/pull/244)) - - docs(CHANGELOG): update changelog for 0.2.7 ([filecoin-project/go-fil-markets#264](https://github.com/filecoin-project/go-fil-markets/pull/264)) -- github.com/filecoin-project/go-statemachine (v0.0.0-20200226041606-2074af6d51d9 -> v0.0.0-20200612181802-4eb3d0c68eba): - - Serialize notifications (#11) ([filecoin-project/go-statemachine#11](https://github.com/filecoin-project/go-statemachine/pull/11)) - - Run callback in goroutine (#10) ([filecoin-project/go-statemachine#10](https://github.com/filecoin-project/go-statemachine/pull/10)) - - Finality States ([filecoin-project/go-statemachine#9](https://github.com/filecoin-project/go-statemachine/pull/9)) - - Documentation, particularly for FSM Module (#8) 
([filecoin-project/go-statemachine#8](https://github.com/filecoin-project/go-statemachine/pull/8)) - - Call stageDone on nil nextStep ([filecoin-project/go-statemachine#7](https://github.com/filecoin-project/go-statemachine/pull/7)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Ingar Shu | 4 | +1407/-695 | 35 | -| Shannon Wells | 2 | +1515/-467 | 20 | -| hannahhoward | 8 | +862/-191 | 21 | -| Hannah Howard | 1 | +263/-0 | 2 | -| Łukasz Magiera | 1 | +48/-43 | 15 | -| Erin Swenson-Healey | 2 | +39/-42 | 10 | - -# go-fil-markets 0.3.1 - -Hotfix release to get `use addresses from miner info for connecting to miners` task merged for downstream dependencies to used - -### Changelog -- github.com/filecoin-project/go-fil-markets: - - use addresses from miner info for connecting to miners (#290) ([filecoin-project/go-fil-markets#290](https://github.com/filecoin-project/go-fil-markets/pull/290)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Whyrusleeping | 1 | +53/-5 | 9 | - -# go-fil-markets 0.3.1.1 - -Hotfix bug release to address critical issues affecting node startup - -### Changelog - -- github.com/filecoin-project/go-fil-markets: - - add locks protecting retrieval market maps (#311) ([filecoin-project/go-fil-markets#311](https://github.com/filecoin-project/go-fil-markets/pull/311)) - - fix(storagemarket): run deal restarts in go routine (#309) ([filecoin-project/go-fil-markets#309](https://github.com/filecoin-project/go-fil-markets/pull/309)) - -### Contributors - -| Contributor | Commits | Lines ± | Files Changed | -|-------------|---------|---------|---------------| -| Hannah Howard | 1 | +13/-7 | 2 | -| vyzo | 1 | +10/-0 | 1 | - -### 🙌🏽 Want to contribute? - -Would you like to contribute to this repo and don’t know how? 
Here are a few places you can get started: - -- Check out the [Contributing Guidelines](https://github.com/filecoin-project/go-fil-markets/blob/master/CONTRIBUTING.md) -- Look for issues with the `good-first-issue` label in [go-fil-markets](https://github.com/filecoin-project/go-fil-markets/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+label%3A%22e-good-first-issue%22+) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/Makefile b/vendor/github.com/filecoin-project/go-fil-markets/Makefile deleted file mode 100644 index bc9b60cc70..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/Makefile +++ /dev/null @@ -1,61 +0,0 @@ -all: build -.PHONY: all - -SUBMODULES= - -FFI_PATH:=./extern/filecoin-ffi/ -FFI_DEPS:=.install-filcrypto -FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS)) - -$(FFI_DEPS): .filecoin-build ; - -.filecoin-build: $(FFI_PATH) - $(MAKE) -C $(FFI_PATH) $(FFI_DEPS:$(FFI_PATH)%=%) - @touch $@ - -.update-modules: - git submodule update --init --recursive - @touch $@ - -pieceio: .update-modules .filecoin-build - go build ./pieceio -.PHONY: pieceio -SUBMODULES+=pieceio - -filestore: - go build ./filestore -.PHONY: filestore -SUBMODULES+=filestore - -build: $(SUBMODULES) - -test: build - gotestsum -- -coverprofile=coverage.txt -timeout 5m ./... 
- -clean: - rm -f .filecoin-build - rm -f .update-modules - rm -f coverage.txt - -DOTs=$(shell find docs -name '*.dot') -MMDs=$(shell find docs -name '*.mmd') -SVGs=$(DOTs:%=%.svg) $(MMDs:%=%.svg) -PNGs=$(DOTs:%=%.png) $(MMDs:%=%.png) - -node_modules: package.json - npm install - -diagrams: ${MMDs} ${SVGs} ${PNGs} - -%.mmd.svg: %.mmd - node_modules/.bin/mmdc -i $< -o $@ - -%.mmd.png: %.mmd - node_modules/.bin/mmdc -i $< -o $@ - -FORCE: - -docsgen: FORCE .update-modules .filecoin-build - go run ./docsgen - -$(MMDs): docsgen node_modules diff --git a/vendor/github.com/filecoin-project/go-fil-markets/README.md b/vendor/github.com/filecoin-project/go-fil-markets/README.md deleted file mode 100644 index 5e67594bcd..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/README.md +++ /dev/null @@ -1,53 +0,0 @@ -# go-fil-markets -[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) -[![CircleCI](https://circleci.com/gh/filecoin-project/go-fil-markets.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-fil-markets) -[![codecov](https://codecov.io/gh/filecoin-project/go-fil-markets/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-fil-markets) -[![GoDoc](https://godoc.org/github.com/filecoin-project/go-fil-markets?status.svg)](https://godoc.org/github.com/filecoin-project/go-fil-markets) - -This repository contains modular implementations of the [storage and retrieval market subsystems](https://filecoin-project.github.io/specs/#systems__filecoin_markets) of Filecoin. -They are guided by the [v1.0 and 1.1 Filecoin specification updates](https://filecoin-project.github.io/specs/#intro__changelog). - -Separating implementations into a blockchain component and one or more mining and market components presents an opportunity to encourage implementation diversity while reusing non-security-critical components. 
- -## Components - -* **[storagemarket](./storagemarket)**: for finding, negotiating, and consummating deals to - store data between clients and providers (storage miners). -* **[retrievalmarket](./retrievalmarket)**: for finding, negotiating, and consummating deals to - retrieve data between clients and providers (retrieval miners). -* **[filestore](./filestore)**: a wrapper around os.File for use by pieceio, storagemarket, and retrievalmarket. -* **[pieceio](./pieceio)**: utilities that take IPLD graphs and turn them into pieces. Used by storagemarket. -* **[piecestore](./piecestore)**: a database for storing deal-related PieceInfo and CIDInfo. -Used by storagemarket and retrievalmarket. - -Related components in other repos: -* **[go-data-transfer](https://github.com/filecoin-project/go-data-transfer)**: for exchanging piece data between clients and miners, used by storage & retrieval market modules. - -### Background reading -* The [Markets in Filecoin](https://filecoin-project.github.io/specs/#systems__filecoin_markets) -section of the Filecoin Specification contains the canonical spec. - -### Technical Documentation -* [GoDoc for Storage Market](https://godoc.org/github.com/filecoin-project/go-fil-markets/storagemarket) contains an architectural overview and robust API documentation -* [GoDoc for Retrieval Market](https://godoc.org/github.com/filecoin-project/go-fil-markets/retrievalmarket) contains an architectural overview and robust API documentation - -## Installation -```bash -go get "github.com/filecoin-project/go-fil-markets/"` -``` - -## Usage -Documentation is in the README for each module, listed in [Components](#Components). - -## Contributing -Issues and PRs are welcome! Please first read the [background reading](#background-reading) and [CONTRIBUTING](.go-fil-markets/CONTRIBUTING.md) guide, and look over the current code. PRs against master require approval of at least two maintainers. 
- -Day-to-day discussion takes place in the #fil-components channel of the [Filecoin project chat](https://github.com/filecoin-project/community#chat). Usage or design questions are welcome. - -## Project-level documentation -The filecoin-project has a [community repo](https://github.com/filecoin-project/community) with more detail about our resources and policies, such as the [Code of Conduct](https://github.com/filecoin-project/community/blob/master/CODE_OF_CONDUCT.md). - -## License -This repository is dual-licensed under Apache 2.0 and MIT terms. - -Copyright 2019. Protocol Labs, Inc. diff --git a/vendor/github.com/filecoin-project/go-fil-markets/codecov.yml b/vendor/github.com/filecoin-project/go-fil-markets/codecov.yml deleted file mode 100644 index 30c84f38ed..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/codecov.yml +++ /dev/null @@ -1,10 +0,0 @@ -coverage: - precision: 2 - round: up - range: "50...90" - ignore: - - "**/*_cbor_gen.go" - status: - project: off - patch: off - diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd b/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd deleted file mode 100644 index 2f64d3feb5..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd +++ /dev/null @@ -1,74 +0,0 @@ -stateDiagram-v2 - state "DealStatusNew" as 0 - state "DealStatusPaymentChannelCreating" as 1 - state "DealStatusPaymentChannelAddingFunds" as 2 - state "DealStatusPaymentChannelReady" as 4 - state "DealStatusAccepted" as 6 - state "DealStatusFailed" as 7 - state "DealStatusRejected" as 8 - state "DealStatusFundsNeeded" as 9 - state "DealStatusOngoing" as 10 - state "DealStatusFundsNeededLastPayment" as 11 - state "DealStatusCompleted" as 12 - state "DealStatusDealNotFound" as 13 - state "DealStatusErrored" as 15 - state "DealStatusBlocksComplete" as 16 - state "DealStatusFinalizing" as 17 - 0 : On entry runs ProposeDeal - 1 : On entry runs 
WaitForPaymentChannelCreate - 2 : On entry runs WaitForPaymentChannelAddFunds - 4 : On entry runs ProcessNextResponse - 6 : On entry runs SetupPaymentChannelStart - 9 : On entry runs ProcessPaymentRequested - 10 : On entry runs ProcessNextResponse - 11 : On entry runs ProcessPaymentRequested - 16 : On entry runs ProcessNextResponse - 17 : On entry runs Finalize - [*] --> 0 - note right of 0 - The following events are not shown cause they can trigger from any state. - - ClientEventWriteDealProposalErrored - transitions state to DealStatusErrored - ClientEventReadDealResponseErrored - transitions state to DealStatusErrored - ClientEventUnknownResponseReceived - transitions state to DealStatusFailed - ClientEventWriteDealPaymentErrored - transitions state to DealStatusErrored - end note - 0 --> 0 : ClientEventOpen - 1 --> 7 : ClientEventPaymentChannelErrored - 6 --> 7 : ClientEventPaymentChannelErrored - 6 --> 1 : ClientEventPaymentChannelCreateInitiated - 6 --> 2 : ClientEventPaymentChannelAddingFunds - 1 --> 4 : ClientEventPaymentChannelReady - 2 --> 4 : ClientEventPaymentChannelReady - 1 --> 7 : ClientEventAllocateLaneErrored - 2 --> 7 : ClientEventAllocateLaneErrored - 2 --> 7 : ClientEventPaymentChannelAddFundsErrored - 0 --> 8 : ClientEventDealRejected - 0 --> 13 : ClientEventDealNotFound - 0 --> 6 : ClientEventDealAccepted - 9 --> 7 : ClientEventFundsExpended - 11 --> 7 : ClientEventFundsExpended - 9 --> 7 : ClientEventBadPaymentRequested - 11 --> 7 : ClientEventBadPaymentRequested - 9 --> 7 : ClientEventCreateVoucherFailed - 11 --> 7 : ClientEventCreateVoucherFailed - 9 --> 10 : ClientEventPaymentSent - 11 --> 17 : ClientEventPaymentSent - 4 --> 7 : ClientEventConsumeBlockFailed - 10 --> 7 : ClientEventConsumeBlockFailed - 4 --> 11 : ClientEventLastPaymentRequested - 10 --> 11 : ClientEventLastPaymentRequested - 16 --> 11 : ClientEventLastPaymentRequested - 4 --> 16 : ClientEventAllBlocksReceived - 10 --> 16 : ClientEventAllBlocksReceived - 16 --> 16 : 
ClientEventAllBlocksReceived - 4 --> 12 : ClientEventComplete - 10 --> 12 : ClientEventComplete - 16 --> 12 : ClientEventComplete - 17 --> 12 : ClientEventComplete - 4 --> 7 : ClientEventEarlyTermination - 10 --> 7 : ClientEventEarlyTermination - 4 --> 9 : ClientEventPaymentRequested - 10 --> 9 : ClientEventPaymentRequested - 4 --> 10 : ClientEventBlocksReceived - 10 --> 10 : ClientEventBlocksReceived diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd.png b/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd.png deleted file mode 100644 index 40a734d644..0000000000 Binary files a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd.png and /dev/null differ diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd.svg b/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd.svg deleted file mode 100644 index 026dc0ca15..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalclient.mmd.svg +++ /dev/null @@ -1,6 +0,0 @@ 
-ClientEventOpenClientEventPaymentChannelErroredClientEventPaymentChannelErroredClientEventPaymentChannelCreateInitiatedClientEventPaymentChannelAddingFundsClientEventPaymentChannelReadyClientEventPaymentChannelReadyClientEventAllocateLaneErroredClientEventAllocateLaneErroredClientEventPaymentChannelAddFundsErroredClientEventDealRejectedClientEventDealNotFoundClientEventDealAcceptedClientEventFundsExpendedClientEventFundsExpendedClientEventBadPaymentRequestedClientEventBadPaymentRequestedClientEventCreateVoucherFailedClientEventCreateVoucherFailedClientEventPaymentSentClientEventPaymentSentClientEventConsumeBlockFailedClientEventConsumeBlockFailedClientEventLastPaymentRequestedClientEventLastPaymentRequestedClientEventLastPaymentRequestedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventAllBlocksReceivedClientEventCompleteClientEventCompleteClientEventCompleteClientEventCompleteClientEventEarlyTerminationClientEventEarlyTerminationClientEventPaymentRequestedClientEventPaymentRequestedClientEventBlocksReceivedClientEventBlocksReceivedDealStatusNewOn entry runs ProposeDealDealStatusPaymentChannelCreatingOn entry runs WaitForPaymentChannelCreateDealStatusPaymentChannelAddingFundsOn entry runs WaitForPaymentChannelAddFundsDealStatusPaymentChannelReadyOn entry runs ProcessNextResponseDealStatusAcceptedOn entry runs SetupPaymentChannelStartDealStatusFailedDealStatusRejectedDealStatusFundsNeededOn entry runs ProcessPaymentRequestedDealStatusOngoingOn entry runs ProcessNextResponseDealStatusFundsNeededLastPaymentOn entry runs ProcessPaymentRequestedDealStatusCompletedDealStatusDealNotFoundDealStatusErroredDealStatusBlocksCompleteOn entry runs ProcessNextResponseDealStatusFinalizingOn entry runs FinalizeThe following events are not shown cause they can trigger from any state.ClientEventWriteDealProposalErrored - transitions state to DealStatusErroredClientEventReadDealResponseErrored - transitions state to DealStatusErroredClientEventUnknownResponseReceived 
- transitions state to DealStatusFailedClientEventWriteDealPaymentErrored - transitions state to DealStatusErrored \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd b/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd deleted file mode 100644 index b16e88de72..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd +++ /dev/null @@ -1,53 +0,0 @@ -stateDiagram-v2 - state "DealStatusNew" as 0 - state "DealStatusAwaitingAcceptance" as 5 - state "DealStatusAccepted" as 6 - state "DealStatusFailed" as 7 - state "DealStatusRejected" as 8 - state "DealStatusFundsNeeded" as 9 - state "DealStatusOngoing" as 10 - state "DealStatusFundsNeededLastPayment" as 11 - state "DealStatusCompleted" as 12 - state "DealStatusDealNotFound" as 13 - state "DealStatusErrored" as 15 - state "DealStatusBlocksComplete" as 16 - state "DealStatusFinalizing" as 17 - 0 : On entry runs ReceiveDeal - 5 : On entry runs DecideOnDeal - 6 : On entry runs SendBlocks - 7 : On entry runs SendFailResponse - 8 : On entry runs SendFailResponse - 9 : On entry runs ProcessPayment - 10 : On entry runs SendBlocks - 11 : On entry runs ProcessPayment - 13 : On entry runs SendFailResponse - 17 : On entry runs Finalize - [*] --> 0 - note right of 0 - The following events are not shown cause they can trigger from any state. 
- - ProviderEventWriteResponseFailed - transitions state to DealStatusErrored - ProviderEventReadPaymentFailed - transitions state to DealStatusErrored - end note - 0 --> 0 : ProviderEventOpen - 0 --> 5 : ProviderEventDealReceived - 5 --> 15 : ProviderEventDecisioningError - 0 --> 7 : ProviderEventGetPieceSizeErrored - 0 --> 13 : ProviderEventDealNotFound - 0 --> 8 : ProviderEventDealRejected - 5 --> 8 : ProviderEventDealRejected - 5 --> 6 : ProviderEventDealAccepted - 6 --> 7 : ProviderEventBlockErrored - 10 --> 7 : ProviderEventBlockErrored - 6 --> 16 : ProviderEventBlocksCompleted - 10 --> 16 : ProviderEventBlocksCompleted - 6 --> 9 : ProviderEventPaymentRequested - 10 --> 9 : ProviderEventPaymentRequested - 16 --> 11 : ProviderEventPaymentRequested - 9 --> 7 : ProviderEventSaveVoucherFailed - 11 --> 7 : ProviderEventSaveVoucherFailed - 9 --> 9 : ProviderEventPartialPaymentReceived - 11 --> 11 : ProviderEventPartialPaymentReceived - 9 --> 10 : ProviderEventPaymentReceived - 11 --> 17 : ProviderEventPaymentReceived - 17 --> 12 : ProviderEventComplete diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd.png b/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd.png deleted file mode 100644 index a5004d70c8..0000000000 Binary files a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd.png and /dev/null differ diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd.svg b/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd.svg deleted file mode 100644 index fd3c17caa8..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/retrievalprovider.mmd.svg +++ /dev/null @@ -1,6 +0,0 @@ 
-ProviderEventOpenProviderEventDealReceivedProviderEventDecisioningErrorProviderEventGetPieceSizeErroredProviderEventDealNotFoundProviderEventDealRejectedProviderEventDealRejectedProviderEventDealAcceptedProviderEventBlockErroredProviderEventBlockErroredProviderEventBlocksCompletedProviderEventBlocksCompletedProviderEventPaymentRequestedProviderEventPaymentRequestedProviderEventPaymentRequestedProviderEventSaveVoucherFailedProviderEventSaveVoucherFailedProviderEventPartialPaymentReceivedProviderEventPartialPaymentReceivedProviderEventPaymentReceivedProviderEventPaymentReceivedProviderEventCompleteDealStatusNewOn entry runs ReceiveDealDealStatusAwaitingAcceptanceOn entry runs DecideOnDealDealStatusAcceptedOn entry runs SendBlocksDealStatusFailedOn entry runs SendFailResponseDealStatusRejectedOn entry runs SendFailResponseDealStatusFundsNeededOn entry runs ProcessPaymentDealStatusOngoingOn entry runs SendBlocksDealStatusFundsNeededLastPaymentOn entry runs ProcessPaymentDealStatusCompletedDealStatusDealNotFoundOn entry runs SendFailResponseDealStatusErroredDealStatusBlocksCompleteDealStatusFinalizingOn entry runs FinalizeThe following events are not shown cause they can trigger from any state.ProviderEventWriteResponseFailed - transitions state to DealStatusErroredProviderEventReadPaymentFailed - transitions state to DealStatusErrored \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd b/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd deleted file mode 100644 index 82c00d85de..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd +++ /dev/null @@ -1,62 +0,0 @@ -stateDiagram-v2 - state "StorageDealUnknown" as 0 - state "StorageDealProposalAccepted" as 3 - state "StorageDealSealing" as 5 - state "StorageDealActive" as 7 - state "StorageDealExpired" as 8 - state "StorageDealSlashed" as 9 - state "StorageDealFailing" as 11 - state 
"StorageDealFundsEnsured" as 12 - state "StorageDealCheckForAcceptance" as 13 - state "StorageDealStartDataTransfer" as 16 - state "StorageDealTransferring" as 17 - state "StorageDealEnsureClientFunds" as 21 - state "StorageDealClientFunding" as 23 - state "StorageDealError" as 26 - 3 : On entry runs ValidateDealPublished - 5 : On entry runs VerifyDealActivated - 7 : On entry runs WaitForDealCompletion - 11 : On entry runs FailDeal - 12 : On entry runs ProposeDeal - 13 : On entry runs CheckForDealAcceptance - 16 : On entry runs InitiateDataTransfer - 21 : On entry runs EnsureClientFunds - 23 : On entry runs WaitForFunding - [*] --> 0 - note right of 0 - The following events are not shown cause they can trigger from any state. - - ClientEventStreamCloseError - transitions state to StorageDealError - ClientEventRestart - does not transition state - end note - 0 --> 21 : ClientEventOpen - 21 --> 23 : ClientEventFundingInitiated - 21 --> 11 : ClientEventEnsureFundsFailed - 23 --> 11 : ClientEventEnsureFundsFailed - 21 --> 12 : ClientEventFundsEnsured - 23 --> 12 : ClientEventFundsEnsured - 12 --> 26 : ClientEventWriteProposalFailed - 12 --> 11 : ClientEventReadResponseFailed - 12 --> 11 : ClientEventResponseVerificationFailed - 12 --> 16 : ClientEventInitiateDataTransfer - 12 --> 11 : ClientEventUnexpectedDealState - 16 --> 11 : ClientEventDataTransferFailed - 17 --> 11 : ClientEventDataTransferFailed - 16 --> 17 : ClientEventDataTransferInitiated - 16 --> 13 : ClientEventDataTransferComplete - 17 --> 13 : ClientEventDataTransferComplete - 13 --> 13 : ClientEventWaitForDealState - 13 --> 11 : ClientEventResponseDealDidNotMatch - 13 --> 11 : ClientEventDealRejected - 13 --> 3 : ClientEventDealAccepted - 3 --> 26 : ClientEventDealPublishFailed - 3 --> 5 : ClientEventDealPublished - 5 --> 26 : ClientEventDealActivationFailed - 5 --> 7 : ClientEventDealActivated - 7 --> 9 : ClientEventDealSlashed - 7 --> 8 : ClientEventDealExpired - 7 --> 26 : 
ClientEventDealCompletionFailed - 11 --> 26 : ClientEventFailed - 9 --> [*] - 8 --> [*] - 26 --> [*] diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd.png b/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd.png deleted file mode 100644 index 95be3a02c0..0000000000 Binary files a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd.png and /dev/null differ diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd.svg b/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd.svg deleted file mode 100644 index 4164cf66b7..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageclient.mmd.svg +++ /dev/null @@ -1,6 +0,0 @@ -ClientEventOpenClientEventFundingInitiatedClientEventEnsureFundsFailedClientEventEnsureFundsFailedClientEventFundsEnsuredClientEventFundsEnsuredClientEventWriteProposalFailedClientEventReadResponseFailedClientEventResponseVerificationFailedClientEventInitiateDataTransferClientEventUnexpectedDealStateClientEventDataTransferFailedClientEventDataTransferFailedClientEventDataTransferInitiatedClientEventDataTransferCompleteClientEventDataTransferCompleteClientEventWaitForDealStateClientEventResponseDealDidNotMatchClientEventDealRejectedClientEventDealAcceptedClientEventDealPublishFailedClientEventDealPublishedClientEventDealActivationFailedClientEventDealActivatedClientEventDealSlashedClientEventDealExpiredClientEventDealCompletionFailedClientEventFailedStorageDealUnknownStorageDealProposalAcceptedOn entry runs ValidateDealPublishedStorageDealSealingOn entry runs VerifyDealActivatedStorageDealActiveOn entry runs WaitForDealCompletionStorageDealExpiredStorageDealSlashedStorageDealFailingOn entry runs FailDealStorageDealFundsEnsuredOn entry runs ProposeDealStorageDealCheckForAcceptanceOn entry runs CheckForDealAcceptanceStorageDealStartDataTransferOn entry runs 
InitiateDataTransferStorageDealTransferringStorageDealEnsureClientFundsOn entry runs EnsureClientFundsStorageDealClientFundingOn entry runs WaitForFundingStorageDealErrorThe following events are not shown cause they can trigger from any state.ClientEventStreamCloseError - transitions state to StorageDealErrorClientEventRestart - does not transition state \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd b/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd deleted file mode 100644 index 16a2c599ca..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd +++ /dev/null @@ -1,82 +0,0 @@ -stateDiagram-v2 - state "StorageDealUnknown" as 0 - state "StorageDealStaged" as 4 - state "StorageDealSealing" as 5 - state "" as 6 - state "StorageDealActive" as 7 - state "StorageDealExpired" as 8 - state "StorageDealSlashed" as 9 - state "StorageDealRejecting" as 10 - state "StorageDealFailing" as 11 - state "StorageDealValidating" as 14 - state "StorageDealAcceptWait" as 15 - state "StorageDealTransferring" as 17 - state "StorageDealWaitingForData" as 18 - state "StorageDealVerifyData" as 19 - state "StorageDealEnsureProviderFunds" as 20 - state "StorageDealProviderFunding" as 22 - state "StorageDealPublish" as 24 - state "StorageDealPublishing" as 25 - state "StorageDealError" as 26 - 4 : On entry runs HandoffDeal - 5 : On entry runs VerifyDealActivated - 6 : On entry runs RecordPieceInfo - 7 : On entry runs WaitForDealCompletion - 10 : On entry runs RejectDeal - 11 : On entry runs FailDeal - 14 : On entry runs ValidateDealProposal - 15 : On entry runs DecideOnProposal - 19 : On entry runs VerifyData - 20 : On entry runs EnsureProviderFunds - 22 : On entry runs WaitForFunding - 24 : On entry runs PublishDeal - 25 : On entry runs WaitForPublish - [*] --> 0 - note right of 0 - The following events are not shown cause they can trigger from any state. 
- - ProviderEventNodeErrored - transitions state to StorageDealFailing - ProviderEventRestart - does not transition state - end note - 0 --> 14 : ProviderEventOpen - 14 --> 10 : ProviderEventDealRejected - 15 --> 10 : ProviderEventDealRejected - 19 --> 10 : ProviderEventDealRejected - 10 --> 11 : ProviderEventRejectionSent - 14 --> 15 : ProviderEventDealDeciding - 15 --> 18 : ProviderEventDataRequested - 17 --> 11 : ProviderEventDataTransferFailed - 18 --> 17 : ProviderEventDataTransferInitiated - 17 --> 19 : ProviderEventDataTransferCompleted - 19 --> 11 : ProviderEventDataVerificationFailed - 18 --> 20 : ProviderEventVerifiedData - 19 --> 20 : ProviderEventVerifiedData - 20 --> 22 : ProviderEventFundingInitiated - 20 --> 24 : ProviderEventFunded - 22 --> 24 : ProviderEventFunded - 24 --> 25 : ProviderEventDealPublishInitiated - 25 --> 11 : ProviderEventDealPublishError - 10 --> 11 : ProviderEventSendResponseFailed - 15 --> 11 : ProviderEventSendResponseFailed - 25 --> 4 : ProviderEventDealPublished - 4 --> 11 : ProviderEventFileStoreErrored - 5 --> 11 : ProviderEventFileStoreErrored - 7 --> 11 : ProviderEventFileStoreErrored - 4 --> 11 : ProviderEventDealHandoffFailed - 4 --> 5 : ProviderEventDealHandedOff - 5 --> 11 : ProviderEventDealActivationFailed - 5 --> 6 : ProviderEventDealActivated - 6 --> 11 : ProviderEventPieceStoreErrored - 6 --> 11 : ProviderEventUnableToLocatePiece - 6 --> 11 : ProviderEventReadMetadataErrored - 6 --> 7 : ProviderEventPieceRecorded - 7 --> 9 : ProviderEventDealSlashed - 7 --> 8 : ProviderEventDealExpired - 7 --> 26 : ProviderEventDealCompletionFailed - 11 --> 26 : ProviderEventFailed - 10 --> 26 : ProviderEventRestart - 14 --> 26 : ProviderEventRestart - 15 --> 26 : ProviderEventRestart - 26 --> [*] - 9 --> [*] - 8 --> [*] diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd.png b/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd.png deleted file mode 100644 index 
e43c1c8b72..0000000000 Binary files a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd.png and /dev/null differ diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd.svg b/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd.svg deleted file mode 100644 index 89aaeb298c..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docs/storageprovider.mmd.svg +++ /dev/null @@ -1,6 +0,0 @@ -ProviderEventOpenProviderEventDealRejectedProviderEventDealRejectedProviderEventDealRejectedProviderEventRejectionSentProviderEventDealDecidingProviderEventDataRequestedProviderEventDataTransferFailedProviderEventDataTransferInitiatedProviderEventDataTransferCompletedProviderEventDataVerificationFailedProviderEventVerifiedDataProviderEventVerifiedDataProviderEventFundingInitiatedProviderEventFundedProviderEventFundedProviderEventDealPublishInitiatedProviderEventDealPublishErrorProviderEventSendResponseFailedProviderEventSendResponseFailedProviderEventDealPublishedProviderEventFileStoreErroredProviderEventFileStoreErroredProviderEventFileStoreErroredProviderEventDealHandoffFailedProviderEventDealHandedOffProviderEventDealActivationFailedProviderEventDealActivatedProviderEventPieceStoreErroredProviderEventUnableToLocatePieceProviderEventReadMetadataErroredProviderEventPieceRecordedProviderEventDealSlashedProviderEventDealExpiredProviderEventDealCompletionFailedProviderEventFailedProviderEventRestartProviderEventRestartProviderEventRestartStorageDealUnknownStorageDealStagedOn entry runs HandoffDealStorageDealSealingOn entry runs VerifyDealActivated<invalid Value>On entry runs RecordPieceInfoStorageDealActiveOn entry runs WaitForDealCompletionStorageDealExpiredStorageDealSlashedStorageDealRejectingOn entry runs RejectDealStorageDealFailingOn entry runs FailDealStorageDealValidatingOn entry runs ValidateDealProposalStorageDealAcceptWaitOn entry runs 
DecideOnProposalStorageDealTransferringStorageDealWaitingForDataStorageDealVerifyDataOn entry runs VerifyDataStorageDealEnsureProviderFundsOn entry runs EnsureProviderFundsStorageDealProviderFundingOn entry runs WaitForFundingStorageDealPublishOn entry runs PublishDealStorageDealPublishingOn entry runs WaitForPublishStorageDealErrorThe following events are not shown cause they can trigger from any state.ProviderEventNodeErrored - transitions state to StorageDealFailingProviderEventRestart - does not transition state \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/docsgen/main.go b/vendor/github.com/filecoin-project/go-fil-markets/docsgen/main.go deleted file mode 100644 index 95485e6a71..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/docsgen/main.go +++ /dev/null @@ -1,95 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket" - storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-statemachine/fsm" -) - -func storageDealStatusCmp(a, b fsm.StateKey) bool { - aDealStatus := a.(storagemarket.StorageDealStatus) - bDealStatus := b.(storagemarket.StorageDealStatus) - return aDealStatus < bDealStatus -} - -func retrievalDealStatusCmp(a, b fsm.StateKey) bool { - aDealStatus := a.(retrievalmarket.DealStatus) - bDealStatus := b.(retrievalmarket.DealStatus) - return aDealStatus < bDealStatus -} - -func updateOnChanged(name string, writeContents func(w io.Writer) error) error { - input, err := os.Open(name) - if err != nil { - return err - } - orig, err := ioutil.ReadAll(input) - if err != nil { - return err - } - err = input.Close() - if err != nil { - return err - } - buf := new(bytes.Buffer) - err = writeContents(buf) - if err 
!= nil { - return err - } - if !bytes.Equal(orig, buf.Bytes()) { - file, err := os.Create(name) - if err != nil { - return err - } - _, err = file.Write(buf.Bytes()) - if err != nil { - return err - } - err = file.Close() - if err != nil { - return err - } - } - return nil -} - -func main() { - - err := updateOnChanged("./docs/storageclient.mmd", func(w io.Writer) error { - return fsm.GenerateUML(w, fsm.MermaidUML, storageimpl.ClientFSMParameterSpec, storagemarket.DealStates, storagemarket.ClientEvents, []fsm.StateKey{storagemarket.StorageDealUnknown}, false, storageDealStatusCmp) - }) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - err = updateOnChanged("./docs/storageprovider.mmd", func(w io.Writer) error { - return fsm.GenerateUML(w, fsm.MermaidUML, storageimpl.ProviderFSMParameterSpec, storagemarket.DealStates, storagemarket.ProviderEvents, []fsm.StateKey{storagemarket.StorageDealUnknown}, false, storageDealStatusCmp) - }) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - err = updateOnChanged("./docs/retrievalclient.mmd", func(w io.Writer) error { - return fsm.GenerateUML(w, fsm.MermaidUML, retrievalimpl.ClientFSMParameterSpec, retrievalmarket.DealStatuses, retrievalmarket.ClientEvents, []fsm.StateKey{retrievalmarket.DealStatusNew}, false, retrievalDealStatusCmp) - }) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - err = updateOnChanged("./docs/retrievalprovider.mmd", func(w io.Writer) error { - return fsm.GenerateUML(w, fsm.MermaidUML, retrievalimpl.ProviderFSMParameterSpec, retrievalmarket.DealStatuses, retrievalmarket.ProviderEvents, []fsm.StateKey{retrievalmarket.DealStatusNew}, false, retrievalDealStatusCmp) - }) - if err != nil { - fmt.Println(err) - os.Exit(1) - } -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/filestore/filestore_test.go b/vendor/github.com/filecoin-project/go-fil-markets/filestore/filestore_test.go deleted file mode 100644 index 0daaea5804..0000000000 --- 
a/vendor/github.com/filecoin-project/go-fil-markets/filestore/filestore_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package filestore - -import ( - "crypto/rand" - "fmt" - "log" - "os" - "path" - "testing" - - "github.com/stretchr/testify/require" -) - -func randBytes(n int) []byte { - arr := make([]byte, n) - _, err := rand.Read(arr) - if err != nil { - log.Fatal(err) - } - return arr -} - -const baseDir = "_test/a/b/c/d" -const existingFile = "existing.txt" - -func init() { - err := os.MkdirAll(baseDir, 0755) - if err != nil { - log.Print(err.Error()) - return - } - filename := path.Join(baseDir, existingFile) - file, err := os.Create(filename) - if err != nil { - log.Print(err.Error()) - return - } - defer file.Close() - _, err = file.Write(randBytes(64)) - if err != nil { - log.Print(err.Error()) - return - } -} - -func Test_SizeFails(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - name := Path("noFile.txt") - file, err := store.Create(name) - require.NoError(t, err) - err = store.Delete(file.Path()) - require.NoError(t, err) - require.Equal(t, int64(-1), file.Size()) -} - -func Test_OpenFileFails(t *testing.T) { - base := "_test/a/b/c/d/e" - err := os.MkdirAll(base, 0755) - require.NoError(t, err) - store, err := NewLocalFileStore(OsPath(base)) - require.NoError(t, err) - err = os.Remove(base) - require.NoError(t, err) - _, err = store.Open(existingFile) - require.Error(t, err) -} - -func Test_RemoveSeparators(t *testing.T) { - first, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - second, err := NewLocalFileStore(OsPath(fmt.Sprintf("%s%c%c", baseDir, os.PathSeparator, os.PathSeparator))) - require.NoError(t, err) - f1, err := first.Open(existingFile) - require.NoError(t, err) - f2, err := second.Open(existingFile) - require.NoError(t, err) - require.Equal(t, f1.Path(), f2.Path()) -} - -func Test_BaseDirIsFileFails(t *testing.T) { - base := fmt.Sprintf("%s%c%s", baseDir, os.PathSeparator, existingFile) - _, err 
:= NewLocalFileStore(OsPath(base)) - require.Error(t, err) -} - -func Test_CreateExistingFileFails(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - _, err = store.Create(Path(existingFile)) - require.Error(t, err) -} - -func Test_StoreFails(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - file, err := store.Open(Path(existingFile)) - require.NoError(t, err) - _, err = store.Store(Path(existingFile), file) - require.Error(t, err) -} - -func Test_OpenFails(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - name := Path("newFile.txt") - _, err = store.Open(name) - require.Error(t, err) -} - -func Test_InvalidBaseDirectory(t *testing.T) { - _, err := NewLocalFileStore("NoSuchDirectory") - require.Error(t, err) -} - -func Test_CreateFile(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - name := Path("newFile.txt") - f, err := store.Create(name) - require.NoError(t, err) - defer func() { - err := store.Delete(f.Path()) - require.NoError(t, err) - }() - bytesToWrite := 32 - written, err := f.Write(randBytes(bytesToWrite)) - require.NoError(t, err) - require.Equal(t, bytesToWrite, written) - require.Equal(t, int64(bytesToWrite), f.Size()) -} - -func Test_CreateTempFile(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - file, err := store.CreateTemp() - require.NoError(t, err) - defer func() { - err := store.Delete(file.Path()) - require.NoError(t, err) - }() - bytesToWrite := 32 - written, err := file.Write(randBytes(bytesToWrite)) - require.NoError(t, err) - require.Equal(t, bytesToWrite, written) - require.Equal(t, int64(bytesToWrite), file.Size()) -} - -func Test_OpenAndReadFile(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - file, err := store.Open(Path(existingFile)) - require.NoError(t, err) - size := file.Size() - require.NotEqual(t, -1, size) - 
pos := int64(size / 2) - offset, err := file.Seek(pos, 0) - require.NoError(t, err) - require.Equal(t, pos, offset) - buffer := make([]byte, size/2) - read, err := file.Read(buffer) - require.NoError(t, err) - require.Equal(t, int(size/2), read) - err = file.Close() - require.NoError(t, err) -} - -func Test_CopyFile(t *testing.T) { - store, err := NewLocalFileStore(baseDir) - require.NoError(t, err) - file, err := store.Open(Path(existingFile)) - require.NoError(t, err) - newFile := Path("newFile.txt") - newPath, err := store.Store(newFile, file) - require.NoError(t, err) - err = store.Delete(newPath) - require.NoError(t, err) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/filestore/mocks/File.go b/vendor/github.com/filecoin-project/go-fil-markets/filestore/mocks/File.go deleted file mode 100644 index dfeac9eeb3..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/filestore/mocks/File.go +++ /dev/null @@ -1,133 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mocks - -import ( - mock "github.com/stretchr/testify/mock" - - filestore "github.com/filecoin-project/go-fil-markets/filestore" -) - -// File is an autogenerated mock type for the File type -type File struct { - mock.Mock -} - -// Close provides a mock function with given fields: -func (_m *File) Close() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Path provides a mock function with given fields: -func (_m *File) Path() filestore.Path { - ret := _m.Called() - - var r0 filestore.Path - if rf, ok := ret.Get(0).(func() filestore.Path); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(filestore.Path) - } - - return r0 -} - -// OsPath provides a mock function with given fields: -func (_m *File) OsPath() filestore.OsPath { - ret := _m.Called() - - var r0 filestore.OsPath - if rf, ok := ret.Get(0).(func() filestore.OsPath); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(filestore.OsPath) - } - - return r0 -} - -// Read provides a mock function with given fields: p -func (_m *File) Read(p []byte) (int, error) { - ret := _m.Called(p) - - var r0 int - if rf, ok := ret.Get(0).(func([]byte) int); ok { - r0 = rf(p) - } else { - r0 = ret.Get(0).(int) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(p) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Seek provides a mock function with given fields: offset, whence -func (_m *File) Seek(offset int64, whence int) (int64, error) { - ret := _m.Called(offset, whence) - - var r0 int64 - if rf, ok := ret.Get(0).(func(int64, int) int64); ok { - r0 = rf(offset, whence) - } else { - r0 = ret.Get(0).(int64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(int64, int) error); ok { - r1 = rf(offset, whence) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Size provides a mock function with given fields: -func (_m *File) Size() int64 { - ret := 
_m.Called() - - var r0 int64 - if rf, ok := ret.Get(0).(func() int64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(int64) - } - - return r0 -} - -// Write provides a mock function with given fields: p -func (_m *File) Write(p []byte) (int, error) { - ret := _m.Called(p) - - var r0 int - if rf, ok := ret.Get(0).(func([]byte) int); ok { - r0 = rf(p) - } else { - r0 = ret.Get(0).(int) - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(p) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/filestore/mocks/FileStore.go b/vendor/github.com/filecoin-project/go-fil-markets/filestore/mocks/FileStore.go deleted file mode 100644 index de3d668041..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/filestore/mocks/FileStore.go +++ /dev/null @@ -1,118 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mocks - -import ( - mock "github.com/stretchr/testify/mock" - - filestore "github.com/filecoin-project/go-fil-markets/filestore" -) - -// FileStore is an autogenerated mock type for the FileStore type -type FileStore struct { - mock.Mock -} - -// Create provides a mock function with given fields: p -func (_m *FileStore) Create(p filestore.Path) (filestore.File, error) { - ret := _m.Called(p) - - var r0 filestore.File - if rf, ok := ret.Get(0).(func(filestore.Path) filestore.File); ok { - r0 = rf(p) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(filestore.File) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(filestore.Path) error); ok { - r1 = rf(p) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateTemp provides a mock function with given fields: -func (_m *FileStore) CreateTemp() (filestore.File, error) { - ret := _m.Called() - - var r0 filestore.File - if rf, ok := ret.Get(0).(func() filestore.File); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(filestore.File) - } - } - - var r1 error 
- if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Delete provides a mock function with given fields: p -func (_m *FileStore) Delete(p filestore.Path) error { - ret := _m.Called(p) - - var r0 error - if rf, ok := ret.Get(0).(func(filestore.Path) error); ok { - r0 = rf(p) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Open provides a mock function with given fields: p -func (_m *FileStore) Open(p filestore.Path) (filestore.File, error) { - ret := _m.Called(p) - - var r0 filestore.File - if rf, ok := ret.Get(0).(func(filestore.Path) filestore.File); ok { - r0 = rf(p) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(filestore.File) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(filestore.Path) error); ok { - r1 = rf(p) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Store provides a mock function with given fields: p, f -func (_m *FileStore) Store(p filestore.Path, f filestore.File) (filestore.Path, error) { - ret := _m.Called(p, f) - - var r0 filestore.Path - if rf, ok := ret.Get(0).(func(filestore.Path, filestore.File) filestore.Path); ok { - r0 = rf(p, f) - } else { - r0 = ret.Get(0).(filestore.Path) - } - - var r1 error - if rf, ok := ret.Get(1).(func(filestore.Path, filestore.File) error); ok { - r1 = rf(p, f) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/go.mod b/vendor/github.com/filecoin-project/go-fil-markets/go.mod deleted file mode 100644 index ad558a0f9e..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/go.mod +++ /dev/null @@ -1,48 +0,0 @@ -module github.com/filecoin-project/go-fil-markets - -go 1.13 - -require ( - github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be - github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 - github.com/filecoin-project/go-data-transfer v0.3.0 - 
github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 - github.com/filecoin-project/go-statemachine v0.0.0-20200619205156-c7bf525c06ef - github.com/filecoin-project/go-statestore v0.1.0 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6 - github.com/filecoin-project/specs-actors v0.7.0 - github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099 - github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e - github.com/ipfs/go-block-format v0.0.2 - github.com/ipfs/go-blockservice v0.1.3 - github.com/ipfs/go-cid v0.0.5 - github.com/ipfs/go-datastore v0.4.4 - github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103 - github.com/ipfs/go-ipfs-blockstore v1.0.0 - github.com/ipfs/go-ipfs-blocksutil v0.0.1 - github.com/ipfs/go-ipfs-chunker v0.0.5 - github.com/ipfs/go-ipfs-ds-help v1.0.0 - github.com/ipfs/go-ipfs-exchange-offline v0.0.1 - github.com/ipfs/go-ipfs-files v0.0.8 - github.com/ipfs/go-ipld-cbor v0.0.4 - github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.0.3 - github.com/ipfs/go-merkledag v0.3.1 - github.com/ipfs/go-unixfs v0.2.4 - github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339 - github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e - github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 - github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c - github.com/libp2p/go-libp2p v0.6.0 - github.com/libp2p/go-libp2p-core v0.5.0 - github.com/multiformats/go-multiaddr v0.2.1 - github.com/multiformats/go-multihash v0.0.13 - github.com/stretchr/testify v1.5.1 - github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e - golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 - golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 - gotest.tools v2.2.0+incompatible -) - -replace github.com/filecoin-project/filecoin-ffi => 
./extern/filecoin-ffi diff --git a/vendor/github.com/filecoin-project/go-fil-markets/mkreleaselog b/vendor/github.com/filecoin-project/go-fil-markets/mkreleaselog deleted file mode 100755 index 88ae89be3e..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/mkreleaselog +++ /dev/null @@ -1,248 +0,0 @@ -#!/bin/zsh - -# Note: This script is a modified version of the mkreleaselog script used by -# the go-ipfs team. -# -# Usage: ./mkreleaselog v0.25.0 v0.26.0 > /tmp/release.log - -set -euo pipefail -export GO111MODULE=on -export GOPATH="$(go env GOPATH)" - -alias jq="jq --unbuffered" - -REPO_SUFFIXES_TO_STRIP=( - "/v2" - "/v3" - "/v4" - "/v5" - "/v6" -) - -AUTHORS=( - # orgs - filecoin-project/go-fil-markets - filecoin-project/go-data-transfer - filecoin-project/go-statemachine - ipfs/go-graphsync - - # Authors of personal repos used by filecoin-ffi that should be mentioned in the - # release notes. - xlab - hannahhoward -) - -[[ -n "${REPO_FILTER+x}" ]] || REPO_FILTER="github.com/(${$(printf "|%s" "${AUTHORS[@]}"):1})" -echo $REPO_FILTER -[[ -n "${IGNORED_FILES+x}" ]] || IGNORED_FILES='^\(\.gx\|package\.json\|\.travis\.yml\|go.mod\|go\.sum|\.github|\.circleci\)$' - -NL=$'\n' - -msg() { - echo "$*" >&2 -} - -statlog() { - rpath="$GOPATH/src/$1" - for s in $REPO_SUFFIXES_TO_STRIP; do - rpath=${rpath%$s} - done - - start="${2:-}" - end="${3:-HEAD}" - - git -C "$rpath" log --shortstat --no-merges --pretty="tformat:%H%n%aN%n%aE" "$start..$end" | while - read hash - read name - read email - read _ # empty line - read changes - do - changed=0 - insertions=0 - deletions=0 - while read count event; do - if [[ "$event" =~ ^file ]]; then - changed=$count - elif [[ "$event" =~ ^insertion ]]; then - insertions=$count - elif [[ "$event" =~ ^deletion ]]; then - deletions=$count - else - echo "unknown event $event" >&2 - exit 1 - fi - done<<<"${changes//,/$NL}" - - jq -n \ - --arg "hash" "$hash" \ - --arg "name" "$name" \ - --arg "email" "$email" \ - --argjson 
"changed" "$changed" \ - --argjson "insertions" "$insertions" \ - --argjson "deletions" "$deletions" \ - '{Commit: $hash, Author: $name, Email: $email, Files: $changed, Insertions: $insertions, Deletions: $deletions}' - done -} - -# Returns a stream of deps changed between $1 and $2. -dep_changes() { - { - <"$1" - <"$2" - } | jq -s 'JOIN(INDEX(.[0][]; .Path); .[1][]; .Path; {Path: .[0].Path, Old: (.[1] | del(.Path)), New: (.[0] | del(.Path))}) | select(.New.Version != .Old.Version)' -} - -# resolve_commits resolves a git ref for each version. -resolve_commits() { - jq '. + {Ref: (.Version|capture("^((?.*)\\+incompatible|v.*-(0\\.)?[0-9]{14}-(?[a-f0-9]{12})|(?v.*))$") | .ref1 // .ref2 // .ref3)}' -} - -pr_link() { - local repo="$1" - local prnum="$2" - local ghname="${repo##github.com/}" - printf -- "[%s#%s](https://%s/pull/%s)" "$ghname" "$prnum" "$repo" "$prnum" -} - -# Generate a release log for a range of commits in a single repo. -release_log() { - setopt local_options BASH_REMATCH - - local repo="$1" - local start="$2" - local end="${3:-HEAD}" - local dir="$GOPATH/src/$repo" - - local commit pr - git -C "$dir" log \ - --format='tformat:%H %s' \ - --first-parent \ - "$start..$end" | - while read commit subject; do - # Skip gx-only PRs. 
- git -C "$dir" diff-tree --no-commit-id --name-only "$commit^" "$commit" | - grep -v "${IGNORED_FILES}" >/dev/null || continue - - if [[ "$subject" =~ '^Merge pull request #([0-9]+) from' ]]; then - local prnum="${BASH_REMATCH[2]}" - local desc="$(git -C "$dir" show --summary --format='tformat:%b' "$commit" | head -1)" - printf -- "- %s (%s)\n" "$desc" "$(pr_link "$repo" "$prnum")" - elif [[ "$subject" =~ '\(#([0-9]+)\)$' ]]; then - local prnum="${BASH_REMATCH[2]}" - printf -- "- %s (%s)\n" "$subject" "$(pr_link "$repo" "$prnum")" - else - printf -- "- %s\n" "$subject" - fi - done -} - -indent() { - sed -e 's/^/ /' -} - -mod_deps() { - go list -json -m all | jq 'select(.Version != null)' -} - -ensure() { - local repo="$1" - for s in $REPO_SUFFIXES_TO_STRIP; do - repo=${repo%$s} - done - - local commit="$2" - - local rpath="$GOPATH/src/$repo" - if [[ ! -d "$rpath" ]]; then - msg "Cloning $repo..." - git clone "http://$repo" "$rpath" >&2 - fi - - if ! git -C "$rpath" rev-parse --verify "$commit" >/dev/null; then - msg "Fetching $repo..." - git -C "$rpath" fetch --all >&2 - fi - - git -C "$rpath" rev-parse --verify "$commit" >/dev/null || return 1 -} - -statsummary() { - jq -s 'group_by(.Author)[] | {Author: .[0].Author, Commits: (. | length), Insertions: (map(.Insertions) | add), Deletions: (map(.Deletions) | add), Files: (map(.Files) | add)}' | - jq '. + {Lines: (.Deletions + .Insertions)}' -} - -recursive_release_log() { - local start="${1:-$(git tag -l | sort -V | grep -v -- '-rc' | grep 'v'| tail -n1)}" - local end="${2:-$(git rev-parse HEAD)}" - local repo_root="$(git rev-parse --show-toplevel)" - local package="$(cd "$repo_root" && go list -m)" - - if ! [[ "${GOPATH}/${package}" != "${repo_root}" ]]; then - echo "This script requires the target package and all dependencies to live in a GOPATH." 
- return 1 - fi - - ( - local result=0 - local workspace="$(mktemp -d)" - trap "$(printf 'rm -rf "%q"' "$workspace")" INT TERM EXIT - cd "$workspace" - - echo "Computing old deps..." >&2 - git -C "$repo_root" show "$start:go.mod" >go.mod - sed "s/^replace.*//g" go.mod > go.mod.new - mv go.mod.new go.mod - mod_deps | resolve_commits | jq -s > old_deps.json - - echo "Computing new deps..." >&2 - git -C "$repo_root" show "$end:go.mod" >go.mod - sed "s/^replace.*//g" go.mod > go.mod.new - mv go.mod.new go.mod - mod_deps | resolve_commits | jq -s > new_deps.json - - rm -f go.mod go.sum - - printf -- "Generating Changelog for %s %s..%s\n" "$package" "$start" "$end" >&2 - - printf -- "- %s:\n" "$package" - release_log "$package" "$start" "$end" | indent - - statlog "$package" "$start" "$end" > statlog.json - - dep_changes old_deps.json new_deps.json | - jq --arg filter "$REPO_FILTER" 'select(.Path | match($filter))' | - # Compute changelogs - jq -r '"\(.Path) \(.New.Version) \(.New.Ref) \(.Old.Version) \(.Old.Ref // "")"' | - while read repo new new_ref old old_ref; do - for s in $REPO_SUFFIXES_TO_STRIP; do - repo=${repo%$s} - done - - if ! 
ensure "$repo" "$new_ref"; then - result=1 - local changelog="failed to fetch repo" - else - statlog "$repo" "$old_ref" "$new_ref" >> statlog.json - local changelog="$(release_log "$repo" "$old_ref" "$new_ref")" - fi - if [[ -n "$changelog" ]]; then - printf -- "- %s (%s -> %s):\n" "$repo" "$old" "$new" - echo "$changelog" | indent - fi - done - - echo - echo "Contributors" - echo - - echo "| Contributor | Commits | Lines ± | Files Changed |" - echo "|-------------|---------|---------|---------------|" - statsummary 1 { - return cid.Undef, fmt.Errorf("invalid header: contains %d roots (expecting 1)", l) - } - return header.Roots[0], nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/CarIO.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/CarIO.go deleted file mode 100644 index d9348c7e9b..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/CarIO.go +++ /dev/null @@ -1,81 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mocks - -import car "github.com/ipld/go-car" -import cid "github.com/ipfs/go-cid" -import context "context" -import io "io" -import ipld "github.com/ipld/go-ipld-prime" -import mock "github.com/stretchr/testify/mock" -import pieceio "github.com/filecoin-project/go-fil-markets/pieceio" - -// CarIO is an autogenerated mock type for the CarIO type -type CarIO struct { - mock.Mock -} - -// LoadCar provides a mock function with given fields: bs, r -func (_m *CarIO) LoadCar(bs pieceio.WriteStore, r io.Reader) (cid.Cid, error) { - ret := _m.Called(bs, r) - - var r0 cid.Cid - if rf, ok := ret.Get(0).(func(pieceio.WriteStore, io.Reader) cid.Cid); ok { - r0 = rf(bs, r) - } else { - r0 = ret.Get(0).(cid.Cid) - } - - var r1 error - if rf, ok := ret.Get(1).(func(pieceio.WriteStore, io.Reader) error); ok { - r1 = rf(bs, r) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PrepareCar provides a mock function with given fields: ctx, bs, payloadCid, node -func (_m *CarIO) PrepareCar(ctx context.Context, bs pieceio.ReadStore, payloadCid cid.Cid, node ipld.Node) (pieceio.PreparedCar, error) { - ret := _m.Called(ctx, bs, payloadCid, node) - - var r0 pieceio.PreparedCar - if rf, ok := ret.Get(0).(func(context.Context, pieceio.ReadStore, cid.Cid, ipld.Node) pieceio.PreparedCar); ok { - r0 = rf(ctx, bs, payloadCid, node) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(pieceio.PreparedCar) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, pieceio.ReadStore, cid.Cid, ipld.Node) error); ok { - r1 = rf(ctx, bs, payloadCid, node) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// WriteCar provides a mock function with given fields: ctx, bs, payloadCid, node, w, userOnNewCarBlocks -func (_m *CarIO) WriteCar(ctx context.Context, bs pieceio.ReadStore, payloadCid cid.Cid, node ipld.Node, w io.Writer, userOnNewCarBlocks ...car.OnNewCarBlockFunc) error { - _va := make([]interface{}, len(userOnNewCarBlocks)) - for _i := range 
userOnNewCarBlocks { - _va[_i] = userOnNewCarBlocks[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, bs, payloadCid, node, w) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, pieceio.ReadStore, cid.Cid, ipld.Node, io.Writer, ...car.OnNewCarBlockFunc) error); ok { - r0 = rf(ctx, bs, payloadCid, node, w, userOnNewCarBlocks...) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/PieceIO.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/PieceIO.go deleted file mode 100644 index b0e26f3fa4..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/PieceIO.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mocks - -import abi "github.com/filecoin-project/specs-actors/actors/abi" -import cid "github.com/ipfs/go-cid" -import io "io" -import ipld "github.com/ipld/go-ipld-prime" -import mock "github.com/stretchr/testify/mock" - -// PieceIO is an autogenerated mock type for the PieceIO type -type PieceIO struct { - mock.Mock -} - -// GeneratePieceCommitment provides a mock function with given fields: rt, payloadCid, selector -func (_m *PieceIO) GeneratePieceCommitment(rt abi.RegisteredProof, payloadCid cid.Cid, selector ipld.Node) (cid.Cid, abi.UnpaddedPieceSize, error) { - ret := _m.Called(rt, payloadCid, selector) - - var r0 cid.Cid - if rf, ok := ret.Get(0).(func(abi.RegisteredProof, cid.Cid, ipld.Node) cid.Cid); ok { - r0 = rf(rt, payloadCid, selector) - } else { - r0 = ret.Get(0).(cid.Cid) - } - - var r1 abi.UnpaddedPieceSize - if rf, ok := ret.Get(1).(func(abi.RegisteredProof, cid.Cid, ipld.Node) abi.UnpaddedPieceSize); ok { - r1 = rf(rt, payloadCid, selector) - } else { - r1 = ret.Get(1).(abi.UnpaddedPieceSize) - } - - var r2 error - if rf, ok := ret.Get(2).(func(abi.RegisteredProof, cid.Cid, ipld.Node) error); ok { - r2 = rf(rt, 
payloadCid, selector) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// ReadPiece provides a mock function with given fields: r -func (_m *PieceIO) ReadPiece(r io.Reader) (cid.Cid, error) { - ret := _m.Called(r) - - var r0 cid.Cid - if rf, ok := ret.Get(0).(func(io.Reader) cid.Cid); ok { - r0 = rf(r) - } else { - r0 = ret.Get(0).(cid.Cid) - } - - var r1 error - if rf, ok := ret.Get(1).(func(io.Reader) error); ok { - r1 = rf(r) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/PreparedCar.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/PreparedCar.go deleted file mode 100644 index 0ad17eab36..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/PreparedCar.go +++ /dev/null @@ -1,42 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. - -package mocks - -import ( - io "io" - - mock "github.com/stretchr/testify/mock" -) - -// PreparedCar is an autogenerated mock type for the PreparedCar type -type PreparedCar struct { - mock.Mock -} - -// Dump provides a mock function with given fields: w -func (_m *PreparedCar) Dump(w io.Writer) error { - ret := _m.Called(w) - - var r0 error - if rf, ok := ret.Get(0).(func(io.Writer) error); ok { - r0 = rf(w) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Size provides a mock function with given fields: -func (_m *PreparedCar) Size() uint64 { - ret := _m.Called() - - var r0 uint64 - if rf, ok := ret.Get(0).(func() uint64); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(uint64) - } - - return r0 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/ReadStore.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/ReadStore.go deleted file mode 100644 index 5e3f1e6b8c..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/ReadStore.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by mockery v1.0.0. 
DO NOT EDIT. - -package mocks - -import ( - blocks "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - mock "github.com/stretchr/testify/mock" -) - -// ReadStore is an autogenerated mock type for the ReadStore type -type ReadStore struct { - mock.Mock -} - -// Get provides a mock function with given fields: _a0 -func (_m *ReadStore) Get(_a0 cid.Cid) (blocks.Block, error) { - ret := _m.Called(_a0) - - var r0 blocks.Block - if rf, ok := ret.Get(0).(func(cid.Cid) blocks.Block); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(blocks.Block) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(cid.Cid) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/SectorCalculator.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/SectorCalculator.go deleted file mode 100644 index 43f0923f19..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/SectorCalculator.go +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mocks - -import ( - io "io" - - mock "github.com/stretchr/testify/mock" -) - -// SectorCalculator is an autogenerated mock type for the SectorCalculator type -type SectorCalculator struct { - mock.Mock -} - -// GeneratePieceCommitment provides a mock function with given fields: piece, pieceSize -func (_m *SectorCalculator) GeneratePieceCommitment(piece io.Reader, pieceSize uint64) ([]byte, error) { - ret := _m.Called(piece, pieceSize) - - var r0 []byte - if rf, ok := ret.Get(0).(func(io.Reader, uint64) []byte); ok { - r0 = rf(piece, pieceSize) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(io.Reader, uint64) error); ok { - r1 = rf(piece, pieceSize) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/WriteStore.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/WriteStore.go deleted file mode 100644 index b186eef38f..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/mocks/WriteStore.go +++ /dev/null @@ -1,27 +0,0 @@ -// Code generated by mockery v1.0.0. DO NOT EDIT. 
- -package mocks - -import ( - blocks "github.com/ipfs/go-block-format" - mock "github.com/stretchr/testify/mock" -) - -// WriteStore is an autogenerated mock type for the WriteStore type -type WriteStore struct { - mock.Mock -} - -// Put provides a mock function with given fields: _a0 -func (_m *WriteStore) Put(_a0 blocks.Block) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(blocks.Block) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/pieceio.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/pieceio.go deleted file mode 100644 index 93c3ad8233..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/pieceio.go +++ /dev/null @@ -1,131 +0,0 @@ -package pieceio - -import ( - "context" - "io" - "os" - "sync" - - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipld/go-car" - "github.com/ipld/go-ipld-prime" - - "github.com/filecoin-project/go-fil-markets/filestore" -) - -type PreparedCar interface { - Size() uint64 - Dump(w io.Writer) error -} - -type CarIO interface { - // WriteCar writes a given payload to a CAR file and into the passed IO stream - WriteCar(ctx context.Context, bs ReadStore, payloadCid cid.Cid, node ipld.Node, w io.Writer, userOnNewCarBlocks ...car.OnNewCarBlockFunc) error - - // PrepareCar prepares a car so that its total size can be calculated without writing it to a file. 
- // It can then be written with PreparedCar.Dump - PrepareCar(ctx context.Context, bs ReadStore, payloadCid cid.Cid, node ipld.Node) (PreparedCar, error) - - // LoadCar loads blocks into the a store from a given CAR file - LoadCar(bs WriteStore, r io.Reader) (cid.Cid, error) -} - -type pieceIO struct { - carIO CarIO - bs blockstore.Blockstore -} - -func NewPieceIO(carIO CarIO, bs blockstore.Blockstore) PieceIO { - return &pieceIO{carIO, bs} -} - -type pieceIOWithStore struct { - pieceIO - store filestore.FileStore -} - -func NewPieceIOWithStore(carIO CarIO, store filestore.FileStore, bs blockstore.Blockstore) PieceIOWithStore { - return &pieceIOWithStore{pieceIO{carIO, bs}, store} -} - -func (pio *pieceIO) GeneratePieceCommitment(rt abi.RegisteredSealProof, payloadCid cid.Cid, selector ipld.Node) (cid.Cid, abi.UnpaddedPieceSize, error) { - preparedCar, err := pio.carIO.PrepareCar(context.Background(), pio.bs, payloadCid, selector) - if err != nil { - return cid.Undef, 0, err - } - pieceSize := uint64(preparedCar.Size()) - r, w, err := os.Pipe() - if err != nil { - return cid.Undef, 0, err - } - var stop sync.WaitGroup - stop.Add(1) - var werr error - go func() { - defer stop.Done() - werr = preparedCar.Dump(w) - err := w.Close() - if werr == nil && err != nil { - werr = err - } - }() - commitment, paddedSize, err := GeneratePieceCommitment(rt, r, pieceSize) - closeErr := r.Close() - if err != nil { - return cid.Undef, 0, err - } - if closeErr != nil { - return cid.Undef, 0, closeErr - } - stop.Wait() - if werr != nil { - return cid.Undef, 0, werr - } - return commitment, paddedSize, nil -} - -func (pio *pieceIOWithStore) GeneratePieceCommitmentToFile(rt abi.RegisteredSealProof, payloadCid cid.Cid, selector ipld.Node, userOnNewCarBlocks ...car.OnNewCarBlockFunc) (cid.Cid, filestore.Path, abi.UnpaddedPieceSize, error) { - f, err := pio.store.CreateTemp() - if err != nil { - return cid.Undef, "", 0, err - } - cleanup := func() { - f.Close() - _ = 
pio.store.Delete(f.Path()) - } - err = pio.carIO.WriteCar(context.Background(), pio.bs, payloadCid, selector, f, userOnNewCarBlocks...) - if err != nil { - cleanup() - return cid.Undef, "", 0, err - } - pieceSize := uint64(f.Size()) - _, err = f.Seek(0, io.SeekStart) - if err != nil { - cleanup() - return cid.Undef, "", 0, err - } - commitment, paddedSize, err := GeneratePieceCommitment(rt, f, pieceSize) - if err != nil { - cleanup() - return cid.Undef, "", 0, err - } - _ = f.Close() - return commitment, f.Path(), paddedSize, nil -} - -func GeneratePieceCommitment(rt abi.RegisteredSealProof, rd io.Reader, pieceSize uint64) (cid.Cid, abi.UnpaddedPieceSize, error) { - paddedReader, paddedSize := padreader.New(rd, pieceSize) - commitment, err := ffiwrapper.GeneratePieceCIDFromFile(rt, paddedReader, paddedSize) - if err != nil { - return cid.Undef, 0, err - } - return commitment, paddedSize, nil -} - -func (pio *pieceIO) ReadPiece(r io.Reader) (cid.Cid, error) { - return pio.carIO.LoadCar(pio.bs, r) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/pieceio_test.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/pieceio_test.go deleted file mode 100644 index 15a1ee7b5e..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/pieceio_test.go +++ /dev/null @@ -1,337 +0,0 @@ -package pieceio_test - -import ( - "bytes" - "context" - "fmt" - "io" - "testing" - - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - dag "github.com/ipfs/go-merkledag" - dstest "github.com/ipfs/go-merkledag/test" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/filestore" - fsmocks 
"github.com/filecoin-project/go-fil-markets/filestore/mocks" - "github.com/filecoin-project/go-fil-markets/pieceio" - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - pmocks "github.com/filecoin-project/go-fil-markets/pieceio/mocks" -) - -func Test_ThereAndBackAgain(t *testing.T) { - tempDir := filestore.OsPath("./tempDir") - cio := cario.NewCarIO() - - store, err := filestore.NewLocalFileStore(tempDir) - require.NoError(t, err) - - sourceBserv := dstest.Bserv() - sourceBs := sourceBserv.Blockstore() - - pio := pieceio.NewPieceIOWithStore(cio, store, sourceBs) - require.NoError(t, err) - - dserv := dag.NewDAGService(sourceBserv) - a := dag.NewRawNode([]byte("aaaa")) - b := dag.NewRawNode([]byte("bbbb")) - c := dag.NewRawNode([]byte("cccc")) - - nd1 := &dag.ProtoNode{} - _ = nd1.AddNodeLink("cat", a) - - nd2 := &dag.ProtoNode{} - _ = nd2.AddNodeLink("first", nd1) - _ = nd2.AddNodeLink("dog", b) - - nd3 := &dag.ProtoNode{} - _ = nd3.AddNodeLink("second", nd2) - _ = nd3.AddNodeLink("bear", c) - - ctx := context.Background() - _ = dserv.Add(ctx, a) - _ = dserv.Add(ctx, b) - _ = dserv.Add(ctx, c) - _ = dserv.Add(ctx, nd1) - _ = dserv.Add(ctx, nd2) - _ = dserv.Add(ctx, nd3) - - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - node := ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { - efsb.Insert("Links", - ssb.ExploreIndex(1, ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())))) - }).Node() - - pcid, tmpPath, _, err := pio.GeneratePieceCommitmentToFile(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.NoError(t, err) - tmpFile, err := store.Open(tmpPath) - require.NoError(t, err) - defer func() { - deferErr := tmpFile.Close() - require.NoError(t, deferErr) - deferErr = store.Delete(tmpFile.Path()) - require.NoError(t, deferErr) - }() - require.NotEqual(t, pcid, cid.Undef) - bufSize := int64(16) // small buffer to illustrate the logic - buf := make([]byte, bufSize) - var 
readErr error - padStart := int64(-1) - loops := int64(-1) - read := 0 - skipped, err := tmpFile.Seek(tmpFile.Size()/2, io.SeekStart) - require.NoError(t, err) - for readErr == nil { - loops++ - read, readErr = tmpFile.Read(buf) - for idx := int64(0); idx < int64(read); idx++ { - if buf[idx] == 0 { - if padStart == -1 { - padStart = skipped + loops*bufSize + idx - } - } else { - padStart = -1 - } - } - } - _, err = tmpFile.Seek(0, io.SeekStart) - require.NoError(t, err) - - var reader io.Reader - if padStart != -1 { - reader = io.LimitReader(tmpFile, padStart) - } else { - reader = tmpFile - } - - id, err := pio.ReadPiece(reader) - require.NoError(t, err) - require.Equal(t, nd3.Cid(), id) -} - -func Test_StoreRestoreMemoryBuffer(t *testing.T) { - tempDir := filestore.OsPath("./tempDir") - cio := cario.NewCarIO() - - store, err := filestore.NewLocalFileStore(tempDir) - require.NoError(t, err) - - sourceBserv := dstest.Bserv() - sourceBs := sourceBserv.Blockstore() - pio := pieceio.NewPieceIOWithStore(cio, store, sourceBs) - - dserv := dag.NewDAGService(sourceBserv) - a := dag.NewRawNode([]byte("aaaa")) - b := dag.NewRawNode([]byte("bbbb")) - c := dag.NewRawNode([]byte("cccc")) - - nd1 := &dag.ProtoNode{} - _ = nd1.AddNodeLink("cat", a) - - nd2 := &dag.ProtoNode{} - _ = nd2.AddNodeLink("first", nd1) - _ = nd2.AddNodeLink("dog", b) - - nd3 := &dag.ProtoNode{} - _ = nd3.AddNodeLink("second", nd2) - _ = nd3.AddNodeLink("bear", c) - - ctx := context.Background() - _ = dserv.Add(ctx, a) - _ = dserv.Add(ctx, b) - _ = dserv.Add(ctx, c) - _ = dserv.Add(ctx, nd1) - _ = dserv.Add(ctx, nd2) - _ = dserv.Add(ctx, nd3) - - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - node := ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { - efsb.Insert("Links", - ssb.ExploreIndex(1, ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())))) - }).Node() - - commitment, tmpPath, paddedSize, err := 
pio.GeneratePieceCommitmentToFile(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.NoError(t, err) - tmpFile, err := store.Open(tmpPath) - require.NoError(t, err) - defer func() { - deferErr := tmpFile.Close() - require.NoError(t, deferErr) - deferErr = store.Delete(tmpFile.Path()) - require.NoError(t, deferErr) - }() - - _, err = tmpFile.Seek(0, io.SeekStart) - require.NoError(t, err) - - require.NotEqual(t, commitment, cid.Undef) - buf := make([]byte, paddedSize) - _, err = tmpFile.Read(buf) - require.NoError(t, err) - buffer := bytes.NewBuffer(buf) - secondCommitment, err := ffiwrapper.GeneratePieceCIDFromFile(abi.RegisteredSealProof_StackedDrg2KiBV1, buffer, paddedSize) - require.NoError(t, err) - require.Equal(t, commitment, secondCommitment) -} - -func Test_PieceCommitmentEquivalenceMemoryFile(t *testing.T) { - tempDir := filestore.OsPath("./tempDir") - cio := cario.NewCarIO() - - store, err := filestore.NewLocalFileStore(tempDir) - require.NoError(t, err) - - sourceBserv := dstest.Bserv() - sourceBs := sourceBserv.Blockstore() - pio := pieceio.NewPieceIOWithStore(cio, store, sourceBs) - - dserv := dag.NewDAGService(sourceBserv) - a := dag.NewRawNode([]byte("aaaa")) - b := dag.NewRawNode([]byte("bbbb")) - c := dag.NewRawNode([]byte("cccc")) - - nd1 := &dag.ProtoNode{} - _ = nd1.AddNodeLink("cat", a) - - nd2 := &dag.ProtoNode{} - _ = nd2.AddNodeLink("first", nd1) - _ = nd2.AddNodeLink("dog", b) - - nd3 := &dag.ProtoNode{} - _ = nd3.AddNodeLink("second", nd2) - _ = nd3.AddNodeLink("bear", c) - - ctx := context.Background() - _ = dserv.Add(ctx, a) - _ = dserv.Add(ctx, b) - _ = dserv.Add(ctx, c) - _ = dserv.Add(ctx, nd1) - _ = dserv.Add(ctx, nd2) - _ = dserv.Add(ctx, nd3) - - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - node := ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { - efsb.Insert("Links", - ssb.ExploreIndex(1, ssb.ExploreRecursive(selector.RecursionLimitNone(), 
ssb.ExploreAll(ssb.ExploreRecursiveEdge())))) - }).Node() - - fcommitment, tmpPath, fpaddedSize, ferr := pio.GeneratePieceCommitmentToFile(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - defer func() { - deferErr := store.Delete(tmpPath) - require.NoError(t, deferErr) - }() - - mcommitment, mpaddedSize, merr := pio.GeneratePieceCommitment(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.Equal(t, fcommitment, mcommitment) - require.Equal(t, fpaddedSize, mpaddedSize) - require.Equal(t, ferr, merr) - require.NoError(t, ferr) - require.NoError(t, merr) -} - -func Test_Failures(t *testing.T) { - sourceBserv := dstest.Bserv() - sourceBs := sourceBserv.Blockstore() - dserv := dag.NewDAGService(sourceBserv) - a := dag.NewRawNode([]byte("aaaa")) - b := dag.NewRawNode([]byte("bbbb")) - c := dag.NewRawNode([]byte("cccc")) - - nd1 := &dag.ProtoNode{} - _ = nd1.AddNodeLink("cat", a) - - nd2 := &dag.ProtoNode{} - _ = nd2.AddNodeLink("first", nd1) - _ = nd2.AddNodeLink("dog", b) - - nd3 := &dag.ProtoNode{} - _ = nd3.AddNodeLink("second", nd2) - _ = nd3.AddNodeLink("bear", c) - - ctx := context.Background() - _ = dserv.Add(ctx, a) - _ = dserv.Add(ctx, b) - _ = dserv.Add(ctx, c) - _ = dserv.Add(ctx, nd1) - _ = dserv.Add(ctx, nd2) - _ = dserv.Add(ctx, nd3) - - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - node := ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { - efsb.Insert("Links", - ssb.ExploreIndex(1, ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())))) - }).Node() - - t.Run("create temp file fails", func(t *testing.T) { - fsmock := fsmocks.FileStore{} - fsmock.On("CreateTemp").Return(nil, fmt.Errorf("Failed")) - pio := pieceio.NewPieceIOWithStore(nil, &fsmock, sourceBs) - _, _, _, err := pio.GeneratePieceCommitmentToFile(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.Error(t, err) - }) - t.Run("write CAR fails", func(t *testing.T) { - tempDir := 
filestore.OsPath("./tempDir") - store, err := filestore.NewLocalFileStore(tempDir) - require.NoError(t, err) - - ciomock := pmocks.CarIO{} - any := mock.Anything - ciomock.On("WriteCar", any, any, any, any, any).Return(fmt.Errorf("failed to write car")) - pio := pieceio.NewPieceIOWithStore(&ciomock, store, sourceBs) - _, _, _, err = pio.GeneratePieceCommitmentToFile(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.Error(t, err) - }) - t.Run("prepare CAR fails", func(t *testing.T) { - - ciomock := pmocks.CarIO{} - any := mock.Anything - ciomock.On("PrepareCar", any, any, any, any).Return(nil, fmt.Errorf("failed to prepare car")) - pio := pieceio.NewPieceIO(&ciomock, sourceBs) - _, _, err := pio.GeneratePieceCommitment(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.Error(t, err) - }) - t.Run("PreparedCard dump operation fails", func(t *testing.T) { - preparedCarMock := pmocks.PreparedCar{} - ciomock := pmocks.CarIO{} - any := mock.Anything - ciomock.On("PrepareCar", any, any, any, any).Return(&preparedCarMock, nil) - preparedCarMock.On("Size").Return(uint64(1000)) - preparedCarMock.On("Dump", any).Return(fmt.Errorf("failed to write car")) - pio := pieceio.NewPieceIO(&ciomock, sourceBs) - _, _, err := pio.GeneratePieceCommitment(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.Error(t, err) - }) - t.Run("seek fails", func(t *testing.T) { - cio := cario.NewCarIO() - - fsmock := fsmocks.FileStore{} - mockfile := fsmocks.File{} - - fsmock.On("CreateTemp").Return(&mockfile, nil).Once() - fsmock.On("Delete", mock.Anything).Return(nil).Once() - - counter := 0 - size := 0 - mockfile.On("Write", mock.Anything).Run(func(args mock.Arguments) { - arg := args[0] - buf := arg.([]byte) - size := len(buf) - counter += size - }).Return(size, nil).Times(17) - mockfile.On("Size").Return(int64(484)) - mockfile.On("Write", mock.Anything).Return(24, nil).Once() - mockfile.On("Close").Return(nil).Once() - 
mockfile.On("Path").Return(filestore.Path("mock")).Once() - mockfile.On("Seek", mock.Anything, mock.Anything).Return(int64(0), fmt.Errorf("seek failed")) - - pio := pieceio.NewPieceIOWithStore(cio, &fsmock, sourceBs) - _, _, _, err := pio.GeneratePieceCommitmentToFile(abi.RegisteredSealProof_StackedDrg2KiBV1, nd3.Cid(), node) - require.Error(t, err) - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/tempDir/.gitkeep b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/tempDir/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/types.go b/vendor/github.com/filecoin-project/go-fil-markets/pieceio/types.go deleted file mode 100644 index 847b15fb23..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/pieceio/types.go +++ /dev/null @@ -1,32 +0,0 @@ -package pieceio - -import ( - "io" - - "github.com/filecoin-project/specs-actors/actors/abi" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - "github.com/ipld/go-ipld-prime" - - "github.com/filecoin-project/go-fil-markets/filestore" -) - -type WriteStore interface { - Put(blocks.Block) error -} - -type ReadStore interface { - Get(cid.Cid) (blocks.Block, error) -} - -// PieceIO converts between payloads and pieces -type PieceIO interface { - GeneratePieceCommitment(rt abi.RegisteredSealProof, payloadCid cid.Cid, selector ipld.Node) (cid.Cid, abi.UnpaddedPieceSize, error) - ReadPiece(r io.Reader) (cid.Cid, error) -} - -type PieceIOWithStore interface { - PieceIO - GeneratePieceCommitmentToFile(rt abi.RegisteredSealProof, payloadCid cid.Cid, selector ipld.Node, userOnNewCarBlocks ...car.OnNewCarBlockFunc) (cid.Cid, filestore.Path, abi.UnpaddedPieceSize, error) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/piecestore.go b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/piecestore.go deleted file mode 100644 
index 67bd6cea02..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/piecestore.go +++ /dev/null @@ -1,124 +0,0 @@ -package piecestore - -import ( - "github.com/filecoin-project/go-statestore" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" -) - -// DSPiecePrefix is the name space for storing piece infos -var DSPiecePrefix = "/pieces" - -// DSCIDPrefix is the name space for storing CID infos -var DSCIDPrefix = "/cid-infos" - -// NewPieceStore returns a new piecestore based on the given datastore -func NewPieceStore(ds datastore.Batching) PieceStore { - return &pieceStore{ - pieces: statestore.New(namespace.Wrap(ds, datastore.NewKey(DSPiecePrefix))), - cidInfos: statestore.New(namespace.Wrap(ds, datastore.NewKey(DSCIDPrefix))), - } -} - -type pieceStore struct { - pieces *statestore.StateStore - cidInfos *statestore.StateStore -} - -// Store `dealInfo` in the PieceStore with key `pieceCID`. -func (ps *pieceStore) AddDealForPiece(pieceCID cid.Cid, dealInfo DealInfo) error { - return ps.mutatePieceInfo(pieceCID, func(pi *PieceInfo) error { - for _, di := range pi.Deals { - if di == dealInfo { - return nil - } - } - pi.Deals = append(pi.Deals, dealInfo) - return nil - }) -} - -// Store the map of blockLocations in the PieceStore's CIDInfo store, with key `pieceCID` -func (ps *pieceStore) AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]BlockLocation) error { - for c, blockLocation := range blockLocations { - err := ps.mutateCIDInfo(c, func(ci *CIDInfo) error { - for _, pbl := range ci.PieceBlockLocations { - if pbl.PieceCID.Equals(pieceCID) && pbl.BlockLocation == blockLocation { - return nil - } - } - ci.PieceBlockLocations = append(ci.PieceBlockLocations, PieceBlockLocation{blockLocation, pieceCID}) - return nil - }) - if err != nil { - return err - } - } - return nil -} - -// Retrieve the PieceInfo associated with `pieceCID` from the piece info store. 
-func (ps *pieceStore) GetPieceInfo(pieceCID cid.Cid) (PieceInfo, error) { - var out PieceInfo - if err := ps.pieces.Get(pieceCID).Get(&out); err != nil { - return PieceInfo{}, err - } - return out, nil -} - -// Retrieve the CIDInfo associated with `pieceCID` from the CID info store. -func (ps *pieceStore) GetCIDInfo(payloadCID cid.Cid) (CIDInfo, error) { - var out CIDInfo - if err := ps.cidInfos.Get(payloadCID).Get(&out); err != nil { - return CIDInfo{}, err - } - return out, nil -} - -func (ps *pieceStore) ensurePieceInfo(pieceCID cid.Cid) error { - has, err := ps.pieces.Has(pieceCID) - - if err != nil { - return err - } - if has { - return nil - } - - pieceInfo := PieceInfo{PieceCID: pieceCID} - return ps.pieces.Begin(pieceCID, &pieceInfo) -} - -func (ps *pieceStore) ensureCIDInfo(c cid.Cid) error { - has, err := ps.cidInfos.Has(c) - - if err != nil { - return err - } - - if has { - return nil - } - - cidInfo := CIDInfo{CID: c} - return ps.cidInfos.Begin(c, &cidInfo) -} - -func (ps *pieceStore) mutatePieceInfo(pieceCID cid.Cid, mutator interface{}) error { - err := ps.ensurePieceInfo(pieceCID) - if err != nil { - return err - } - - return ps.pieces.Get(pieceCID).Mutate(mutator) -} - -func (ps *pieceStore) mutateCIDInfo(c cid.Cid, mutator interface{}) error { - err := ps.ensureCIDInfo(c) - if err != nil { - return err - } - - return ps.cidInfos.Get(c).Mutate(mutator) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/piecestore_test.go b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/piecestore_test.go deleted file mode 100644 index 6a8baff76e..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/piecestore_test.go +++ /dev/null @@ -1,173 +0,0 @@ -package piecestore_test - -import ( - "math/rand" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/stretchr/testify/assert" - - 
"github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestStorePieceInfo(t *testing.T) { - - pieceCid := shared_testutil.GenerateCids(1)[0] - initializePieceStore := func(t *testing.T) piecestore.PieceStore { - ps := piecestore.NewPieceStore(datastore.NewMapDatastore()) - _, err := ps.GetPieceInfo(pieceCid) - assert.Error(t, err) - return ps - } - - // Add a deal info - t.Run("can add deals", func(t *testing.T) { - ps := initializePieceStore(t) - dealInfo := piecestore.DealInfo{ - DealID: abi.DealID(rand.Uint64()), - SectorID: rand.Uint64(), - Offset: rand.Uint64(), - Length: rand.Uint64(), - } - err := ps.AddDealForPiece(pieceCid, dealInfo) - assert.NoError(t, err) - - pi, err := ps.GetPieceInfo(pieceCid) - assert.NoError(t, err) - assert.Len(t, pi.Deals, 1) - assert.Equal(t, pi.Deals[0], dealInfo) - }) - - t.Run("adding same deal twice does not dup", func(t *testing.T) { - ps := initializePieceStore(t) - dealInfo := piecestore.DealInfo{ - DealID: abi.DealID(rand.Uint64()), - SectorID: rand.Uint64(), - Offset: rand.Uint64(), - Length: rand.Uint64(), - } - err := ps.AddDealForPiece(pieceCid, dealInfo) - assert.NoError(t, err) - - pi, err := ps.GetPieceInfo(pieceCid) - assert.NoError(t, err) - assert.Len(t, pi.Deals, 1) - assert.Equal(t, pi.Deals[0], dealInfo) - - err = ps.AddDealForPiece(pieceCid, dealInfo) - assert.NoError(t, err) - - pi, err = ps.GetPieceInfo(pieceCid) - assert.NoError(t, err) - assert.Len(t, pi.Deals, 1) - assert.Equal(t, pi.Deals[0], dealInfo) - }) -} - -func TestStoreCIDInfo(t *testing.T) { - pieceCids := shared_testutil.GenerateCids(2) - pieceCid1 := pieceCids[0] - pieceCid2 := pieceCids[1] - testCIDs := shared_testutil.GenerateCids(3) - blockLocations := make([]piecestore.BlockLocation, 0, 3) - for i := 0; i < 3; i++ { - blockLocations = append(blockLocations, piecestore.BlockLocation{ - RelOffset: rand.Uint64(), - BlockSize: rand.Uint64(), - }) - } - - 
initializePieceStore := func(t *testing.T) piecestore.PieceStore { - ps := piecestore.NewPieceStore(datastore.NewMapDatastore()) - _, err := ps.GetCIDInfo(testCIDs[0]) - assert.Error(t, err) - return ps - } - - t.Run("can add piece block locations", func(t *testing.T) { - ps := initializePieceStore(t) - err := ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ - testCIDs[0]: blockLocations[0], - testCIDs[1]: blockLocations[1], - testCIDs[2]: blockLocations[2], - }) - assert.NoError(t, err) - - ci, err := ps.GetCIDInfo(testCIDs[0]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[0], pieceCid1}) - - ci, err = ps.GetCIDInfo(testCIDs[1]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[1], pieceCid1}) - - ci, err = ps.GetCIDInfo(testCIDs[2]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[2], pieceCid1}) - }) - - t.Run("overlapping adds", func(t *testing.T) { - ps := initializePieceStore(t) - err := ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ - testCIDs[0]: blockLocations[0], - testCIDs[1]: blockLocations[2], - }) - assert.NoError(t, err) - err = ps.AddPieceBlockLocations(pieceCid2, map[cid.Cid]piecestore.BlockLocation{ - testCIDs[1]: blockLocations[1], - testCIDs[2]: blockLocations[2], - }) - assert.NoError(t, err) - - ci, err := ps.GetCIDInfo(testCIDs[0]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[0], pieceCid1}) - - ci, err = ps.GetCIDInfo(testCIDs[1]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 2) - assert.Equal(t, ci.PieceBlockLocations[0], 
piecestore.PieceBlockLocation{blockLocations[2], pieceCid1}) - assert.Equal(t, ci.PieceBlockLocations[1], piecestore.PieceBlockLocation{blockLocations[1], pieceCid2}) - - ci, err = ps.GetCIDInfo(testCIDs[2]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[2], pieceCid2}) - }) - - t.Run("duplicate adds", func(t *testing.T) { - ps := initializePieceStore(t) - err := ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ - testCIDs[0]: blockLocations[0], - testCIDs[1]: blockLocations[1], - }) - assert.NoError(t, err) - err = ps.AddPieceBlockLocations(pieceCid1, map[cid.Cid]piecestore.BlockLocation{ - testCIDs[1]: blockLocations[1], - testCIDs[2]: blockLocations[2], - }) - assert.NoError(t, err) - - ci, err := ps.GetCIDInfo(testCIDs[0]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[0], pieceCid1}) - - ci, err = ps.GetCIDInfo(testCIDs[1]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[1], pieceCid1}) - - ci, err = ps.GetCIDInfo(testCIDs[2]) - assert.NoError(t, err) - assert.Len(t, ci.PieceBlockLocations, 1) - assert.Equal(t, ci.PieceBlockLocations[0], piecestore.PieceBlockLocation{blockLocations[2], pieceCid1}) - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types.go b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types.go index cb18b4e849..42fdd3426f 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types.go @@ -1,18 +1,23 @@ package piecestore import ( - "github.com/filecoin-project/specs-actors/actors/abi" + "context" + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared" ) -//go:generate cbor-gen-for PieceInfo DealInfo BlockLocation PieceBlockLocation CIDInfo +//go:generate cbor-gen-for --map-encoding PieceInfo DealInfo BlockLocation PieceBlockLocation CIDInfo // DealInfo is information about a single deal for a given piece type DealInfo struct { DealID abi.DealID - SectorID uint64 - Offset uint64 - Length uint64 + SectorID abi.SectorNumber + Offset abi.PaddedPieceSize + Length abi.PaddedPieceSize } // BlockLocation is information about where a given block is relative to the overall piece @@ -50,8 +55,12 @@ var PieceInfoUndefined = PieceInfo{} // PieceStore is a saved database of piece info that can be modified and queried type PieceStore interface { + Start(ctx context.Context) error + OnReady(ready shared.ReadyFunc) AddDealForPiece(pieceCID cid.Cid, dealInfo DealInfo) error AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]BlockLocation) error GetPieceInfo(pieceCID cid.Cid) (PieceInfo, error) GetCIDInfo(payloadCID cid.Cid) (CIDInfo, error) + ListCidInfoKeys() ([]cid.Cid, error) + ListPieceInfoKeys() ([]cid.Cid, error) } diff --git a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types_cbor_gen.go index fe5817a7f4..f01eaf0280 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types_cbor_gen.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/piecestore/types_cbor_gen.go @@ -6,7 +6,7 @@ import ( "fmt" "io" - "github.com/filecoin-project/specs-actors/actors/abi" + abi "github.com/filecoin-project/go-state-types/abi" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) @@ -18,22 +18,45 @@ func (t *PieceInfo) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write([]byte{162}); 
err != nil { return err } + scratch := make([]byte, 9) + // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != nil { + return err + } - if err := cbg.WriteCid(w, t.PieceCID); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.PieceCID); err != nil { return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) } // t.Deals ([]piecestore.DealInfo) (slice) + if len("Deals") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Deals\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Deals"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Deals")); err != nil { + return err + } + if len(t.Deals) > cbg.MaxLength { return xerrors.Errorf("Slice value in field t.Deals was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Deals)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Deals))); err != nil { return err } for _, v := range t.Deals { @@ -45,94 +68,160 @@ func (t *PieceInfo) MarshalCBOR(w io.Writer) error { } func (t *PieceInfo) UnmarshalCBOR(r io.Reader) error { + *t = PieceInfo{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("PieceInfo: map struct too large (%d)", extra) } - // 
t.PieceCID (cid.Cid) (struct) + var name string + n := extra + + for i := uint64(0); i < n; i++ { - { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + name = string(sval) } - t.PieceCID = c + switch name { + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": - } - // t.Deals ([]piecestore.DealInfo) (slice) + { - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } - if extra > cbg.MaxLength { - return fmt.Errorf("t.Deals: array too large (%d)", extra) - } + t.PieceCID = c - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } + } + // t.Deals ([]piecestore.DealInfo) (slice) + case "Deals": - if extra > 0 { - t.Deals = make([]DealInfo, extra) - } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } - for i := 0; i < int(extra); i++ { + if extra > cbg.MaxLength { + return fmt.Errorf("t.Deals: array too large (%d)", extra) + } - var v DealInfo - if err := v.UnmarshalCBOR(br); err != nil { - return err - } + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } - t.Deals[i] = v + if extra > 0 { + t.Deals = make([]DealInfo, extra) + } + + for i := 0; i < int(extra); i++ { + + var v DealInfo + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Deals[i] = v + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } return nil } - func (t *DealInfo) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{132}); err != nil { + if _, err := w.Write([]byte{164}); err != nil { return err } + scratch := make([]byte, 9) + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return 
xerrors.Errorf("Value in field \"DealID\" was too long") + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { return err } - // t.SectorID (uint64) (uint64) + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { + return err + } + + // t.SectorID (abi.SectorNumber) (uint64) + if len("SectorID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SectorID\" was too long") + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SectorID))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SectorID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SectorID")); err != nil { return err } - // t.Offset (uint64) (uint64) + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { + return err + } + + // t.Offset (abi.PaddedPieceSize) (uint64) + if len("Offset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Offset\" was too long") + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Offset))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Offset"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Offset")); err != nil { return err } - // t.Length (uint64) (uint64) + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { + return err + } + + // t.Length (abi.PaddedPieceSize) (uint64) + if len("Length") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Length\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 
uint64(len("Length"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Length")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Length))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Length)); err != nil { return err } @@ -140,97 +229,146 @@ func (t *DealInfo) MarshalCBOR(w io.Writer) error { } func (t *DealInfo) UnmarshalCBOR(r io.Reader) error { + *t = DealInfo{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("DealInfo: map struct too large (%d)", extra) } - // t.DealID (abi.DealID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) + var name string + n := extra - } - // t.SectorID (uint64) (uint64) + for i := uint64(0); i < n; i++ { - { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err + name = string(sval) } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = uint64(extra) - - } - // t.Offset (uint64) (uint64) - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + switch name { + // t.DealID (abi.DealID) (uint64) + case "DealID": + + { + + maj, extra, err = 
cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.SectorID (abi.SectorNumber) (uint64) + case "SectorID": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SectorID = abi.SectorNumber(extra) + + } + // t.Offset (abi.PaddedPieceSize) (uint64) + case "Offset": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Offset = abi.PaddedPieceSize(extra) + + } + // t.Length (abi.PaddedPieceSize) (uint64) + case "Length": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Length = abi.PaddedPieceSize(extra) + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) } - t.Offset = uint64(extra) - } - // t.Length (uint64) (uint64) - - { - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Length = uint64(extra) - - } return nil } - func (t *BlockLocation) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write([]byte{162}); err != nil { return err } + scratch := make([]byte, 9) + // t.RelOffset (uint64) (uint64) + if len("RelOffset") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"RelOffset\" was too long") + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.RelOffset))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajTextString, uint64(len("RelOffset"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("RelOffset")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.RelOffset)); err != nil { return err } // t.BlockSize (uint64) (uint64) + if len("BlockSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BlockSize\" was too long") + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.BlockSize))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BlockSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BlockSize")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.BlockSize)); err != nil { return err } @@ -238,68 +376,116 @@ func (t *BlockLocation) MarshalCBOR(w io.Writer) error { } func (t *BlockLocation) UnmarshalCBOR(r io.Reader) error { + *t = BlockLocation{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("BlockLocation: map struct too large (%d)", extra) } - // t.RelOffset (uint64) (uint64) + var name string + n := extra - { + for i := uint64(0); i < n; i++ { - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) } - t.RelOffset = uint64(extra) - } - // t.BlockSize 
(uint64) (uint64) + switch name { + // t.RelOffset (uint64) (uint64) + case "RelOffset": - { + { - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.BlockSize = uint64(extra) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.RelOffset = uint64(extra) + + } + // t.BlockSize (uint64) (uint64) + case "BlockSize": + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BlockSize = uint64(extra) + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } + return nil } - func (t *PieceBlockLocation) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write([]byte{162}); err != nil { return err } + scratch := make([]byte, 9) + // t.BlockLocation (piecestore.BlockLocation) (struct) + if len("BlockLocation") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"BlockLocation\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("BlockLocation"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("BlockLocation")); err != nil { + return err + } + if err := t.BlockLocation.MarshalCBOR(w); err != nil { return err } // t.PieceCID (cid.Cid) (struct) + if len("PieceCID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCID\" was too long") + } - if err := cbg.WriteCid(w, t.PieceCID); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCID")); err != 
nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.PieceCID); err != nil { return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) } @@ -307,65 +493,113 @@ func (t *PieceBlockLocation) MarshalCBOR(w io.Writer) error { } func (t *PieceBlockLocation) UnmarshalCBOR(r io.Reader) error { + *t = PieceBlockLocation{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("PieceBlockLocation: map struct too large (%d)", extra) } - // t.BlockLocation (piecestore.BlockLocation) (struct) + var name string + n := extra + + for i := uint64(0); i < n; i++ { - { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - if err := t.BlockLocation.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.BlockLocation: %w", err) + name = string(sval) } - } - // t.PieceCID (cid.Cid) (struct) + switch name { + // t.BlockLocation (piecestore.BlockLocation) (struct) + case "BlockLocation": - { + { - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) - } + if err := t.BlockLocation.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.BlockLocation: %w", err) + } + + } + // t.PieceCID (cid.Cid) (struct) + case "PieceCID": + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } - t.PieceCID = c + t.PieceCID = c + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } + return nil } - func (t *CIDInfo) MarshalCBOR(w 
io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write([]byte{162}); err != nil { return err } + scratch := make([]byte, 9) + // t.CID (cid.Cid) (struct) + if len("CID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CID")); err != nil { + return err + } - if err := cbg.WriteCid(w, t.CID); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.CID); err != nil { return xerrors.Errorf("failed to write cid field t.CID: %w", err) } // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + if len("PieceBlockLocations") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceBlockLocations\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceBlockLocations"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceBlockLocations")); err != nil { + return err + } + if len(t.PieceBlockLocations) > cbg.MaxLength { return xerrors.Errorf("Slice value in field t.PieceBlockLocations was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.PieceBlockLocations)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.PieceBlockLocations))); err != nil { return err } for _, v := range t.PieceBlockLocations { @@ -377,59 +611,84 @@ func (t *CIDInfo) MarshalCBOR(w io.Writer) error { } func (t *CIDInfo) UnmarshalCBOR(r io.Reader) error { + *t = CIDInfo{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + 
if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("CIDInfo: map struct too large (%d)", extra) } - // t.CID (cid.Cid) (struct) + var name string + n := extra + + for i := uint64(0); i < n; i++ { - { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.CID: %w", err) + name = string(sval) } - t.CID = c + switch name { + // t.CID (cid.Cid) (struct) + case "CID": - } - // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + { - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CID: %w", err) + } - if extra > cbg.MaxLength { - return fmt.Errorf("t.PieceBlockLocations: array too large (%d)", extra) - } + t.CID = c - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } + } + // t.PieceBlockLocations ([]piecestore.PieceBlockLocation) (slice) + case "PieceBlockLocations": - if extra > 0 { - t.PieceBlockLocations = make([]PieceBlockLocation, extra) - } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } - for i := 0; i < int(extra); i++ { + if extra > cbg.MaxLength { + return fmt.Errorf("t.PieceBlockLocations: array too large (%d)", extra) + } - var v PieceBlockLocation - if err := v.UnmarshalCBOR(br); err != nil { - return err - } + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } - t.PieceBlockLocations[i] = v + if extra > 0 { + t.PieceBlockLocations = make([]PieceBlockLocation, extra) + } + + for i := 0; i < int(extra); i++ { + + var v PieceBlockLocation + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.PieceBlockLocations[i] = v + } + + default: + return 
fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } return nil diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/README.md b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/README.md deleted file mode 100644 index bd7653a45b..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/README.md +++ /dev/null @@ -1,303 +0,0 @@ -# retrievalmarket -The retrievalmarket module is intended for Filecoin node implementations written in Go. -It implements functionality to allow execution of retrieval market deals on the -Filecoin network. -The node implementation must provide access to chain operations, and persistent -data storage. - -## Table of Contents -* [Background reading](#Background-reading) -* [Installation](#Installation) -* [Operation](#Operation) -* [Implementation](#Implementation) - * [Peer Resolver](#Peer_Resolver) - * [RetrievalClientNode](#RetrievalClientNode) - * [RetrievalProviderNode](#RetrievalProviderNode) -* [Construction](#Construction) - * [Construct a RetrievalClient](#RetrievalClient) - * [Construct a RetrievalProvider](#RetrievalProvider) -* [Technical Documentation](#technical-documentation) - -## Background reading -Please see the -[Filecoin Retrieval Market Specification](https://filecoin-project.github.io/specs/#systems__filecoin_markets__retrieval_market). - -## Installation -The build process for retrievalmarket requires Go >= v1.13. - -To install: -```bash -go get github.com/filecoin-project/go-fil-markets/retrievalmarket -``` - -## Operation - -The `retrievalmarket` package provides high level APIs to execute data retrieval deals between a - retrieval client and a retrieval - provider (a.k.a. retrieval miner) on the Filecoin netwwork. - The node must implement the `PeerResolver`, `RetrievalProviderNode`, and - `RetrievalClientNode` interfaces in order to construct and use the module. 
- -Deals are expected to survive a node restart; deals and related information are - expected to be stored on disk. - -`retrievalmarket` communicates its deal operations and requested data via -[go-data-transfer](https://github.com/filecoin-project/go-data-transfer) using -[go-graphsync](https://github.com/ipfs/go-graphsync). - -Once required Node APIs are implemented and the retrievalmarket APIs are exposed to your desired - consumers (such as a command-line or web interface), a retrieval from the client side could -proceed roughly like so: -1. Your node has a record of data with payloadCIDs and their respective pieceCIDs. Someone, -possibly you, wants to retrieve data referenced by `paylaodCID`. -1. It calls `PeerResolver.GetPeers` to obtain a list of retrieval providers storing data - referenced by `payloadCID`. -1. It obtains retrieval deal terms by calling each retrieval miners' `Query` function. -1. The node selects the best terms for a retrieval deal and initiates a deal by calling - the retrieval client's `Retrieve` function with the selected retrieval miner and piece info. -1. The deal then proceeds automatically until all the data is returned and full payment in the - form of vouchers is made to the retrieval provider, or the deal errors. -1. Once the deal is complete and the final payment voucher is posted to chain, your client account balance - will be adjusted according to the terms of the deal. - -A retrieval from the provider side is more automated; the RetrievalProvider would be listening - for retrieval Query and Retrieve requests, and respond accordingly. - -1. Your node stores a record of what it has stored locally, or possibly a record of peers - with data. -1. Your node receives a Query for `payloadCID` and responds automatically with the terms you the - node operator have set for retrieval deals. -1. Your node receives a DealProposal for retrieval, and automatically validates and accepts or - rejects it. 
If accepted, the deal proceeds and your node begins sending data in pieces, stopping - every so often to request another voucher for a greater value. -1. Once the deal is complete and your node has received a voucher sufficient to cover the entire -data transfer, you the node operator may then redeem the voucher and collect FIL. - -### Collecting FIL for a deal is the node's responsibility -To collect your FIL, your node must send on-chain -messages directly to the payment channel actor to send all the vouchers, -Settle, and Collect on the deal. This will finalize the client and provider balances for the -retrieval deal on the Filecoin blockchain. Implementation and timing of these calls is the node's -responsibility and is not a part of `retrievalmarket`. For more information about how -to interact with the -payment channel actor, see the -[github.com/filecoin-project/specs-actors](https://github.com/filecoin-project/specs-actors) repo. - -## Implementation - -### General Steps -1. Decide if your node can be configured as a Retrieval Provider, a Retrieval Client or both. -1. Determine how and where your retrieval calls to RetrievalProvider and RetrievalClient functions - will be made. -1. Implement the required interfaces as described in this section. -1. [Construct a RetrievalClient](#RetrievalClient) in your node's startup, if your - node will be a client. -1. [Construct a RetrievalProvider](#RetrievalProvider) in your node's startup, if your - node will be a provider. -If setting up a RetrievalProvider, call its `Start` function it in the appropriate place, and its - `Stop` function in the appropriate place. -1. Expose desired `retrievalmarket` functionality to whatever internal modules desired, such as - command line interface, JSON RPC, or HTTP API. 
- -Implement the [`PeerResolver`](#PeerResolver), [`RetrievalProviderNode`](#RetrievalProviderNode), -and [`RetrievalClientNode`](#RetrievalClientNode) -interfaces in [retrievalmarket/types.go](./types.go), described below: - -### PeerResolver -PeerResolver is an interface for looking up providers that may have a piece of identifiable -data. Its functions are: - -#### GetPeers -```go -func GetPeers(payloadCID cid.Cid) ([]RetrievalPeer, error) -``` -Return a slice of RetrievalPeers that store the data referenced by `payloadCID`. - ---- -### RetrievalClientNode - -`RetrievalClientNode` contains the node dependencies for a RetrievalClient. Its functions are: - -* [`AllocateLane`](#AllocateLane) -* [`GetChainHead`](#GetChainHead) -* [`GetOrCreatePaymentChannel`](#GetOrCreatePaymentChannel) -* [`CreatePaymentVoucher`](#CreatePaymentVoucher) -* [`WaitForPaymentChannelAddFunds`](#WaitForPaymentChannelAddFunds) -* [`WaitForPaymentChannelCreation`](#WaitForPaymentChannelCreation) - -#### AllocateLane -```go -func AllocateLane(paymentChannel address.Address) (uint64, error) -``` - -Create a lane within `paymentChannel` so that calls to CreatePaymentVoucher will -automatically make vouchers only for the difference in total. Note that payment channel -Actors have a -[lane limit](https://github.com/filecoin-project/specs-actors/blob/0df536f7e461599c818231aa0effcdaccbb74900/actors/builtin/paych/paych_actor.go#L20). - -#### CreatePaymentVoucher -```go -func CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, - amount abi.TokenAmount, lane uint64, tok shared.TipSetToken - ) (*paych.SignedVoucher, error) -``` -Create a new payment voucher for `paymentChannel` with `amount`, for lane `lane`, given chain -state at `tok`. - -#### GetChainHead -```go -func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) -``` -Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. 
- -#### GetOrCreatePaymentChannel -```go -func GetOrCreatePaymentChannel(ctx context.Context, clientAddress, minerAddress address.Address, - amount abi.TokenAmount, tok shared.TipSetToken - ) (address.Address, cid.Cid, error) -``` -If there is a current payment channel for deals between `clientAddress` and `minerAddress`, -add `amount` to the channel, then return the payment channel address and `cid.Undef`. - -If there isn't, construct a new payment channel actor with `amount` funds by posting -the corresponding message on chain, then return `address.Undef` and the posted message `cid.Cid`. -For more information about how to construct a payment channel actor, see -[github.com/filecoin-project/specs-actors](https://github.com/filecoin-project/specs-actors) - -#### WaitForPaymentChannelAddFunds -```go -func WaitForPaymentChannelAddFunds(messageCID cid.Cid) error -``` -Wait for message with CID `messageCID` on chain that funds have been sent to a payment channel. - -#### WaitForPaymentChannelCreation -```go -func WaitForPaymentChannelCreation(messageCID cid.Cid) (address.Address, error) -``` -Wait for a message on chain with CID `messageCID` that a payment channel has been created. - ---- -### RetrievalProviderNode -`RetrievalProviderNode` contains the node dependencies for a RetrievalProvider. -Its functions are: - -* [`GetChainHead`](#GetChainHead) -* [`GetMinerWorkerAddress`](#GetMinerWorkerAddress) -* [`UnsealSector`](#UnsealSector) -* [`SavePaymentVoucher`](#SavePaymentVoucher) - -#### GetChainHead -```go -func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) -``` -Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. - -#### GetMinerWorkerAddress -```go -func GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken, - ) (address.Address, error) -``` -Get the miner worker address for the given miner owner, as of `tok`. 
- -#### UnsealSector -```go -func UnsealSector(ctx context.Context, sectorID uint64, offset uint64, length uint64, - ) (io.ReadCloser, error) -``` -Unseal `length` data contained in `sectorID`, starting at `offset`. Return an `io.ReadCloser -` for accessing the data. - -#### SavePaymentVoucher -```go -func SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, - voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, - tok shared.TipSetToken) (abi.TokenAmount, error) -``` - -Save the provided `paych.SignedVoucher` for `paymentChannel`. The RetrievalProviderNode -implementation should validate the SignedVoucher using the provided `proof`, ` -expectedAmount`, based on the chain state referenced by `tok`. The value of the -voucher should be equal or greater than the largest previous voucher by - `expectedAmount`. It returns the actual difference. - - -## Construction -### RetrievalClient -```go -package retrievalimpl -func NewClient( - netwk network.RetrievalMarketNetwork, - bs blockstore.Blockstore, - node retrievalmarket.RetrievalClientNode, - resolver retrievalmarket.PeerResolver, - ds datastore.Batching, - storedCounter *storedcounter.StoredCounter, -) (retrievalmarket.RetrievalClient, error) -``` -#### Parameters -* `netwk rmnet.RetrievalMarketNetwork` - `RetrievalMarketNetwork` is an interface for creating and handling deal streams. To create it: - - ```go - package network - - func NewFromLibp2pHost(h host.Host) RetrievalMarketNetwork - ``` - where `h host.Host` is your node's libp2p Host. - See - [github.com/libp2p/go-libp2p-core/host](https://github.com/libp2p/go-libp2p-core/host). - -* `bs blockstore.Blockstore` is an IPFS blockstore for storing and retrieving data for deals. - See - [github.com/ipfs/go-ipfs-blockstore](github.com/ipfs/go-ipfs-blockstore). - -* `node retrievalmarket.RetrievalClientNode` is the `RetrievalClientNode` interface you have - implemented. 
- -* `resolver retrievalmarket.PeerResolver` is the `PeerResolver` interface you have implemented. -* `ds datastore.Batching` is a datastore for the deal's state machine. It is - typically the node's own datastore that implements the IPFS datastore.Batching interface. - See - [github.com/ipfs/go-datastore](https://github.com/ipfs/go-datastore). - - * `storedCounter *storedcounter.StoredCounter` is a file-based stored counter used to generate new - dealIDs. See - [github.com/filecoin-project/go-storedcounter](https://github.com/filecoin-project/go-storedcounter). - -### RetrievalProvider -```go -package retrievalimpl - -func NewProvider(minerAddress address.Address, - node retrievalmarket.RetrievalProviderNode, - netwk network.RetrievalMarketNetwork, - pieceStore piecestore.PieceStore, - bs blockstore.Blockstore, - ds datastore.Batching, - ) (retrievalmarket.RetrievalProvider, error) -``` - -#### Parameters -* `minerAddress address.Address` is the address of the retrieval miner owner. -* `node retrievalmarket.RetrievalProviderNode` is the `RetrievalProviderNode` API you have implemented. -* `netwk rmnet.RetrievalMarketNetwork` is the same interface for creating and handling deal streams -as for [constructing a RetrievalClient](#RetrievalClient). -* `pieceStore piecestore.PieceStore` is the database of deals and pieces associated with them. -See this repo's [piecestore module](../piecestore). -* `bs blockstore.Blockstore` is the same interface as for -[constructing a RetrievalClient](#RetrievalClient). -* `ds datastore.Batching` is the same batching datastore interface as for -[constructing a RetrievalClient](#RetrievalClient). 
- -## Technical Documentation - -* [GoDoc](https://godoc.org/github.com/filecoin-project/go-fil-markets/retrievalmarket) contains an architectural overview and robust API documentation - -* Retrieval Client FSM diagram: - -[![Diagram of RetrievalClientFSM](../docs/retrievalclient.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg) - - -* Retrieval Provider FSM diagram: - -[![Diagram of RetrievalClientFSM](../docs/retrievalprovider.mmd.png)](https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/common.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/common.go deleted file mode 100644 index 78df09bab9..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/common.go +++ /dev/null @@ -1,22 +0,0 @@ -package retrievalmarket - -import ( - "bytes" - - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/codec/dagcbor" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - cbg "github.com/whyrusleeping/cbor-gen" -) - -// DecodeNode validates and computes a decoded ipld.Node selector from the -// provided cbor-encoded selector -func DecodeNode(defnode *cbg.Deferred) (ipld.Node, error) { - reader := bytes.NewReader(defnode.Raw) - nb := basicnode.Style.Any.NewBuilder() - err := dagcbor.Decoder(nb, reader) - if err != nil { - return nil, err - } - return nb.Build(), nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/discovery.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/discovery.go deleted file mode 100644 index 824a0a6c5b..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/discovery.go +++ /dev/null @@ -1,15 +0,0 @@ -package discovery - -import ( - cbor "github.com/ipfs/go-ipld-cbor" - - 
"github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -func init() { - cbor.RegisterCborType(retrievalmarket.RetrievalPeer{}) -} - -func Multi(r retrievalmarket.PeerResolver) retrievalmarket.PeerResolver { // TODO: actually support multiple mechanisms - return r -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/local.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/local.go deleted file mode 100644 index a9f4551f4e..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/local.go +++ /dev/null @@ -1,80 +0,0 @@ -package discovery - -import ( - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - dshelp "github.com/ipfs/go-ipfs-ds-help" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -type Local struct { - ds datastore.Datastore -} - -func NewLocal(ds datastore.Batching) *Local { - return &Local{ds: ds} -} - -func (l *Local) AddPeer(cid cid.Cid, peer retrievalmarket.RetrievalPeer) error { - key := dshelp.MultihashToDsKey(cid.Hash()) - exists, err := l.ds.Has(key) - if err != nil { - return err - } - - var newRecord []byte - - if !exists { - newRecord, err = cbor.DumpObject([]retrievalmarket.RetrievalPeer{peer}) - if err != nil { - return err - } - } else { - entry, err := l.ds.Get(key) - if err != nil { - return err - } - var peerList []retrievalmarket.RetrievalPeer - if err = cbor.DecodeInto(entry, &peerList); err != nil { - return err - } - if hasPeer(peerList, peer) { - return nil - } - peerList = append(peerList, peer) - newRecord, err = cbor.DumpObject(peerList) - if err != nil { - return err - } - } - - return l.ds.Put(key, newRecord) -} - -func hasPeer(peerList []retrievalmarket.RetrievalPeer, peer retrievalmarket.RetrievalPeer) bool { - for _, p := range peerList { - if p == peer { - return true - } - } - return false -} - -func (l *Local) GetPeers(payloadCID cid.Cid) 
([]retrievalmarket.RetrievalPeer, error) { - entry, err := l.ds.Get(dshelp.MultihashToDsKey(payloadCID.Hash())) - if err == datastore.ErrNotFound { - return []retrievalmarket.RetrievalPeer{}, nil - } - if err != nil { - return nil, err - } - var peerList []retrievalmarket.RetrievalPeer - if err := cbor.DecodeInto(entry, &peerList); err != nil { - return nil, err - } - return peerList, nil -} - -var _ retrievalmarket.PeerResolver = &Local{} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/local_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/local_test.go deleted file mode 100644 index 8b991c761d..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery/local_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package discovery_test - -import ( - "testing" - - specst "github.com/filecoin-project/specs-actors/support/testing" - "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - "gotest.tools/assert" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery" - "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestLocal_AddPeer(t *testing.T) { - - peer1 := retrievalmarket.RetrievalPeer{ - Address: specst.NewIDAddr(t, 1), - ID: peer.NewPeerRecord().PeerID, - } - peer2 := retrievalmarket.RetrievalPeer{ - Address: specst.NewIDAddr(t, 2), - ID: peer.NewPeerRecord().PeerID, - } - testCases := []struct { - name string - peers2add []retrievalmarket.RetrievalPeer - expPeers []retrievalmarket.RetrievalPeer - }{ - { - name: "can add 3 peers", - peers2add: []retrievalmarket.RetrievalPeer{peer1, peer2}, - expPeers: []retrievalmarket.RetrievalPeer{peer1, peer2}, - }, - { - name: "can add same peer without duping", - peers2add: []retrievalmarket.RetrievalPeer{peer1, peer1}, - expPeers: 
[]retrievalmarket.RetrievalPeer{peer1}, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - ds := datastore.NewMapDatastore() - l := discovery.NewLocal(ds) - payloadCID := shared_testutil.GenerateCids(1)[0] - for _, testpeer := range tc.peers2add { - require.NoError(t, l.AddPeer(payloadCID, testpeer)) - } - actualPeers, err := l.GetPeers(payloadCID) - require.NoError(t, err) - assert.Equal(t, len(tc.expPeers), len(actualPeers)) - assert.Equal(t, tc.expPeers[0], actualPeers[0]) - }) - } -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/doc.go deleted file mode 100644 index 06a445ea52..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Package retrievalmarket implements the Filecoin retrieval protocol. - -An overview of the retrieval protocol can be found in the Filecoin specification: - -https://filecoin-project.github.io/specs/#systems__filecoin_markets__retrieval_market - -The following architectural components provide a brief overview of the design of -the retrieval market module: - -Public Interfaces And Node Dependencies - -While retrieval deals primarily happen off-chain, there are some chain operations -that must be performed by a Filecoin node implementation. The module is intended to separate -the primarily off-chain retrieval deal flow from the on-chain operations related primarily -to payment channels, the mechanism for getting paid for retrieval deals. - -As such for both the client and the provider in the retrieval market, the module defines a top level -public interface which it provides an implementation for, and a node interface that must be implemented -by the Filecoin node itself, and provided as a dependency. 
These node interfaces provide a universal way to -talk to potentially multiple different Filecoin node implementations, and can be implemented as using HTTP -or other interprocess communication to talk to a node implementation running in a different process. - -The top level interfaces this package implements are RetrievalClient & RetrievalProvider. The dependencies the Filecoin -node is expected to implement are RetrievalClientNode & RetrievalProviderNode. Further documentation of exactly what those -dependencies should do can be found in the readme. - -Finite State Machines - -While retrieval deals in general should be fairly fast, making a retrieval deal is still an asynchronous process. -As documented in the Filecoin spec, the basic architecture of the Filecoin retrieval protocol is incremental payments. -Because neither client nor provider trust each other, we bootstrap trust by essentially paying in small increments as we receive -data. The client only sends payment when it verifies data and the provider only sends more data when it receives payment. -Not surprisingly, many things can go wrong along the way. To manage this back and forth asynchronous process, -we use finite state machines that update deal state when discrete events occur. State updates -always persist state to disk. This means we have a permanent record of exactly what's going on with deals at any time, -and we can ideally survive our Filecoin processes shutting down and restarting. - -The following diagrams visualize the statemachine flows for the client and the provider: - -Client FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg - -Provider FSM - https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg - -Identifying Retrieval Providers - -The RetrievalClient provides two functions to locate a provider from which to retrieve data. 
- -`FindProviders` returns a list of retrieval peers who may have the data your looking for. FindProviders delegates its work to -an implementation of the PeerResolver interface. - -`Query` queries a specific retrieval provider to find out definitively if they have the requested data and if so, the -parameters they will accept for a retrieval deal. - -Deal Flow - -The primary mechanism for initiating storage deals is the `Retrieve` method on the RetrievalClient. - -When `Retrieve` is called, it allocates a new DealID from its stored counter, constructs a DealProposal, sends -the deal proposal to the provider, initiates tracking of deal state and hands the deal to the Client FSM, -and returns the DealID which constitutes the identifier for that deal. - -The Retrieval provider receives the deal in `HandleDealStream`. `HandleDealStream` initiates tracking of deal state -on the Provider side and hands the deal to the Provider FSM, which handles the rest of deal flow. - -From this point forward, deal negotiation is completely asynchronous and runs in the FSMs. - -A user of the modules can monitor deal progress through `SubscribeToEvents` methods on RetrievalClient and RetrievalProvider, -or by simply calling `ListDeals` to get all deal statuses. - -The FSMs implement every remaining step in deal negotiation. Importantly, the RetrievalProvider delegates unsealing sectors -back to the node via the `UnsealSector` method (the node itself likely delegates management of sectors and sealing to an -implementation of the Storage Mining subsystem of the Filecoin spec). Sectors are unsealed on an as needed basis using -the `PieceStore` to locate sectors that contain data related to the deal. 
- -Major Dependencies - -Other libraries in go-fil-markets: - -https://github.com/filecoin-project/go-fil-markets/tree/master/piecestore - used to locate data for deals in sectors -https://github.com/filecoin-project/go-fil-markets/tree/master/shared - types and utility functions shared with -storagemarket package - -Other Filecoin Repos: - -https://github.com/filecoin-project/go-data-transfer - for transferring data, via go-graphsync -https://github.com/filecoin-project/go-statemachine - a finite state machine that tracks deal state -https://github.com/filecoin-project/go-storedcounter - for generating and persisting unique deal IDs -https://github.com/filecoin-project/specs-actors - the Filecoin actors - -IPFS Project Repos: - -https://github.com/ipfs/go-graphsync - used by go-data-transfer -https://github.com/ipfs/go-datastore - for persisting statemachine state for deals -https://github.com/ipfs/go-ipfs-blockstore - for storing and retrieving block data for deals - -Other Repos: - -https://github.com/libp2p/go-libp2p) the network over which retrieval deal data is exchanged. -https://github.com/hannahhoward/go-pubsub - for pub/sub notifications external to the statemachine - -Root package - -This top level package defines top level enumerations and interfaces. The primary implementation -lives in the `impl` directory - -*/ -package retrievalmarket diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/doc.go deleted file mode 100644 index ab4c2879fc..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package blockio constains logic for a retrieval provider or client to traverse, -read and verify that blocks received are in a dag in the expected traversal order. 
*/ -package blockio diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/reader.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/reader.go deleted file mode 100644 index 5ffbd2e570..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/reader.go +++ /dev/null @@ -1,61 +0,0 @@ -package blockio - -import ( - "bytes" - "context" - "io" - - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -// BlockReader is any data struct that can be read block by block -type BlockReader interface { - // ReadBlock reads data from a single block. Data is nil - // for intermediate nodes - ReadBlock(context.Context) (retrievalmarket.Block, bool, error) -} - -// SelectorBlockReader reads an ipld data structure in individual blocks -// allowing the next block to be read and then advancing no further -type SelectorBlockReader struct { - root ipld.Link - selector ipld.Node - loader ipld.Loader - traverser *Traverser -} - -// NewSelectorBlockReader returns a new Block reader starting at the given -// root and using the given loader -func NewSelectorBlockReader(root ipld.Link, sel ipld.Node, loader ipld.Loader) BlockReader { - return &SelectorBlockReader{root, sel, loader, nil} -} - -// ReadBlock reads the next block in the IPLD traversal -func (sr *SelectorBlockReader) ReadBlock(ctx context.Context) (retrievalmarket.Block, bool, error) { - - if sr.traverser == nil { - sr.traverser = NewTraverser(sr.root, sr.selector) - sr.traverser.Start(ctx) - } - lnk, lnkCtx := sr.traverser.CurrentRequest(ctx) - reader, err := sr.loader(lnk, lnkCtx) - if err != nil { - sr.traverser.Error(ctx, err) - return retrievalmarket.EmptyBlock, false, err - } - var buf bytes.Buffer - _, err = io.Copy(&buf, reader) - if err != nil { - sr.traverser.Error(ctx, err) - return retrievalmarket.EmptyBlock, 
false, err - } - block := retrievalmarket.Block{ - Data: buf.Bytes(), - Prefix: lnk.(cidlink.Link).Cid.Prefix().Bytes(), - } - err = sr.traverser.Advance(ctx, &buf) - return block, sr.traverser.IsComplete(ctx), err -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/reader_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/reader_test.go deleted file mode 100644 index eaf8329c1b..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/reader_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package blockio_test - -import ( - "context" - "testing" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio" - "github.com/filecoin-project/go-fil-markets/shared" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestSelectorReader(t *testing.T) { - ctx := context.Background() - testdata := tut.NewTestIPLDTree() - - t.Run("reads correctly", func(t *testing.T) { - reader := blockio.NewSelectorBlockReader(testdata.RootNodeLnk, shared.AllSelector(), testdata.Loader) - - checkReadSequence(ctx, t, reader, []blocks.Block{ - testdata.RootBlock, - testdata.LeafAlphaBlock, - testdata.MiddleMapBlock, - testdata.LeafAlphaBlock, - testdata.MiddleListBlock, - testdata.LeafAlphaBlock, - testdata.LeafAlphaBlock, - testdata.LeafBetaBlock, - testdata.LeafAlphaBlock, - }) - }) - -} - -func checkReadSequence(ctx context.Context, t *testing.T, reader blockio.BlockReader, expectedBlks []blocks.Block) { - for i := range expectedBlks { - block, done, err := reader.ReadBlock(ctx) - require.NoError(t, err) - if i == len(expectedBlks)-1 { - require.True(t, done) - } else { - require.False(t, done) - } - prefix, err := cid.PrefixFromBytes(block.Prefix) - require.NoError(t, err) - - c, err := prefix.Sum(block.Data) - 
require.NoError(t, err) - - require.Equal(t, c, expectedBlks[i].Cid()) - } -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/traverser.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/traverser.go deleted file mode 100644 index d08867a062..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/traverser.go +++ /dev/null @@ -1,170 +0,0 @@ -package blockio - -import ( - "context" - "errors" - "io" - - "github.com/ipld/go-ipld-prime" - dagpb "github.com/ipld/go-ipld-prime-proto" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal" - "github.com/ipld/go-ipld-prime/traversal/selector" -) - -type state struct { - isDone bool - currentLink ipld.Link - currentContext ipld.LinkContext -} - -type nextResponse struct { - input io.Reader - err error -} - -// Traverser is a class to perform a selector traversal that stops every time a new block is loaded -// and waits for manual input (in the form of advance or error) -type Traverser struct { - root ipld.Link - selector ipld.Node - currentLink ipld.Link - currentContext ipld.LinkContext - isDone bool - awaitRequest chan struct{} - stateChan chan state - responses chan nextResponse -} - -func (t *Traverser) checkState(ctx context.Context) { - select { - case <-t.awaitRequest: - select { - case <-ctx.Done(): - case newState := <-t.stateChan: - t.isDone = newState.isDone - t.currentLink = newState.currentLink - t.currentContext = newState.currentContext - } - default: - } -} - -// NewTraverser creates a new traverser -func NewTraverser(root ipld.Link, selector ipld.Node) *Traverser { - return &Traverser{ - root: root, - selector: selector, - awaitRequest: make(chan struct{}, 1), - stateChan: make(chan state, 1), - responses: make(chan nextResponse), - } -} - -func (t *Traverser) writeDone(ctx context.Context) { - select { - case <-ctx.Done(): - case t.stateChan <- 
state{true, nil, ipld.LinkContext{}}: - } -} - -// Start initiates the traversal (run in a go routine because the regular -// selector traversal expects a call back) -func (t *Traverser) Start(ctx context.Context) { - select { - case <-ctx.Done(): - return - case t.awaitRequest <- struct{}{}: - } - go func() { - var chooser traversal.LinkTargetNodeStyleChooser = dagpb.AddDagPBSupportToChooser(func(ipld.Link, ipld.LinkContext) (ipld.NodeStyle, error) { - return basicnode.Style.Any, nil - }) - loader := func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { - select { - case <-ctx.Done(): - return nil, errors.New("Context cancelled") - case t.stateChan <- state{false, lnk, lnkCtx}: - } - select { - case <-ctx.Done(): - return nil, errors.New("Context cancelled") - case response := <-t.responses: - return response.input, response.err - } - } - style, err := chooser(t.root, ipld.LinkContext{}) - if err != nil { - t.writeDone(ctx) - return - } - builder := style.NewBuilder() - err = t.root.Load(ctx, ipld.LinkContext{}, builder, loader) - if err != nil { - t.writeDone(ctx) - return - } - nd := builder.Build() - sel, err := selector.ParseSelector(t.selector) - if err != nil { - t.writeDone(ctx) - return - } - _ = traversal.Progress{ - Cfg: &traversal.Config{ - Ctx: ctx, - LinkLoader: loader, - LinkTargetNodeStyleChooser: chooser, - }, - }.WalkAdv(nd, sel, func(traversal.Progress, ipld.Node, traversal.VisitReason) error { return nil }) - t.writeDone(ctx) - }() - -} - -// IsComplete returns true if a traversal is complete -func (t *Traverser) IsComplete(ctx context.Context) bool { - t.checkState(ctx) - return t.isDone -} - -// CurrentRequest returns the current block load waiting to be fulfilled in order -// to advance further -func (t *Traverser) CurrentRequest(ctx context.Context) (ipld.Link, ipld.LinkContext) { - t.checkState(ctx) - return t.currentLink, t.currentContext -} - -// Advance advances the traversal with an io.Reader for the next requested block 
-func (t *Traverser) Advance(ctx context.Context, reader io.Reader) error { - if t.IsComplete(ctx) { - return errors.New("cannot advance when done") - } - select { - case <-ctx.Done(): - return errors.New("context cancelled") - case t.awaitRequest <- struct{}{}: - } - select { - case <-ctx.Done(): - return errors.New("context cancelled") - case t.responses <- nextResponse{reader, nil}: - } - return nil -} - -// Error aborts the traversal with an error -func (t *Traverser) Error(ctx context.Context, err error) { - if t.IsComplete(ctx) { - return - } - select { - case <-ctx.Done(): - return - case t.awaitRequest <- struct{}{}: - } - select { - case <-ctx.Done(): - case t.responses <- nextResponse{nil, err}: - } -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/traverser_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/traverser_test.go deleted file mode 100644 index e9aaa14597..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/traverser_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package blockio_test - -import ( - "bytes" - "context" - "testing" - - blocks "github.com/ipfs/go-block-format" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio" - "github.com/filecoin-project/go-fil-markets/shared" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestTraverser(t *testing.T) { - ctx := context.Background() - testdata := tut.NewTestIPLDTree() - - t.Run("traverses correctly", func(t *testing.T) { - traverser := blockio.NewTraverser(testdata.RootNodeLnk, shared.AllSelector()) - traverser.Start(ctx) - checkTraverseSequence(ctx, t, traverser, []blocks.Block{ - testdata.RootBlock, - testdata.LeafAlphaBlock, - testdata.MiddleMapBlock, - testdata.LeafAlphaBlock, - testdata.MiddleListBlock, - 
testdata.LeafAlphaBlock, - testdata.LeafAlphaBlock, - testdata.LeafBetaBlock, - testdata.LeafAlphaBlock, - }) - }) - -} - -func checkTraverseSequence(ctx context.Context, t *testing.T, traverser *blockio.Traverser, expectedBlks []blocks.Block) { - for _, blk := range expectedBlks { - require.False(t, traverser.IsComplete(ctx)) - lnk, _ := traverser.CurrentRequest(ctx) - require.Equal(t, lnk.(cidlink.Link).Cid, blk.Cid()) - err := traverser.Advance(ctx, bytes.NewBuffer(blk.RawData())) - require.NoError(t, err) - } - require.True(t, traverser.IsComplete(ctx)) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/verify.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/verify.go deleted file mode 100644 index a72ef69152..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/verify.go +++ /dev/null @@ -1,56 +0,0 @@ -package blockio - -import ( - "bytes" - "context" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -// BlockVerifier verifies blocks received are part of a dag, in the order -// the dag is expected to be traversed -type BlockVerifier interface { - Verify(context.Context, blocks.Block) (done bool, err error) -} - -// SelectorVerifier verifies a traversal of an IPLD data structure by feeding blocks in -// in the order they are traversed in a dag walk -type SelectorVerifier struct { - root ipld.Link - selector ipld.Node - traverser *Traverser -} - -// NewSelectorVerifier returns a new selector based block verifier -func NewSelectorVerifier(root ipld.Link, selector ipld.Node) BlockVerifier { - return &SelectorVerifier{root, selector, nil} -} - -// Verify verifies that the given block is the next one needed for the current traversal -// and returns true if the traversal is done -func (sv 
*SelectorVerifier) Verify(ctx context.Context, blk blocks.Block) (done bool, err error) { - if sv.traverser == nil { - sv.traverser = NewTraverser(sv.root, sv.selector) - sv.traverser.Start(ctx) - } - if sv.traverser.IsComplete(ctx) { - return false, retrievalmarket.ErrVerification - } - lnk, _ := sv.traverser.CurrentRequest(ctx) - c := lnk.(cidlink.Link).Cid - if !c.Equals(blk.Cid()) { - sv.traverser.Error(ctx, retrievalmarket.ErrVerification) - return false, retrievalmarket.ErrVerification - } - err = sv.traverser.Advance(ctx, bytes.NewBuffer(blk.RawData())) - if err != nil { - return false, err - } - return sv.traverser.IsComplete(ctx), nil -} - -var _ BlockVerifier = &SelectorVerifier{} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/verify_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/verify_test.go deleted file mode 100644 index 9e68391031..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio/verify_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package blockio_test - -import ( - "context" - "testing" - - blocks "github.com/ipfs/go-block-format" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio" - "github.com/filecoin-project/go-fil-markets/shared" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestSelectorVerifier(t *testing.T) { - ctx := context.Background() - testdata := tut.NewTestIPLDTree() - sel := shared.AllSelector() - t.Run("verifies correctly", func(t *testing.T) { - verifier := blockio.NewSelectorVerifier(testdata.RootNodeLnk, sel) - checkVerifySequence(ctx, t, verifier, false, []blocks.Block{ - testdata.RootBlock, - testdata.LeafAlphaBlock, - testdata.MiddleMapBlock, - testdata.LeafAlphaBlock, - testdata.MiddleListBlock, - testdata.LeafAlphaBlock, - testdata.LeafAlphaBlock, - testdata.LeafBetaBlock, - testdata.LeafAlphaBlock, - }) 
- }) - - t.Run("fed incorrect block", func(t *testing.T) { - t.Run("right away", func(t *testing.T) { - verifier := blockio.NewSelectorVerifier(testdata.RootNodeLnk, sel) - checkVerifySequence(ctx, t, verifier, true, []blocks.Block{ - testdata.LeafAlphaBlock, - }) - }) - t.Run("in middle", func(t *testing.T) { - verifier := blockio.NewSelectorVerifier(testdata.RootNodeLnk, sel) - checkVerifySequence(ctx, t, verifier, true, []blocks.Block{ - testdata.RootBlock, - testdata.LeafAlphaBlock, - testdata.MiddleMapBlock, - testdata.MiddleListBlock, - }) - }) - t.Run("at end", func(t *testing.T) { - verifier := blockio.NewSelectorVerifier(testdata.RootNodeLnk, sel) - checkVerifySequence(ctx, t, verifier, true, []blocks.Block{ - testdata.RootBlock, - testdata.LeafAlphaBlock, - testdata.MiddleMapBlock, - testdata.LeafAlphaBlock, - testdata.MiddleListBlock, - testdata.LeafAlphaBlock, - testdata.LeafAlphaBlock, - testdata.LeafBetaBlock, - testdata.LeafBetaBlock, - }) - }) - }) - -} - -func checkVerifySequence(ctx context.Context, t *testing.T, verifier blockio.BlockVerifier, errorOnLast bool, blks []blocks.Block) { - for i, b := range blks { - done, err := verifier.Verify(ctx, b) - if i < len(blks)-1 { - require.False(t, done) - require.NoError(t, err) - } else { - if errorOnLast { - require.False(t, done) - require.Error(t, err) - } else { - require.True(t, done) - require.NoError(t, err) - } - } - } -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing/blockunsealing.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing/blockunsealing.go deleted file mode 100644 index 78e66135ae..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing/blockunsealing.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Package blockunsealing contains the logic needed to unseal sealed blocks for retrieval -*/ -package blockunsealing - -import ( - "bytes" - "context" - "io" - - 
"github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/pieceio" - "github.com/filecoin-project/go-fil-markets/piecestore" -) - -// LoaderWithUnsealing is an ipld.Loader function that will also unseal pieces as needed -type LoaderWithUnsealing interface { - Load(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) -} - -type loaderWithUnsealing struct { - ctx context.Context - bs blockstore.Blockstore - pieceStore piecestore.PieceStore - carIO pieceio.CarIO - unsealer UnsealingFunc - pieceCid *cid.Cid -} - -// UnsealingFunc is a function that unseals sectors at a given offset and length -type UnsealingFunc func(ctx context.Context, sectorId uint64, offset uint64, length uint64) (io.ReadCloser, error) - -// NewLoaderWithUnsealing creates a loader that will attempt to read blocks from the blockstore but unseal the piece -// as needed using the passed unsealing function -func NewLoaderWithUnsealing(ctx context.Context, bs blockstore.Blockstore, pieceStore piecestore.PieceStore, carIO pieceio.CarIO, unsealer UnsealingFunc, pieceCid *cid.Cid) LoaderWithUnsealing { - return &loaderWithUnsealing{ctx, bs, pieceStore, carIO, unsealer, pieceCid} -} - -func (lu *loaderWithUnsealing) Load(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { - cl, ok := lnk.(cidlink.Link) - if !ok { - return nil, xerrors.New("Unsupported link type") - } - c := cl.Cid - // check if intermediate blockstore has cid - has, err := lu.bs.Has(c) - if err != nil { - return nil, xerrors.Errorf("attempting to load cid from blockstore: %w", err) - } - - // attempt unseal if block is not in blockstore - if !has { - err = lu.attemptUnseal(c) - if err != nil { - return nil, err - } - } - - blk, err := lu.bs.Get(c) - if err != nil { - return nil, xerrors.Errorf("attempting to load cid from blockstore: %w", err) - } - - 
return bytes.NewReader(blk.RawData()), nil -} - -func (lu *loaderWithUnsealing) attemptUnseal(c cid.Cid) error { - var err error - var reader io.Reader - var cidInfo piecestore.CIDInfo - - // if the deal proposal specified a Piece CID, only check that piece - if lu.pieceCid != nil { - reader, err = lu.firstSuccessfulUnsealByPieceCID(*lu.pieceCid) - } else { - cidInfo, err = lu.pieceStore.GetCIDInfo(c) - if err != nil { - return xerrors.Errorf("error looking up information on CID: %w", err) - } - - reader, err = lu.firstSuccessfulUnseal(cidInfo) - } - // no successful unseal - if err != nil { - return xerrors.Errorf("Unable to unseal piece: %w", err) - } - - // attempt to load data as a car file into the block store - _, err = lu.carIO.LoadCar(lu.bs, reader) - if err != nil { - return xerrors.Errorf("attempting to read Car file: %w", err) - } - - return nil -} - -func (lu *loaderWithUnsealing) firstSuccessfulUnseal(payloadCidInfo piecestore.CIDInfo) (io.ReadCloser, error) { - var lastErr error - for _, pieceBlockLocation := range payloadCidInfo.PieceBlockLocations { - reader, err := lu.firstSuccessfulUnsealByPieceCID(pieceBlockLocation.PieceCID) - if err == nil { - return reader, nil - } - lastErr = err - } - return nil, lastErr -} - -func (lu *loaderWithUnsealing) firstSuccessfulUnsealByPieceCID(pieceCID cid.Cid) (io.ReadCloser, error) { - pieceInfo, err := lu.pieceStore.GetPieceInfo(pieceCID) - if err != nil { - return nil, err - } - - // try to unseal data from all pieces - lastErr := xerrors.New("no sectors found to unseal from") - for _, deal := range pieceInfo.Deals { - reader, err := lu.unsealer(lu.ctx, deal.SectorID, deal.Offset, deal.Length) - if err == nil { - return reader, nil - } - lastErr = err - } - return nil, lastErr -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing/blockunsealing_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing/blockunsealing_test.go 
deleted file mode 100644 index c3f1d40712..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing/blockunsealing_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package blockunsealing_test - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "math/rand" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - "github.com/filecoin-project/go-fil-markets/shared" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestNewLoaderWithUnsealing(t *testing.T) { - ctx := context.Background() - cio := cario.NewCarIO() - testdata := tut.NewTestIPLDTree() - var carBuffer bytes.Buffer - err := cio.WriteCar(ctx, testdata, testdata.RootNodeLnk.(cidlink.Link).Cid, shared.AllSelector(), &carBuffer) - require.NoError(t, err) - carData := carBuffer.Bytes() - - setupBlockStore := func(t *testing.T) bstore.Blockstore { - bs := bstore.NewBlockstore(dss.MutexWrap(datastore.NewMapDatastore())) - err = bs.Put(testdata.RootBlock) - require.NoError(t, err) - return bs - } - deal1 := piecestore.DealInfo{ - DealID: abi.DealID(rand.Uint64()), - SectorID: rand.Uint64(), - Offset: rand.Uint64(), - Length: rand.Uint64(), - } - deal2 := piecestore.DealInfo{ - DealID: abi.DealID(rand.Uint64()), - SectorID: rand.Uint64(), - Offset: rand.Uint64(), - Length: rand.Uint64(), - } - pieceCID := tut.GenerateCids(1)[0] - piece := piecestore.PieceInfo{ - PieceCID: pieceCID, - Deals: 
[]piecestore.DealInfo{ - deal1, - deal2, - }, - } - deal3 := piecestore.DealInfo{ - DealID: abi.DealID(rand.Uint64()), - SectorID: rand.Uint64(), - Offset: rand.Uint64(), - Length: rand.Uint64(), - } - pieceCID2 := tut.GenerateCids(1)[0] - piece2 := piecestore.PieceInfo{ - PieceCID: pieceCID2, - Deals: []piecestore.DealInfo{ - deal3, - }, - } - cidInfo := piecestore.CIDInfo{ - PieceBlockLocations: []piecestore.PieceBlockLocation{ - { - PieceCID: pieceCID, - }, - { - PieceCID: pieceCID2, - }, - }, - } - - checkSuccessLoad := func(t *testing.T, loaderWithUnsealing blockunsealing.LoaderWithUnsealing, lnk ipld.Link) { - read, err := loaderWithUnsealing.Load(lnk, ipld.LinkContext{}) - require.NoError(t, err) - readData, err := ioutil.ReadAll(read) - require.NoError(t, err) - c, err := lnk.(cidlink.Link).Prefix().Sum(readData) - require.NoError(t, err) - require.Equal(t, c.Bytes(), lnk.(cidlink.Link).Bytes()) - } - - t.Run("when intermediate blockstore has block", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - pieceStore := tut.NewTestPieceStore() - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - checkSuccessLoad(t, loaderWithUnsealing, testdata.RootNodeLnk) - unsealer.VerifyExpectations(t) - }) - - t.Run("when caller has provided a PieceCID", func(t *testing.T) { - t.Run("succeeds if it can locate the piece", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - unsealer.ExpectUnseal(deal1.SectorID, deal1.Offset, deal1.Length, carData) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), cidInfo) - pieceStore.ExpectPiece(pieceCID, piece) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, &pieceCID) - checkSuccessLoad(t, loaderWithUnsealing, testdata.MiddleMapNodeLnk) - 
unsealer.VerifyExpectations(t) - }) - - t.Run("fails if it cannot locate the piece", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - pieceStore := tut.NewTestPieceStoreWithParams(tut.TestPieceStoreParams{GetPieceInfoError: fmt.Errorf("not found")}) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, &pieceCID) - _, err := loaderWithUnsealing.Load(testdata.MiddleMapNodeLnk, ipld.LinkContext{}) - require.Error(t, err) - unsealer.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - }) - }) - - t.Run("when intermediate blockstore does not have block", func(t *testing.T) { - t.Run("unsealing success on first ref", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - unsealer.ExpectUnseal(deal1.SectorID, deal1.Offset, deal1.Length, carData) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), cidInfo) - pieceStore.ExpectPiece(pieceCID, piece) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - checkSuccessLoad(t, loaderWithUnsealing, testdata.MiddleMapNodeLnk) - unsealer.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - }) - - t.Run("unsealing success on later ref", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - unsealer.ExpectFailedUnseal(deal1.SectorID, deal1.Offset, deal1.Length) - unsealer.ExpectUnseal(deal2.SectorID, deal2.Offset, deal2.Length, carData) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), cidInfo) - pieceStore.ExpectPiece(pieceCID, piece) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - checkSuccessLoad(t, loaderWithUnsealing, testdata.MiddleMapNodeLnk) - unsealer.VerifyExpectations(t) - 
pieceStore.VerifyExpectations(t) - }) - - t.Run("unsealing success on second piece", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - unsealer.ExpectFailedUnseal(deal1.SectorID, deal1.Offset, deal1.Length) - unsealer.ExpectFailedUnseal(deal2.SectorID, deal2.Offset, deal2.Length) - unsealer.ExpectUnseal(deal3.SectorID, deal3.Offset, deal3.Length, carData) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), cidInfo) - pieceStore.ExpectPiece(pieceCID, piece) - pieceStore.ExpectPiece(pieceCID2, piece2) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - checkSuccessLoad(t, loaderWithUnsealing, testdata.MiddleMapNodeLnk) - unsealer.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - }) - - t.Run("piece lookup success on second piece", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - unsealer.ExpectUnseal(deal3.SectorID, deal3.Offset, deal3.Length, carData) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), cidInfo) - pieceStore.ExpectMissingPiece(pieceCID) - pieceStore.ExpectPiece(pieceCID2, piece2) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - checkSuccessLoad(t, loaderWithUnsealing, testdata.MiddleMapNodeLnk) - unsealer.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - }) - - t.Run("fails all unsealing", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - unsealer.ExpectFailedUnseal(deal1.SectorID, deal1.Offset, deal1.Length) - unsealer.ExpectFailedUnseal(deal2.SectorID, deal2.Offset, deal2.Length) - unsealer.ExpectFailedUnseal(deal3.SectorID, deal3.Offset, deal3.Length) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), 
cidInfo) - pieceStore.ExpectPiece(pieceCID, piece) - pieceStore.ExpectPiece(pieceCID2, piece2) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - _, err := loaderWithUnsealing.Load(testdata.MiddleMapNodeLnk, ipld.LinkContext{}) - require.Error(t, err) - unsealer.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - }) - - t.Run("fails looking up cid info", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectMissingCID(testdata.MiddleMapBlock.Cid()) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - _, err := loaderWithUnsealing.Load(testdata.MiddleMapNodeLnk, ipld.LinkContext{}) - require.Error(t, err) - unsealer.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - }) - - t.Run("fails looking up all pieces", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), cidInfo) - pieceStore.ExpectMissingPiece(pieceCID) - pieceStore.ExpectMissingPiece(pieceCID2) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - _, err := loaderWithUnsealing.Load(testdata.MiddleMapNodeLnk, ipld.LinkContext{}) - require.Error(t, err) - unsealer.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - }) - - t.Run("car io failure", func(t *testing.T) { - bs := setupBlockStore(t) - unsealer := testnodes.NewTestRetrievalProviderNode() - randBytes := make([]byte, 100) - _, err := rand.Read(randBytes) - require.NoError(t, err) - unsealer.ExpectUnseal(deal1.SectorID, deal1.Offset, deal1.Length, randBytes) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(testdata.MiddleMapBlock.Cid(), cidInfo) - 
pieceStore.ExpectPiece(pieceCID, piece) - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(ctx, bs, pieceStore, cio, unsealer.UnsealSector, nil) - _, err = loaderWithUnsealing.Load(testdata.MiddleMapNodeLnk, ipld.LinkContext{}) - require.Error(t, err) - unsealer.VerifyExpectations(t) - }) - - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client.go deleted file mode 100644 index 1a5c5c7713..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client.go +++ /dev/null @@ -1,327 +0,0 @@ -package retrievalimpl - -import ( - "context" - "reflect" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log/v2" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/shared" - - "github.com/filecoin-project/go-storedcounter" -) - -var log = logging.Logger("retrieval") - -// Client is the production implementation of the RetrievalClient interface -type Client struct { - network rmnet.RetrievalMarketNetwork - bs blockstore.Blockstore - node retrievalmarket.RetrievalClientNode - storedCounter *storedcounter.StoredCounter - - subscribersLk sync.RWMutex - subscribers []retrievalmarket.ClientSubscriber - resolver 
retrievalmarket.PeerResolver - blockVerifiers map[retrievalmarket.DealID]blockio.BlockVerifier - dealStreams map[retrievalmarket.DealID]rmnet.RetrievalDealStream - stateMachines fsm.Group -} - -var _ retrievalmarket.RetrievalClient = &Client{} - -// NewClient creates a new retrieval client -func NewClient( - network rmnet.RetrievalMarketNetwork, - bs blockstore.Blockstore, - node retrievalmarket.RetrievalClientNode, - resolver retrievalmarket.PeerResolver, - ds datastore.Batching, - storedCounter *storedcounter.StoredCounter, -) (retrievalmarket.RetrievalClient, error) { - c := &Client{ - network: network, - bs: bs, - node: node, - resolver: resolver, - storedCounter: storedCounter, - dealStreams: make(map[retrievalmarket.DealID]rmnet.RetrievalDealStream), - blockVerifiers: make(map[retrievalmarket.DealID]blockio.BlockVerifier), - } - stateMachines, err := fsm.New(ds, fsm.Parameters{ - Environment: &clientDealEnvironment{c}, - StateType: retrievalmarket.ClientDealState{}, - StateKeyField: "Status", - Events: clientstates.ClientEvents, - StateEntryFuncs: clientstates.ClientStateEntryFuncs, - Notifier: c.notifySubscribers, - }) - if err != nil { - return nil, err - } - c.stateMachines = stateMachines - return c, nil -} - -// V0 - -// FindProviders uses PeerResolver interface to locate a list of providers who may have a given payload CID. -func (c *Client) FindProviders(payloadCID cid.Cid) []retrievalmarket.RetrievalPeer { - peers, err := c.resolver.GetPeers(payloadCID) - if err != nil { - log.Errorf("failed to get peers: %s", err) - return []retrievalmarket.RetrievalPeer{} - } - return peers -} - -/* -Query sends a retrieval query to a specific retrieval provider, to determine -if the provider can serve a retrieval request and what its specific parameters for -the request are. - -The client a new `RetrievalQueryStream` for the chosen peer ID, -and calls WriteQuery on it, which constructs a data-transfer message and writes it to the Query stream. 
-*/ -func (c *Client) Query(_ context.Context, p retrievalmarket.RetrievalPeer, payloadCID cid.Cid, params retrievalmarket.QueryParams) (retrievalmarket.QueryResponse, error) { - s, err := c.network.NewQueryStream(p.ID) - if err != nil { - log.Warn(err) - return retrievalmarket.QueryResponseUndefined, err - } - defer s.Close() - - err = s.WriteQuery(retrievalmarket.Query{ - PayloadCID: payloadCID, - QueryParams: params, - }) - if err != nil { - log.Warn(err) - return retrievalmarket.QueryResponseUndefined, err - } - - return s.ReadQueryResponse() -} - -/* -Retrieve initiates the retrieval deal flow, which involves multiple requests and responses - -To start this processes, the client creates a new `RetrievalDealStream`. Currently, this connection is -kept open through the entire deal until completion or failure. Make deals pauseable as well as surviving -a restart is a planned future feature. - -Retrieve should be called after using FindProviders and Query are used to identify an appropriate provider to -retrieve the deal from. The parameters identified in Query should be passed to Retrieve to ensure the -greatest likelihood the provider will accept the deal - -When called, the client takes the following actions: - -1. Creates a deal ID using the next value from its storedcounter. - -2. Constructs a `DealProposal` with deal terms - -3. Tells its statemachine to begin tracking this deal state by dealID. - -4. Constructs a `blockio.SelectorVerifier` and adds it to its dealID-keyed map of block verifiers. - -5. Triggers a `ClientEventOpen` event on its statemachine. - -From then on, the statemachine controls the deal flow in the client. Other components may listen for events in this flow by calling -`SubscribeToEvents` on the Client. 
The Client handles consuming blocks it receives from the provider, via `ConsumeBlocks` function - -Documentation of the client state machine can be found at https://godoc.org/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates -*/ -func (c *Client) Retrieve(ctx context.Context, payloadCID cid.Cid, params retrievalmarket.Params, totalFunds abi.TokenAmount, miner peer.ID, clientWallet address.Address, minerWallet address.Address) (retrievalmarket.DealID, error) { - var err error - next, err := c.storedCounter.Next() - if err != nil { - return 0, err - } - dealID := retrievalmarket.DealID(next) - - dealState := retrievalmarket.ClientDealState{ - DealProposal: retrievalmarket.DealProposal{ - PayloadCID: payloadCID, - ID: dealID, - Params: params, - }, - TotalFunds: totalFunds, - ClientWallet: clientWallet, - MinerWallet: minerWallet, - TotalReceived: 0, - CurrentInterval: params.PaymentInterval, - BytesPaidFor: 0, - PaymentRequested: abi.NewTokenAmount(0), - FundsSpent: abi.NewTokenAmount(0), - Status: retrievalmarket.DealStatusNew, - Sender: miner, - } - - // start the deal processing - err = c.stateMachines.Begin(dealState.ID, &dealState) - if err != nil { - return 0, err - } - - // open stream - s, err := c.network.NewDealStream(dealState.Sender) - if err != nil { - return 0, err - } - - c.dealStreams[dealID] = s - - sel := shared.AllSelector() - if params.Selector != nil { - sel, err = retrievalmarket.DecodeNode(params.Selector) - if err != nil { - return 0, xerrors.Errorf("selector is invalid: %w", err) - } - } - - c.blockVerifiers[dealID] = blockio.NewSelectorVerifier(cidlink.Link{Cid: dealState.DealProposal.PayloadCID}, sel) - - err = c.stateMachines.Send(dealState.ID, retrievalmarket.ClientEventOpen) - if err != nil { - s.Close() - return 0, err - } - - return dealID, nil -} - -// unsubscribeAt returns a function that removes an item from the subscribers list by comparing -// their reflect.ValueOf before pulling the item out of the 
slice. Does not preserve order. -// Subsequent, repeated calls to the func with the same Subscriber are a no-op. -func (c *Client) unsubscribeAt(sub retrievalmarket.ClientSubscriber) retrievalmarket.Unsubscribe { - return func() { - c.subscribersLk.Lock() - defer c.subscribersLk.Unlock() - curLen := len(c.subscribers) - for i, el := range c.subscribers { - if reflect.ValueOf(sub) == reflect.ValueOf(el) { - c.subscribers[i] = c.subscribers[curLen-1] - c.subscribers = c.subscribers[:curLen-1] - return - } - } - } -} - -func (c *Client) notifySubscribers(eventName fsm.EventName, state fsm.StateType) { - c.subscribersLk.RLock() - defer c.subscribersLk.RUnlock() - evt := eventName.(retrievalmarket.ClientEvent) - ds := state.(retrievalmarket.ClientDealState) - for _, cb := range c.subscribers { - cb(evt, ds) - } -} - -// SubscribeToEvents allows another component to listen for events on the RetrievalClient -// in order to track deals as they progress through the deal flow -func (c *Client) SubscribeToEvents(subscriber retrievalmarket.ClientSubscriber) retrievalmarket.Unsubscribe { - c.subscribersLk.Lock() - c.subscribers = append(c.subscribers, subscriber) - c.subscribersLk.Unlock() - - return c.unsubscribeAt(subscriber) -} - -// V1 -func (c *Client) AddMoreFunds(retrievalmarket.DealID, abi.TokenAmount) error { - panic("not implemented") -} - -func (c *Client) CancelDeal(retrievalmarket.DealID) error { - panic("not implemented") -} - -func (c *Client) RetrievalStatus(retrievalmarket.DealID) { - panic("not implemented") -} - -// ListDeals lists in all known retrieval deals -func (c *Client) ListDeals() map[retrievalmarket.DealID]retrievalmarket.ClientDealState { - var deals []retrievalmarket.ClientDealState - _ = c.stateMachines.List(&deals) - dealMap := make(map[retrievalmarket.DealID]retrievalmarket.ClientDealState) - for _, deal := range deals { - dealMap[deal.ID] = deal - } - return dealMap -} - -type clientDealEnvironment struct { - c *Client -} - -func (c 
*clientDealEnvironment) Node() retrievalmarket.RetrievalClientNode { - return c.c.node -} - -func (c *clientDealEnvironment) DealStream(dealID retrievalmarket.DealID) rmnet.RetrievalDealStream { - return c.c.dealStreams[dealID] -} - -func (c *clientDealEnvironment) ConsumeBlock(ctx context.Context, dealID retrievalmarket.DealID, block retrievalmarket.Block) (uint64, bool, error) { - prefix, err := cid.PrefixFromBytes(block.Prefix) - if err != nil { - return 0, false, err - } - - scid, err := prefix.Sum(block.Data) - if err != nil { - return 0, false, err - } - - blk, err := blocks.NewBlockWithCid(block.Data, scid) - if err != nil { - return 0, false, err - } - - verifier, ok := c.c.blockVerifiers[dealID] - if !ok { - return 0, false, xerrors.New("no block verifier found") - } - - done, err := verifier.Verify(ctx, blk) - if err != nil { - log.Warnf("block verify failed: %s", err) - return 0, false, err - } - - // TODO: Smarter out, maybe add to filestore automagically - // (Also, persist intermediate nodes) - err = c.c.bs.Put(blk) - if err != nil { - log.Warnf("block write failed: %s", err) - return 0, false, err - } - - return uint64(len(block.Data)), done, nil -} - -// ClientFSMParameterSpec is a valid set of parameters for a client deal FSM - used in doc generation -var ClientFSMParameterSpec = fsm.Parameters{ - Environment: &clientDealEnvironment{}, - StateType: retrievalmarket.ClientDealState{}, - StateKeyField: "Status", - Events: clientstates.ClientEvents, - StateEntryFuncs: clientstates.ClientStateEntryFuncs, -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client_test.go deleted file mode 100644 index a4b55e4c12..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/client_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package retrievalimpl_test - -import ( - "context" - "errors" - "testing" - - 
"github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-storedcounter" -) - -func TestClient_Query(t *testing.T) { - ctx := context.Background() - - ds := dss.MutexWrap(datastore.NewMapDatastore()) - storedCounter := storedcounter.New(ds, datastore.NewKey("nextDealID")) - bs := bstore.NewBlockstore(ds) - - pcid := tut.GenerateCids(1)[0] - expectedPeer := peer.ID("somevalue") - rpeer := retrievalmarket.RetrievalPeer{ - Address: address.TestAddress2, - ID: expectedPeer, - } - - expectedQuery := retrievalmarket.Query{ - PayloadCID: pcid, - } - - expectedQueryResponse := retrievalmarket.QueryResponse{ - Status: retrievalmarket.QueryResponseAvailable, - Size: 1234, - PaymentAddress: address.TestAddress, - MinPricePerByte: abi.NewTokenAmount(5678), - MaxPaymentInterval: 4321, - MaxPaymentIntervalIncrease: 0, - } - - t.Run("it works", func(t *testing.T) { - var qsb tut.QueryStreamBuilder = func(p peer.ID) (rmnet.RetrievalQueryStream, error) { - return tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ - Writer: tut.ExpectQueryWriter(t, expectedQuery, "queries should match"), - RespReader: tut.StubbedQueryResponseReader(expectedQueryResponse), - }), nil - } - net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{ 
- QueryStreamBuilder: tut.ExpectPeerOnQueryStreamBuilder(t, expectedPeer, qsb, "Peers should match"), - }) - c, err := retrievalimpl.NewClient( - net, - bs, - testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}), - &tut.TestPeerResolver{}, - ds, - storedCounter) - require.NoError(t, err) - - resp, err := c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{}) - require.NoError(t, err) - assert.NotNil(t, resp) - assert.Equal(t, expectedQueryResponse, resp) - }) - - t.Run("when the stream returns error, returns error", func(t *testing.T) { - net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{ - QueryStreamBuilder: tut.FailNewQueryStream, - }) - c, err := retrievalimpl.NewClient( - net, - bs, - testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}), - &tut.TestPeerResolver{}, - ds, - storedCounter) - require.NoError(t, err) - - _, err = c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{}) - assert.EqualError(t, err, "new query stream failed") - }) - - t.Run("when WriteDealStatusRequest fails, returns error", func(t *testing.T) { - - qsbuilder := func(p peer.ID) (network.RetrievalQueryStream, error) { - newStream := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ - PeerID: p, - Writer: tut.FailQueryWriter, - }) - return newStream, nil - } - - net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{ - QueryStreamBuilder: qsbuilder, - }) - c, err := retrievalimpl.NewClient( - net, - bs, - testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}), - &tut.TestPeerResolver{}, - ds, - storedCounter) - require.NoError(t, err) - - statusCode, err := c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{}) - assert.EqualError(t, err, "write query failed") - assert.Equal(t, retrievalmarket.QueryResponseUndefined, statusCode) - }) - - t.Run("when ReadDealStatusResponse fails, returns error", func(t *testing.T) { - qsbuilder := func(p peer.ID) (network.RetrievalQueryStream, 
error) { - newStream := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ - PeerID: p, - RespReader: tut.FailResponseReader, - }) - return newStream, nil - } - net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{ - QueryStreamBuilder: qsbuilder, - }) - c, err := retrievalimpl.NewClient( - net, - bs, - testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}), - &tut.TestPeerResolver{}, - ds, - storedCounter) - require.NoError(t, err) - - statusCode, err := c.Query(ctx, rpeer, pcid, retrievalmarket.QueryParams{}) - assert.EqualError(t, err, "query response failed") - assert.Equal(t, retrievalmarket.QueryResponseUndefined, statusCode) - }) -} - -func TestClient_FindProviders(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - storedCounter := storedcounter.New(ds, datastore.NewKey("nextDealID")) - bs := bstore.NewBlockstore(ds) - expectedPeer := peer.ID("somevalue") - - var qsb tut.QueryStreamBuilder = func(p peer.ID) (rmnet.RetrievalQueryStream, error) { - return tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ - Writer: tut.TrivialQueryWriter, - RespReader: tut.TrivialQueryResponseReader, - }), nil - } - net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{ - QueryStreamBuilder: tut.ExpectPeerOnQueryStreamBuilder(t, expectedPeer, qsb, "Peers should match"), - }) - - t.Run("when providers are found, returns providers", func(t *testing.T) { - peers := tut.RequireGenerateRetrievalPeers(t, 3) - testResolver := tut.TestPeerResolver{Peers: peers} - - c, err := retrievalimpl.NewClient(net, bs, &testnodes.TestRetrievalClientNode{}, &testResolver, ds, storedCounter) - require.NoError(t, err) - - testCid := tut.GenerateCids(1)[0] - assert.Len(t, c.FindProviders(testCid), 3) - }) - - t.Run("when there is an error, returns empty provider list", func(t *testing.T) { - testResolver := tut.TestPeerResolver{Peers: []retrievalmarket.RetrievalPeer{}, ResolverError: errors.New("boom")} - c, err := 
retrievalimpl.NewClient(net, bs, &testnodes.TestRetrievalClientNode{}, &testResolver, ds, storedCounter) - require.NoError(t, err) - - badCid := tut.GenerateCids(1)[0] - assert.Len(t, c.FindProviders(badCid), 0) - }) - - t.Run("when there are no providers", func(t *testing.T) { - testResolver := tut.TestPeerResolver{Peers: []retrievalmarket.RetrievalPeer{}} - c, err := retrievalimpl.NewClient(net, bs, &testnodes.TestRetrievalClientNode{}, &testResolver, ds, storedCounter) - require.NoError(t, err) - - testCid := tut.GenerateCids(1)[0] - assert.Len(t, c.FindProviders(testCid), 0) - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_fsm.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_fsm.go deleted file mode 100644 index c14f056a84..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_fsm.go +++ /dev/null @@ -1,198 +0,0 @@ -package clientstates - -import ( - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -func recordPaymentOwed(deal *rm.ClientDealState, totalProcessed uint64, paymentOwed abi.TokenAmount) error { - deal.TotalReceived += totalProcessed - deal.PaymentRequested = paymentOwed - return nil -} - -func recordProcessed(deal *rm.ClientDealState, totalProcessed uint64) error { - deal.TotalReceived += totalProcessed - return nil -} - -// ClientEvents are the events that can happen in a retrieval client -var ClientEvents = fsm.Events{ - fsm.Event(rm.ClientEventOpen). - From(rm.DealStatusNew).ToNoChange(), - fsm.Event(rm.ClientEventPaymentChannelErrored). 
- FromMany(rm.DealStatusAccepted, rm.DealStatusPaymentChannelCreating).To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("get or create payment channel: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventPaymentChannelCreateInitiated). - From(rm.DealStatusAccepted).To(rm.DealStatusPaymentChannelCreating). - Action(func(deal *rm.ClientDealState, msgCID cid.Cid) error { - deal.WaitMsgCID = &msgCID - return nil - }), - fsm.Event(rm.ClientEventPaymentChannelAddingFunds). - FromMany(rm.DealStatusAccepted).To(rm.DealStatusPaymentChannelAddingFunds). - Action(func(deal *rm.ClientDealState, msgCID cid.Cid, payCh address.Address) error { - deal.WaitMsgCID = &msgCID - deal.PaymentInfo = &rm.PaymentInfo{ - PayCh: payCh, - } - return nil - }), - fsm.Event(rm.ClientEventPaymentChannelReady). - FromMany(rm.DealStatusPaymentChannelCreating, rm.DealStatusPaymentChannelAddingFunds). - To(rm.DealStatusPaymentChannelReady). - Action(func(deal *rm.ClientDealState, payCh address.Address, lane uint64) error { - deal.PaymentInfo = &rm.PaymentInfo{ - PayCh: payCh, - Lane: lane, - } - return nil - }), - fsm.Event(rm.ClientEventAllocateLaneErrored). - FromMany(rm.DealStatusPaymentChannelCreating, rm.DealStatusPaymentChannelAddingFunds). - To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("allocating payment lane: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventPaymentChannelAddFundsErrored). - From(rm.DealStatusPaymentChannelAddingFunds).To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("wait for add funds: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventWriteDealProposalErrored). - FromAny().To(rm.DealStatusErrored). 
- Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("proposing deal: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventReadDealResponseErrored). - FromAny().To(rm.DealStatusErrored). - Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("reading deal response: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventDealRejected). - From(rm.DealStatusNew).To(rm.DealStatusRejected). - Action(func(deal *rm.ClientDealState, message string) error { - deal.Message = fmt.Sprintf("deal rejected: %s", message) - return nil - }), - fsm.Event(rm.ClientEventDealNotFound). - From(rm.DealStatusNew).To(rm.DealStatusDealNotFound). - Action(func(deal *rm.ClientDealState, message string) error { - deal.Message = fmt.Sprintf("deal not found: %s", message) - return nil - }), - fsm.Event(rm.ClientEventDealAccepted). - From(rm.DealStatusNew).To(rm.DealStatusAccepted), - fsm.Event(rm.ClientEventUnknownResponseReceived). - FromAny().To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState) error { - deal.Message = "Unexpected deal response status" - return nil - }), - fsm.Event(rm.ClientEventFundsExpended). - FromMany(rm.DealStatusFundsNeeded, rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState, expectedTotal string, actualTotal string) error { - deal.Message = fmt.Sprintf("not enough funds left: expected amt = %s, actual amt = %s", expectedTotal, actualTotal) - return nil - }), - fsm.Event(rm.ClientEventBadPaymentRequested). - FromMany(rm.DealStatusFundsNeeded, rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState, message string) error { - deal.Message = message - return nil - }), - fsm.Event(rm.ClientEventCreateVoucherFailed). - FromMany(rm.DealStatusFundsNeeded, rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFailed). 
- Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("creating payment voucher: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventWriteDealPaymentErrored). - FromAny().To(rm.DealStatusErrored). - Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("writing deal payment: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventPaymentSent). - From(rm.DealStatusFundsNeeded).To(rm.DealStatusOngoing). - From(rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFinalizing). - Action(func(deal *rm.ClientDealState) error { - // paymentRequested = 0 - // fundsSpent = fundsSpent + paymentRequested - // if paymentRequested / pricePerByte >= currentInterval - // currentInterval = currentInterval + proposal.intervalIncrease - // bytesPaidFor = bytesPaidFor + (paymentRequested / pricePerByte) - deal.FundsSpent = big.Add(deal.FundsSpent, deal.PaymentRequested) - bytesPaidFor := big.Div(deal.PaymentRequested, deal.PricePerByte).Uint64() - if bytesPaidFor >= deal.CurrentInterval { - deal.CurrentInterval += deal.DealProposal.PaymentIntervalIncrease - } - deal.BytesPaidFor += bytesPaidFor - deal.PaymentRequested = abi.NewTokenAmount(0) - return nil - }), - fsm.Event(rm.ClientEventConsumeBlockFailed). - FromMany(rm.DealStatusPaymentChannelReady, rm.DealStatusOngoing).To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState, err error) error { - deal.Message = xerrors.Errorf("consuming block: %w", err).Error() - return nil - }), - fsm.Event(rm.ClientEventLastPaymentRequested). - FromMany(rm.DealStatusPaymentChannelReady, - rm.DealStatusOngoing, - rm.DealStatusBlocksComplete).To(rm.DealStatusFundsNeededLastPayment). - Action(recordPaymentOwed), - fsm.Event(rm.ClientEventAllBlocksReceived). - FromMany(rm.DealStatusPaymentChannelReady, - rm.DealStatusOngoing, - rm.DealStatusBlocksComplete).To(rm.DealStatusBlocksComplete). - Action(recordProcessed), - fsm.Event(rm.ClientEventComplete). 
- FromMany(rm.DealStatusPaymentChannelReady, - rm.DealStatusOngoing, - rm.DealStatusBlocksComplete, - rm.DealStatusFinalizing).To(rm.DealStatusCompleted). - Action(recordProcessed), - fsm.Event(rm.ClientEventEarlyTermination). - FromMany(rm.DealStatusPaymentChannelReady, rm.DealStatusOngoing).To(rm.DealStatusFailed). - Action(func(deal *rm.ClientDealState) error { - deal.Message = "received complete status before all blocks received" - return nil - }), - fsm.Event(rm.ClientEventPaymentRequested). - FromMany(rm.DealStatusPaymentChannelReady, rm.DealStatusOngoing).To(rm.DealStatusFundsNeeded). - Action(recordPaymentOwed), - fsm.Event(rm.ClientEventBlocksReceived). - From(rm.DealStatusPaymentChannelReady).To(rm.DealStatusOngoing). - From(rm.DealStatusOngoing).ToNoChange(). - Action(recordProcessed), -} - -// ClientStateEntryFuncs are the handlers for different states in a retrieval client -var ClientStateEntryFuncs = fsm.StateEntryFuncs{ - rm.DealStatusNew: ProposeDeal, - rm.DealStatusAccepted: SetupPaymentChannelStart, - rm.DealStatusPaymentChannelCreating: WaitForPaymentChannelCreate, - rm.DealStatusPaymentChannelAddingFunds: WaitForPaymentChannelAddFunds, - rm.DealStatusPaymentChannelReady: ProcessNextResponse, - rm.DealStatusOngoing: ProcessNextResponse, - rm.DealStatusBlocksComplete: ProcessNextResponse, - rm.DealStatusFundsNeeded: ProcessPaymentRequested, - rm.DealStatusFundsNeededLastPayment: ProcessPaymentRequested, - rm.DealStatusFinalizing: Finalize, -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_states.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_states.go deleted file mode 100644 index 184af7c3c7..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_states.go +++ /dev/null @@ -1,205 +0,0 @@ -package clientstates - -import ( - "context" - - "github.com/filecoin-project/go-address" - 
"github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - - rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" -) - -// ClientDealEnvironment is a bridge to the environment a client deal is executing in. -// It provides access to relevant functionality on the retrieval client -type ClientDealEnvironment interface { - // Node returns the node interface for this deal - Node() rm.RetrievalClientNode - // DealStream returns the relevant libp2p interface for this deal - DealStream(id rm.DealID) rmnet.RetrievalDealStream - // ConsumeBlock allows us to validate an incoming block sent over the retrieval protocol - ConsumeBlock(context.Context, rm.DealID, rm.Block) (uint64, bool, error) -} - -// SetupPaymentChannelStart initiates setting up a payment channel for a deal -func SetupPaymentChannelStart(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { - tok, _, err := environment.Node().GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(rm.ClientEventPaymentChannelErrored, err) - } - - paych, msgCID, err := environment.Node().GetOrCreatePaymentChannel(ctx.Context(), deal.ClientWallet, deal.MinerWallet, deal.TotalFunds, tok) - if err != nil { - return ctx.Trigger(rm.ClientEventPaymentChannelErrored, err) - } - - if paych == address.Undef { - return ctx.Trigger(rm.ClientEventPaymentChannelCreateInitiated, msgCID) - } - - return ctx.Trigger(rm.ClientEventPaymentChannelAddingFunds, msgCID, paych) -} - -// WaitForPaymentChannelCreate waits for payment channel creation to be posted on chain, -// allocates a lane for vouchers, then signals that the payment channel is ready -func WaitForPaymentChannelCreate(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { - paych, err := 
environment.Node().WaitForPaymentChannelCreation(*deal.WaitMsgCID) - if err != nil { - return ctx.Trigger(rm.ClientEventPaymentChannelErrored, err) - } - - lane, err := environment.Node().AllocateLane(paych) - if err != nil { - return ctx.Trigger(rm.ClientEventAllocateLaneErrored, err) - } - return ctx.Trigger(rm.ClientEventPaymentChannelReady, paych, lane) -} - -// WaitForPaymentChannelAddFunds waits for funds to be added to an existing payment channel, then -// signals that payment channel is ready again -func WaitForPaymentChannelAddFunds(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { - err := environment.Node().WaitForPaymentChannelAddFunds(*deal.WaitMsgCID) - if err != nil { - return ctx.Trigger(rm.ClientEventPaymentChannelAddFundsErrored, err) - } - lane, err := environment.Node().AllocateLane(deal.PaymentInfo.PayCh) - if err != nil { - return ctx.Trigger(rm.ClientEventAllocateLaneErrored, err) - } - return ctx.Trigger(rm.ClientEventPaymentChannelReady, deal.PaymentInfo.PayCh, lane) -} - -// ProposeDeal sends the proposal to the other party -func ProposeDeal(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { - stream := environment.DealStream(deal.ID) - err := stream.WriteDealProposal(deal.DealProposal) - if err != nil { - return ctx.Trigger(rm.ClientEventWriteDealProposalErrored, err) - } - response, err := stream.ReadDealResponse() - if err != nil { - return ctx.Trigger(rm.ClientEventReadDealResponseErrored, err) - } - switch response.Status { - case rm.DealStatusRejected: - return ctx.Trigger(rm.ClientEventDealRejected, response.Message) - case rm.DealStatusDealNotFound: - return ctx.Trigger(rm.ClientEventDealNotFound, response.Message) - case rm.DealStatusAccepted: - return ctx.Trigger(rm.ClientEventDealAccepted) - default: - return ctx.Trigger(rm.ClientEventUnknownResponseReceived) - } -} - -// ProcessPaymentRequested processes a request for payment from the provider -func 
ProcessPaymentRequested(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { - // check that fundsSpent + paymentRequested <= totalFunds, or fail - if big.Add(deal.FundsSpent, deal.PaymentRequested).GreaterThan(deal.TotalFunds) { - expectedTotal := deal.TotalFunds.String() - actualTotal := big.Add(deal.FundsSpent, deal.PaymentRequested).String() - return ctx.Trigger(rm.ClientEventFundsExpended, expectedTotal, actualTotal) - } - - // check that totalReceived - bytesPaidFor >= currentInterval, or fail - if (deal.TotalReceived-deal.BytesPaidFor < deal.CurrentInterval) && deal.Status != rm.DealStatusFundsNeededLastPayment { - return ctx.Trigger(rm.ClientEventBadPaymentRequested, "not enough bytes received between payment request") - } - - // check that paymentRequest <= (totalReceived - bytesPaidFor) * pricePerByte, or fail - if deal.PaymentRequested.GreaterThan(big.Mul(abi.NewTokenAmount(int64(deal.TotalReceived-deal.BytesPaidFor)), deal.PricePerByte)) { - return ctx.Trigger(rm.ClientEventBadPaymentRequested, "too much money requested for bytes sent") - } - - tok, _, err := environment.Node().GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(rm.ClientEventCreateVoucherFailed, err) - } - - // create payment voucher with node (or fail) for (fundsSpent + paymentRequested) - // use correct payCh + lane - // (node will do subtraction back to paymentRequested... slightly odd behavior but... 
well anyway) - voucher, err := environment.Node().CreatePaymentVoucher(ctx.Context(), deal.PaymentInfo.PayCh, big.Add(deal.FundsSpent, deal.PaymentRequested), deal.PaymentInfo.Lane, tok) - if err != nil { - return ctx.Trigger(rm.ClientEventCreateVoucherFailed, err) - } - - // send payment voucher (or fail) - err = environment.DealStream(deal.ID).WriteDealPayment(rm.DealPayment{ - ID: deal.DealProposal.ID, - PaymentChannel: deal.PaymentInfo.PayCh, - PaymentVoucher: voucher, - }) - if err != nil { - return ctx.Trigger(rm.ClientEventWriteDealPaymentErrored, err) - } - - return ctx.Trigger(rm.ClientEventPaymentSent) -} - -// ProcessNextResponse reads and processes the next response from the provider -func ProcessNextResponse(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { - // Read next response (or fail) - response, err := environment.DealStream(deal.ID).ReadDealResponse() - if err != nil { - return ctx.Trigger(rm.ClientEventReadDealResponseErrored, err) - } - - // Process Blocks - totalProcessed := uint64(0) - completed := deal.Status == rm.DealStatusBlocksComplete - if !completed { - var processed uint64 - for _, block := range response.Blocks { - processed, completed, err = environment.ConsumeBlock(ctx.Context(), deal.ID, block) - if err != nil { - return ctx.Trigger(rm.ClientEventConsumeBlockFailed, err) - } - totalProcessed += processed - if completed { - break - } - } - } - - if completed { - switch response.Status { - case rm.DealStatusFundsNeededLastPayment: - return ctx.Trigger(rm.ClientEventLastPaymentRequested, totalProcessed, response.PaymentOwed) - case rm.DealStatusBlocksComplete: - return ctx.Trigger(rm.ClientEventAllBlocksReceived, totalProcessed) - case rm.DealStatusCompleted: - return ctx.Trigger(rm.ClientEventComplete, totalProcessed) - default: - return ctx.Trigger(rm.ClientEventUnknownResponseReceived) - } - } - switch response.Status { - // Error on complete status, but not all blocks received - case 
rm.DealStatusFundsNeededLastPayment, rm.DealStatusCompleted: - return ctx.Trigger(rm.ClientEventEarlyTermination) - case rm.DealStatusFundsNeeded: - return ctx.Trigger(rm.ClientEventPaymentRequested, totalProcessed, response.PaymentOwed) - case rm.DealStatusOngoing: - return ctx.Trigger(rm.ClientEventBlocksReceived, totalProcessed) - default: - return ctx.Trigger(rm.ClientEventUnknownResponseReceived) - } -} - -// Finalize completes a deal -func Finalize(ctx fsm.Context, environment ClientDealEnvironment, deal rm.ClientDealState) error { - // Read next response (or fail) - response, err := environment.DealStream(deal.ID).ReadDealResponse() - if err != nil { - return ctx.Trigger(rm.ClientEventReadDealResponseErrored, err) - } - - if response.Status != rm.DealStatusCompleted { - return ctx.Trigger(rm.ClientEventUnknownResponseReceived) - } - - return ctx.Trigger(rm.ClientEventComplete, uint64(0)) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_states_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_states_test.go deleted file mode 100644 index 6e99160d97..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/client_states_test.go +++ /dev/null @@ -1,657 +0,0 @@ -package clientstates_test - -import ( - "context" - "crypto/rand" - "errors" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-statemachine/fsm" - fsmtest "github.com/filecoin-project/go-statemachine/fsm/testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - testnet "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -type consumeBlockResponse struct { - size uint64 - done bool - err error -} - -type fakeEnvironment struct { - node retrievalmarket.RetrievalClientNode - ds rmnet.RetrievalDealStream - nextResponse int - responses []consumeBlockResponse -} - -func (e *fakeEnvironment) Node() retrievalmarket.RetrievalClientNode { - return e.node -} - -func (e *fakeEnvironment) DealStream(id retrievalmarket.DealID) rmnet.RetrievalDealStream { - return e.ds -} - -func (e *fakeEnvironment) ConsumeBlock(context.Context, retrievalmarket.DealID, retrievalmarket.Block) (uint64, bool, error) { - if e.nextResponse >= len(e.responses) { - return 0, false, errors.New("ConsumeBlock failed") - } - response := e.responses[e.nextResponse] - e.nextResponse += 1 - return response.size, response.done, response.err -} - -func TestSetupPaymentChannel(t *testing.T) { - ctx := context.Background() - ds := testnet.NewTestRetrievalDealStream(testnet.TestDealStreamParams{}) - expectedPayCh := address.TestAddress2 - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) - require.NoError(t, err) - runSetupPaymentChannel := func(t *testing.T, - params testnodes.TestRetrievalClientNodeParams, - dealState *retrievalmarket.ClientDealState) { - node := testnodes.NewTestRetrievalClientNode(params) - environment := &fakeEnvironment{node, ds, 0, nil} - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := clientstates.SetupPaymentChannelStart(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - } - - t.Run("payment channel create initiated", func(t *testing.T) { - envParams := 
testnodes.TestRetrievalClientNodeParams{ - PayCh: address.Undef, - CreatePaychCID: testnet.GenerateCids(1)[0], - } - dealState := makeDealState(retrievalmarket.DealStatusAccepted) - runSetupPaymentChannel(t, envParams, dealState) - assert.Empty(t, dealState.Message) - assert.Equal(t, dealState.Status, retrievalmarket.DealStatusPaymentChannelCreating) - }) - - t.Run("payment channel needs funds added", func(t *testing.T) { - envParams := testnodes.TestRetrievalClientNodeParams{ - AddFundsOnly: true, - PayCh: expectedPayCh, - CreatePaychCID: testnet.GenerateCids(1)[0], - } - dealState := makeDealState(retrievalmarket.DealStatusAccepted) - runSetupPaymentChannel(t, envParams, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, retrievalmarket.DealStatusPaymentChannelAddingFunds, dealState.Status) - require.Equal(t, expectedPayCh, dealState.PaymentInfo.PayCh) - }) - - t.Run("when create payment channel fails", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusAccepted) - envParams := testnodes.TestRetrievalClientNodeParams{ - PayCh: address.Undef, - PayChErr: errors.New("Something went wrong"), - } - runSetupPaymentChannel(t, envParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - -} - -func TestWaitForPaymentChannelCreate(t *testing.T) { - ctx := context.Background() - ds := testnet.NewTestRetrievalDealStream(testnet.TestDealStreamParams{}) - expectedPayCh := address.TestAddress2 - expectedLane := uint64(10) - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) - require.NoError(t, err) - runWaitForPaychCreate := func(t *testing.T, - params testnodes.TestRetrievalClientNodeParams, - dealState *retrievalmarket.ClientDealState) { - node := testnodes.NewTestRetrievalClientNode(params) - environment := &fakeEnvironment{node, ds, 0, nil} - fsmCtx := fsmtest.NewTestContext(ctx, 
eventMachine) - err := clientstates.WaitForPaymentChannelCreate(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - } - msgCID := testnet.GenerateCids(1)[0] - - t.Run("it works", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelCreating) - dealState.WaitMsgCID = &msgCID - params := testnodes.TestRetrievalClientNodeParams{ - PayCh: expectedPayCh, - CreatePaychCID: msgCID, - Lane: expectedLane, - } - runWaitForPaychCreate(t, params, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusPaymentChannelReady) - require.Equal(t, expectedLane, dealState.PaymentInfo.Lane) - require.Equal(t, expectedPayCh, dealState.PaymentInfo.PayCh) - }) - t.Run("if Wait fails", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelCreating) - dealState.WaitMsgCID = &msgCID - params := testnodes.TestRetrievalClientNodeParams{ - PayCh: expectedPayCh, - CreatePaychCID: msgCID, - WaitForChCreateErr: errors.New("boom"), - } - runWaitForPaychCreate(t, params, dealState) - require.Contains(t, dealState.Message, "boom") - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - - t.Run("if AllocateLane fails", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelCreating) - dealState.WaitMsgCID = &msgCID - params := testnodes.TestRetrievalClientNodeParams{ - PayCh: expectedPayCh, - CreatePaychCID: msgCID, - LaneError: errors.New("boom"), - } - runWaitForPaychCreate(t, params, dealState) - require.Contains(t, dealState.Message, "boom") - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) -} - -func TestWaitForPaymentChannelAddFunds(t *testing.T) { - ctx := context.Background() - ds := testnet.NewTestRetrievalDealStream(testnet.TestDealStreamParams{}) - expectedPayCh := address.TestAddress2 - expectedLane := uint64(99) - eventMachine, err := 
fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) - require.NoError(t, err) - runWaitForPaychAddFunds := func(t *testing.T, - params testnodes.TestRetrievalClientNodeParams, - dealState *retrievalmarket.ClientDealState) { - node := testnodes.NewTestRetrievalClientNode(params) - environment := &fakeEnvironment{node, ds, 0, nil} - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := clientstates.WaitForPaymentChannelAddFunds(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - } - msgCID := testnet.GenerateCids(1)[0] - - t.Run("it works", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelAddingFunds) - dealState.PaymentInfo.PayCh = expectedPayCh - dealState.WaitMsgCID = &msgCID - - params := testnodes.TestRetrievalClientNodeParams{ - AddFundsOnly: true, - PayCh: expectedPayCh, - AddFundsCID: msgCID, - Lane: expectedLane, - } - runWaitForPaychAddFunds(t, params, dealState) - require.Empty(t, dealState.Message) - assert.Equal(t, retrievalmarket.DealStatusPaymentChannelReady, dealState.Status) - assert.Equal(t, expectedLane, dealState.PaymentInfo.Lane) - assert.Equal(t, expectedPayCh, dealState.PaymentInfo.PayCh) - }) - t.Run("if Wait fails", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelAddingFunds) - dealState.WaitMsgCID = &msgCID - params := testnodes.TestRetrievalClientNodeParams{ - AddFundsOnly: true, - PayCh: expectedPayCh, - AddFundsCID: msgCID, - WaitForAddFundsErr: errors.New("boom"), - Lane: expectedLane, - } - runWaitForPaychAddFunds(t, params, dealState) - assert.Contains(t, dealState.Message, "boom") - assert.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - assert.Equal(t, uint64(0), dealState.PaymentInfo.Lane) - }) - t.Run("if AllocateLane fails", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusPaymentChannelAddingFunds) - 
dealState.WaitMsgCID = &msgCID - params := testnodes.TestRetrievalClientNodeParams{ - AddFundsOnly: true, - PayCh: expectedPayCh, - AddFundsCID: msgCID, - LaneError: errors.New("boom"), - Lane: expectedLane, - } - runWaitForPaychAddFunds(t, params, dealState) - assert.Contains(t, dealState.Message, "boom") - assert.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - assert.Equal(t, uint64(0), dealState.PaymentInfo.Lane) - }) -} - -func TestProposeDeal(t *testing.T) { - ctx := context.Background() - node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) - require.NoError(t, err) - runProposeDeal := func(t *testing.T, params testnet.TestDealStreamParams, dealState *retrievalmarket.ClientDealState) { - ds := testnet.NewTestRetrievalDealStream(params) - environment := &fakeEnvironment{node, ds, 0, nil} - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := clientstates.ProposeDeal(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - } - - t.Run("it works", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusNew) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusAccepted, - ID: dealState.ID, - }), - } - runProposeDeal(t, dealStreamParams, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusAccepted) - }) - - t.Run("deal rejected", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusNew) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusRejected, - ID: dealState.ID, - Message: "your deal proposal sucks", - }), - } - 
runProposeDeal(t, dealStreamParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusRejected) - }) - - t.Run("deal not found", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusNew) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusDealNotFound, - ID: dealState.ID, - Message: "can't find a deal", - }), - } - runProposeDeal(t, dealStreamParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusDealNotFound) - }) - - t.Run("unable to send proposal", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusNew) - dealStreamParams := testnet.TestDealStreamParams{ - ProposalWriter: testnet.FailDealProposalWriter, - } - runProposeDeal(t, dealStreamParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusErrored) - }) - - t.Run("unable to read response", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusNew) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.FailDealResponseReader, - } - runProposeDeal(t, dealStreamParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusErrored) - }) -} - -func TestProcessPaymentRequested(t *testing.T) { - ctx := context.Background() - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) - require.NoError(t, err) - runProcessPaymentRequested := func(t *testing.T, - netParams testnet.TestDealStreamParams, - nodeParams testnodes.TestRetrievalClientNodeParams, - dealState *retrievalmarket.ClientDealState) { - ds := testnet.NewTestRetrievalDealStream(netParams) - node := 
testnodes.NewTestRetrievalClientNode(nodeParams) - environment := &fakeEnvironment{node, ds, 0, nil} - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := clientstates.ProcessPaymentRequested(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - } - - testVoucher := &paych.SignedVoucher{} - - t.Run("it works", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(0)) - require.Equal(t, dealState.FundsSpent, big.Add(defaultFundsSpent, defaultPaymentRequested)) - require.Equal(t, dealState.BytesPaidFor, defaultTotalReceived) - require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval+defaultIntervalIncrease) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) - }) - - t.Run("last payment", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusFundsNeededLastPayment) - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(0)) - require.Equal(t, dealState.FundsSpent, big.Add(defaultFundsSpent, defaultPaymentRequested)) - require.Equal(t, dealState.BytesPaidFor, defaultTotalReceived) - require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval+defaultIntervalIncrease) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFinalizing) - }) - - t.Run("not enough funds left", func(t *testing.T) { - dealState := 
makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.FundsSpent = defaultTotalFunds - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - - t.Run("not enough bytes since last payment", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.BytesPaidFor = defaultBytesPaidFor + 500 - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - - t.Run("more bytes since last payment than interval works, can charge more", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.BytesPaidFor = defaultBytesPaidFor - 500 - largerPaymentRequested := abi.NewTokenAmount(750000) - dealState.PaymentRequested = largerPaymentRequested - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(0)) - require.Equal(t, dealState.FundsSpent, big.Add(defaultFundsSpent, largerPaymentRequested)) - require.Equal(t, dealState.BytesPaidFor, defaultTotalReceived) - require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval+defaultIntervalIncrease) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) - }) - - t.Run("too much payment requested", func(t *testing.T) { - dealState := 
makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.PaymentRequested = abi.NewTokenAmount(750000) - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - - t.Run("too little payment requested works but records correctly", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - smallerPaymentRequested := abi.NewTokenAmount(250000) - dealState.PaymentRequested = smallerPaymentRequested - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.PaymentRequested, abi.NewTokenAmount(0)) - require.Equal(t, dealState.FundsSpent, big.Add(defaultFundsSpent, smallerPaymentRequested)) - // only records change for those bytes paid for - require.Equal(t, dealState.BytesPaidFor, defaultBytesPaidFor+500) - // no interval increase - require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) - }) - - t.Run("voucher create fails", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealStreamParams := testnet.TestDealStreamParams{} - nodeParams := testnodes.TestRetrievalClientNodeParams{ - VoucherError: errors.New("Something Went Wrong"), - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - - t.Run("unable to send payment", func(t *testing.T) { - dealState := 
makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealStreamParams := testnet.TestDealStreamParams{ - PaymentWriter: testnet.FailDealPaymentWriter, - } - nodeParams := testnodes.TestRetrievalClientNodeParams{ - Voucher: testVoucher, - } - runProcessPaymentRequested(t, dealStreamParams, nodeParams, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusErrored) - }) -} - -func TestProcessNextResponse(t *testing.T) { - ctx := context.Background() - node := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{}) - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ClientDealState{}, "Status", clientstates.ClientEvents) - require.NoError(t, err) - runProcessNextResponse := func(t *testing.T, - netParams testnet.TestDealStreamParams, - responses []consumeBlockResponse, - dealState *retrievalmarket.ClientDealState) { - ds := testnet.NewTestRetrievalDealStream(netParams) - environment := &fakeEnvironment{node, ds, 0, responses} - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := clientstates.ProcessNextResponse(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - } - paymentOwed := abi.NewTokenAmount(1000) - t.Run("it works", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusOngoing) - blocks, consumeBlockResponses := generateBlocks(10, 100, false, false) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusOngoing, - ID: dealState.ID, - Blocks: blocks, - }), - } - runProcessNextResponse(t, dealStreamParams, consumeBlockResponses, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.TotalReceived, defaultTotalReceived+1000) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) - }) - - t.Run("completes", func(t *testing.T) { - dealState := 
makeDealState(retrievalmarket.DealStatusOngoing) - blocks, consumeBlockResponses := generateBlocks(10, 100, true, false) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusCompleted, - ID: dealState.ID, - Blocks: blocks, - }), - } - runProcessNextResponse(t, dealStreamParams, consumeBlockResponses, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.TotalReceived, defaultTotalReceived+1000) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusCompleted) - }) - - t.Run("completes last payment", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusOngoing) - blocks, consumeBlockResponses := generateBlocks(10, 100, true, false) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusFundsNeededLastPayment, - ID: dealState.ID, - PaymentOwed: paymentOwed, - Blocks: blocks, - }), - } - runProcessNextResponse(t, dealStreamParams, consumeBlockResponses, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.TotalReceived, defaultTotalReceived+1000) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeededLastPayment) - require.Equal(t, dealState.PaymentRequested, paymentOwed) - }) - - t.Run("receive complete status but deal is not complete errors", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusOngoing) - blocks, consumeBlockResponses := generateBlocks(10, 100, false, false) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusCompleted, - ID: dealState.ID, - Blocks: blocks, - }), - } - runProcessNextResponse(t, dealStreamParams, consumeBlockResponses, dealState) - require.NotEmpty(t, dealState.Message) 
- require.Equal(t, dealState.TotalReceived, defaultTotalReceived) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - t.Run("payment requested", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusOngoing) - blocks, consumeBlockResponses := generateBlocks(10, 100, false, false) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusFundsNeeded, - ID: dealState.ID, - PaymentOwed: paymentOwed, - Blocks: blocks, - }), - } - runProcessNextResponse(t, dealStreamParams, consumeBlockResponses, dealState) - require.Empty(t, dealState.Message) - require.Equal(t, dealState.TotalReceived, defaultTotalReceived+1000) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeeded) - require.Equal(t, dealState.PaymentRequested, paymentOwed) - }) - - t.Run("unexpected status errors", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusOngoing) - blocks, consumeBlockResponses := generateBlocks(10, 100, false, false) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusNew, - ID: dealState.ID, - Blocks: blocks, - }), - } - runProcessNextResponse(t, dealStreamParams, consumeBlockResponses, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.TotalReceived, defaultTotalReceived) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - - t.Run("consume block errors", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusOngoing) - blocks, consumeBlockResponses := generateBlocks(10, 100, false, true) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.StubbedDealResponseReader(retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusOngoing, - ID: dealState.ID, - Blocks: 
blocks, - }), - } - runProcessNextResponse(t, dealStreamParams, consumeBlockResponses, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.TotalReceived, defaultTotalReceived) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - }) - - t.Run("read response errors", func(t *testing.T) { - dealState := makeDealState(retrievalmarket.DealStatusOngoing) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseReader: testnet.FailDealResponseReader, - } - runProcessNextResponse(t, dealStreamParams, nil, dealState) - require.NotEmpty(t, dealState.Message) - require.Equal(t, dealState.TotalReceived, defaultTotalReceived) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusErrored) - }) -} - -var defaultTotalFunds = abi.NewTokenAmount(4000000) -var defaultCurrentInterval = uint64(1000) -var defaultIntervalIncrease = uint64(500) -var defaultPricePerByte = abi.NewTokenAmount(500) -var defaultTotalReceived = uint64(6000) -var defaultBytesPaidFor = uint64(5000) -var defaultFundsSpent = abi.NewTokenAmount(2500000) -var defaultPaymentRequested = abi.NewTokenAmount(500000) - -func makeDealState(status retrievalmarket.DealStatus) *retrievalmarket.ClientDealState { - return &retrievalmarket.ClientDealState{ - TotalFunds: defaultTotalFunds, - MinerWallet: address.TestAddress, - ClientWallet: address.TestAddress2, - PaymentInfo: &retrievalmarket.PaymentInfo{}, - Status: status, - BytesPaidFor: defaultBytesPaidFor, - TotalReceived: defaultTotalReceived, - CurrentInterval: defaultCurrentInterval, - FundsSpent: defaultFundsSpent, - PaymentRequested: defaultPaymentRequested, - DealProposal: retrievalmarket.DealProposal{ - ID: retrievalmarket.DealID(10), - Params: retrievalmarket.NewParamsV0(defaultPricePerByte, 0, defaultIntervalIncrease), - }, - } -} - -func generateBlocks(count uint64, blockSize uint64, completeOnLast bool, errorOnFirst bool) ([]retrievalmarket.Block, []consumeBlockResponse) { - blocks := 
make([]retrievalmarket.Block, count) - responses := make([]consumeBlockResponse, count) - var i uint64 = 0 - for ; i < count; i++ { - data := make([]byte, blockSize) - var err error - _, err = rand.Read(data) - blocks[i] = retrievalmarket.Block{ - Prefix: cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Bytes(), - Data: data, - } - complete := false - if i == 0 && errorOnFirst { - err = errors.New("something went wrong") - } - - if i == count-1 && completeOnLast { - complete = true - } - responses[i] = consumeBlockResponse{blockSize, complete, err} - } - return blocks, responses -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/doc.go deleted file mode 100644 index a97c953124..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/clientstates/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -/* -Package clientstates contains state machine logic relating to the `RetrievalClient`. - -client_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. - -client_states.go contains state handler functions. - -The following diagram illustrates the operation of the client state machine. 
This diagram is auto-generated from current code and should remain up to date over time: - -https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalclient.mmd.svg - -*/ -package clientstates diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/doc.go deleted file mode 100644 index 94b32998c9..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/* -Package retrievalimpl provides the primary implementation of retrieval market top level interfaces interfaces - -This package provides a production implementation of `RetrievalClient` and `RetrievalProvider`. -*/ -package retrievalimpl diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/fixtures/lorem.txt b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/fixtures/lorem.txt deleted file mode 100644 index fd4a2f3c1f..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/fixtures/lorem.txt +++ /dev/null @@ -1,49 +0,0 @@ -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. - -Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. 
Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. - -Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. - -Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. 
Platea dictumst quisque sagittis purus. - -Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. - -Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. 
- -Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. - -Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. - -Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. 
Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. - -Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem. - -Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. 
Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. - -Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. - -Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. - -Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. 
Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. - -Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. - -Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. - -Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. 
Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. - -Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. - -Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. 
Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. - -Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. - -Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. - -Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. 
Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. - -Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat. - -Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. 
Arcu dui vivamus arcu felis bibendum. - -Eros in cursus turpis massa tincidunt dui ut. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed. \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/fixtures/lorem_under_1_block.txt b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/fixtures/lorem_under_1_block.txt deleted file mode 100644 index 8a5b223df6..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/fixtures/lorem_under_1_block.txt +++ /dev/null @@ -1 +0,0 @@ -Aliquam sollicitudin diam non pellentesque eleifend. Phasellus at mauris id est interdum feugiat. Morbi lobortis quam eget nulla pulvinar, ac maximus dui consequat. Donec ut mauris faucibus nulla finibus cursus. In iaculis, est vitae viverra dignissim, sem nulla hendrerit augue, sed mollis magna libero in odio. Morbi interdum lacus pellentesque pulvinar bibendum. Cras ac ultrices tortor, nec lobortis lorem. 
\ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/integration_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/integration_test.go deleted file mode 100644 index f79c3da14a..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/integration_test.go +++ /dev/null @@ -1,481 +0,0 @@ -package retrievalimpl_test - -import ( - "bytes" - "context" - "path/filepath" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - rmtesting "github.com/filecoin-project/go-fil-markets/retrievalmarket/testing" - "github.com/filecoin-project/go-fil-markets/shared" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestClientCanMakeQueryToProvider(t *testing.T) { - bgCtx := context.Background() - payChAddr := address.TestAddress - - client, expectedCIDs, missingPiece, expectedQR, retrievalPeer, _ := requireSetupTestClientAndProvider(bgCtx, t, payChAddr) - - t.Run("when piece is found, returns piece and price data", func(t 
*testing.T) { - expectedQR.Status = retrievalmarket.QueryResponseAvailable - actualQR, err := client.Query(bgCtx, retrievalPeer, expectedCIDs[0], retrievalmarket.QueryParams{}) - - assert.NoError(t, err) - assert.Equal(t, expectedQR, actualQR) - }) - - t.Run("when piece is not found, returns unavailable", func(t *testing.T) { - expectedQR.PieceCIDFound = retrievalmarket.QueryItemUnavailable - expectedQR.Status = retrievalmarket.QueryResponseUnavailable - expectedQR.Size = 0 - actualQR, err := client.Query(bgCtx, retrievalPeer, missingPiece, retrievalmarket.QueryParams{}) - assert.NoError(t, err) - assert.Equal(t, expectedQR, actualQR) - }) - - t.Run("when there is some other error, returns error", func(t *testing.T) { - unknownPiece := tut.GenerateCids(1)[0] - expectedQR.Status = retrievalmarket.QueryResponseError - expectedQR.Message = "get cid info: GetCIDInfo failed" - actualQR, err := client.Query(bgCtx, retrievalPeer, unknownPiece, retrievalmarket.QueryParams{}) - assert.NoError(t, err) - assert.Equal(t, expectedQR, actualQR) - }) - -} - -func TestProvider_Stop(t *testing.T) { - bgCtx := context.Background() - payChAddr := address.TestAddress - client, expectedCIDs, _, _, retrievalPeer, provider := requireSetupTestClientAndProvider(bgCtx, t, payChAddr) - require.NoError(t, provider.Stop()) - _, err := client.Query(bgCtx, retrievalPeer, expectedCIDs[0], retrievalmarket.QueryParams{}) - assert.EqualError(t, err, "protocol not supported") -} - -func requireSetupTestClientAndProvider(bgCtx context.Context, t *testing.T, payChAddr address.Address) (retrievalmarket.RetrievalClient, - []cid.Cid, - cid.Cid, - retrievalmarket.QueryResponse, - retrievalmarket.RetrievalPeer, - retrievalmarket.RetrievalProvider) { - testData := tut.NewLibp2pTestData(bgCtx, t) - nw1 := rmnet.NewFromLibp2pHost(testData.Host1) - cids := tut.GenerateCids(2) - rcNode1 := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{ - PayCh: payChAddr, - CreatePaychCID: cids[0], 
- AddFundsCID: cids[1], - }) - client, err := retrievalimpl.NewClient(nw1, testData.Bs1, rcNode1, &tut.TestPeerResolver{}, testData.Ds1, testData.RetrievalStoredCounter1) - require.NoError(t, err) - nw2 := rmnet.NewFromLibp2pHost(testData.Host2) - providerNode := testnodes.NewTestRetrievalProviderNode() - pieceStore := tut.NewTestPieceStore() - expectedCIDs := tut.GenerateCids(3) - expectedPieceCIDs := tut.GenerateCids(3) - missingCID := tut.GenerateCids(1)[0] - expectedQR := tut.MakeTestQueryResponse() - - pieceStore.ExpectMissingCID(missingCID) - for i, c := range expectedCIDs { - pieceStore.ExpectCID(c, piecestore.CIDInfo{ - PieceBlockLocations: []piecestore.PieceBlockLocation{ - { - PieceCID: expectedPieceCIDs[i], - }, - }, - }) - } - for i, piece := range expectedPieceCIDs { - pieceStore.ExpectPiece(piece, piecestore.PieceInfo{ - Deals: []piecestore.DealInfo{ - { - Length: expectedQR.Size * uint64(i+1), - }, - }, - }) - } - - paymentAddress := address.TestAddress2 - provider, err := retrievalimpl.NewProvider(paymentAddress, providerNode, nw2, pieceStore, testData.Bs2, testData.Ds2) - require.NoError(t, err) - - provider.SetPaymentInterval(expectedQR.MaxPaymentInterval, expectedQR.MaxPaymentIntervalIncrease) - provider.SetPricePerByte(expectedQR.MinPricePerByte) - require.NoError(t, provider.Start()) - - retrievalPeer := retrievalmarket.RetrievalPeer{ - Address: paymentAddress, - ID: testData.Host2.ID(), - } - return client, expectedCIDs, missingCID, expectedQR, retrievalPeer, provider -} - -func TestClientCanMakeDealWithProvider(t *testing.T) { - // -------- SET UP PROVIDER - - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - - partialSelector := ssb.ExploreFields(func(specBuilder builder.ExploreFieldsSpecBuilder) { - specBuilder.Insert("Links", ssb.ExploreIndex(0, ssb.ExploreFields(func(specBuilder builder.ExploreFieldsSpecBuilder) { - specBuilder.Insert("Hash", ssb.Matcher()) - }))) - }).Node() - - var customDeciderRan bool - - testCases := 
[]struct { - name string - decider retrievalimpl.DealDecider - filename string - filesize uint64 - voucherAmts []abi.TokenAmount - selector ipld.Node - paramsV1, unsealing, addFunds bool - }{ - {name: "1 block file retrieval succeeds", - filename: "lorem_under_1_block.txt", - filesize: 410, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, - unsealing: false}, - {name: "1 block file retrieval succeeds with existing payment channel", - filename: "lorem_under_1_block.txt", - filesize: 410, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, - unsealing: false, addFunds: true}, - {name: "1 block file retrieval succeeds with unsealing", - filename: "lorem_under_1_block.txt", - filesize: 410, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, - unsealing: true}, - {name: "multi-block file retrieval succeeds", - filename: "lorem.txt", - filesize: 19000, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10136000), abi.NewTokenAmount(9784000)}, - unsealing: false}, - {name: "multi-block file retrieval succeeds with unsealing", - filename: "lorem.txt", - filesize: 19000, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10136000), abi.NewTokenAmount(9784000)}, - unsealing: true}, - {name: "multi-block file retrieval succeeds with V1 params and AllSelector", - filename: "lorem.txt", - filesize: 19000, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(10136000), abi.NewTokenAmount(9784000)}, - paramsV1: true, - selector: shared.AllSelector(), - unsealing: false}, - {name: "partial file retrieval succeeds with V1 params and selector recursion depth 1", - filename: "lorem.txt", - filesize: 1024, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(1944000)}, - paramsV1: true, - selector: partialSelector, - unsealing: false}, - {name: "succeeds when using a custom decider function", - decider: func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { - customDeciderRan = true - return true, "", nil - }, - 
filename: "lorem_under_1_block.txt", - filesize: 410, - voucherAmts: []abi.TokenAmount{abi.NewTokenAmount(410000)}, - unsealing: false, - }, - } - - for i, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - bgCtx := context.Background() - clientPaymentChannel, err := address.NewIDAddress(uint64(i * 10)) - require.NoError(t, err) - - testData := tut.NewLibp2pTestData(bgCtx, t) - - // Inject a unixFS file on the provider side to its blockstore - // obtained via `ls -laf` on this file - - fpath := filepath.Join("retrievalmarket", "impl", "fixtures", testCase.filename) - - pieceLink := testData.LoadUnixFSFile(t, fpath, true) - c, ok := pieceLink.(cidlink.Link) - require.True(t, ok) - payloadCID := c.Cid - providerPaymentAddr, err := address.NewIDAddress(uint64(i * 99)) - require.NoError(t, err) - paymentInterval := uint64(10000) - paymentIntervalIncrease := uint64(1000) - pricePerByte := abi.NewTokenAmount(1000) - - expectedQR := retrievalmarket.QueryResponse{ - Size: 1024, - PaymentAddress: providerPaymentAddr, - MinPricePerByte: pricePerByte, - MaxPaymentInterval: paymentInterval, - MaxPaymentIntervalIncrease: paymentIntervalIncrease, - } - - providerNode := testnodes.NewTestRetrievalProviderNode() - var pieceInfo piecestore.PieceInfo - if testCase.unsealing { - cio := cario.NewCarIO() - var buf bytes.Buffer - err := cio.WriteCar(bgCtx, testData.Bs2, payloadCID, shared.AllSelector(), &buf) - require.NoError(t, err) - carData := buf.Bytes() - sectorID := uint64(100000) - offset := uint64(1000) - pieceInfo = piecestore.PieceInfo{ - Deals: []piecestore.DealInfo{ - { - SectorID: sectorID, - Offset: offset, - Length: uint64(len(carData)), - }, - }, - } - providerNode.ExpectUnseal(sectorID, offset, uint64(len(carData)), carData) - // clearout provider blockstore - allCids, err := testData.Bs2.AllKeysChan(bgCtx) - require.NoError(t, err) - for c := range allCids { - err = testData.Bs2.DeleteBlock(c) - require.NoError(t, err) - } - } else { - 
pieceInfo = piecestore.PieceInfo{ - Deals: []piecestore.DealInfo{ - { - Length: expectedQR.Size, - }, - }, - } - } - - decider := rmtesting.TrivalTestDecider - if testCase.decider != nil { - decider = testCase.decider - } - provider := setupProvider(t, testData, payloadCID, pieceInfo, expectedQR, - providerPaymentAddr, providerNode, decider) - - retrievalPeer := &retrievalmarket.RetrievalPeer{Address: providerPaymentAddr, ID: testData.Host2.ID()} - - expectedVoucher := tut.MakeTestSignedVoucher() - - // just make sure there is enough to cover the transfer - expectedTotal := big.Mul(pricePerByte, abi.NewTokenAmount(int64(testCase.filesize*2))) - - // voucherAmts are pulled from the actual answer so the expected keys in the test node match up. - // later we compare the voucher values. The last voucherAmt is a remainder - proof := []byte("") - for _, voucherAmt := range testCase.voucherAmts { - require.NoError(t, providerNode.ExpectVoucher(clientPaymentChannel, expectedVoucher, proof, voucherAmt, voucherAmt, nil)) - } - - // ------- SET UP CLIENT - nw1 := rmnet.NewFromLibp2pHost(testData.Host1) - - createdChan, newLaneAddr, createdVoucher, client, err := setupClient(clientPaymentChannel, expectedVoucher, nw1, testData, testCase.addFunds) - require.NoError(t, err) - - clientDealStateChan := make(chan retrievalmarket.ClientDealState) - client.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) { - switch event { - case retrievalmarket.ClientEventComplete: - clientDealStateChan <- state - default: - msg := ` -Client: -Status: %s -TotalReceived: %d -BytesPaidFor: %d -CurrentInterval: %d -TotalFunds: %s -Message: %s -` - t.Logf(msg, retrievalmarket.DealStatuses[state.Status], state.TotalReceived, state.BytesPaidFor, state.CurrentInterval, - state.TotalFunds.String(), state.Message) - } - }) - - providerDealStateChan := make(chan retrievalmarket.ProviderDealState) - provider.SubscribeToEvents(func(event 
retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) { - switch event { - case retrievalmarket.ProviderEventComplete: - providerDealStateChan <- state - default: - msg := ` -Provider: -Status: %s -TotalSent: %d -FundsReceived: %s -Message: %s -CurrentInterval: %d -` - t.Logf(msg, retrievalmarket.DealStatuses[state.Status], state.TotalSent, state.FundsReceived.String(), state.Message, - state.CurrentInterval) - } - }) - - // **** Send the query for the Piece - // set up retrieval params - resp, err := client.Query(bgCtx, *retrievalPeer, payloadCID, retrievalmarket.QueryParams{}) - require.NoError(t, err) - require.Equal(t, retrievalmarket.QueryResponseAvailable, resp.Status) - - var rmParams retrievalmarket.Params - if testCase.paramsV1 { - rmParams = retrievalmarket.NewParamsV1(pricePerByte, paymentInterval, paymentIntervalIncrease, testCase.selector, nil) - - } else { - rmParams = retrievalmarket.NewParamsV0(pricePerByte, paymentInterval, paymentIntervalIncrease) - } - - // *** Retrieve the piece - did, err := client.Retrieve(bgCtx, payloadCID, rmParams, expectedTotal, retrievalPeer.ID, clientPaymentChannel, retrievalPeer.Address) - assert.Equal(t, did, retrievalmarket.DealID(0)) - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(bgCtx, 10*time.Second) - defer cancel() - - // verify that client subscribers will be notified of state changes - var clientDealState retrievalmarket.ClientDealState - select { - case <-ctx.Done(): - t.Error("deal never completed") - t.FailNow() - case clientDealState = <-clientDealStateChan: - } - assert.Equal(t, clientDealState.PaymentInfo.Lane, expectedVoucher.Lane) - require.NotNil(t, createdChan) - require.Equal(t, expectedTotal, createdChan.amt) - require.Equal(t, clientPaymentChannel, *newLaneAddr) - // verify that the voucher was saved/seen by the client with correct values - require.NotNil(t, createdVoucher) - tut.TestVoucherEquality(t, createdVoucher, expectedVoucher) - - ctx, cancel = 
context.WithTimeout(bgCtx, 5*time.Second) - defer cancel() - var providerDealState retrievalmarket.ProviderDealState - select { - case <-ctx.Done(): - t.Error("provider never saw completed deal") - t.FailNow() - case providerDealState = <-providerDealStateChan: - } - - require.Equal(t, retrievalmarket.DealStatusCompleted, providerDealState.Status) - // TODO this is terrible, but it's temporary until the test harness refactor - // in the resuming retrieval deals branch is done - // https://github.com/filecoin-project/go-fil-markets/issues/65 - if testCase.decider != nil { - assert.True(t, customDeciderRan) - } - // verify that the provider saved the same voucher values - providerNode.VerifyExpectations(t) - testData.VerifyFileTransferred(t, pieceLink, false, testCase.filesize) - }) - } - -} - -func setupClient( - clientPaymentChannel address.Address, - expectedVoucher *paych.SignedVoucher, - nw1 rmnet.RetrievalMarketNetwork, - testData *tut.Libp2pTestData, - addFunds bool, -) ( - *pmtChan, - *address.Address, - *paych.SignedVoucher, - retrievalmarket.RetrievalClient, - error) { - var createdChan pmtChan - paymentChannelRecorder := func(client, miner address.Address, amt abi.TokenAmount) { - createdChan = pmtChan{client, miner, amt} - } - - var newLaneAddr address.Address - laneRecorder := func(paymentChannel address.Address) { - newLaneAddr = paymentChannel - } - - var createdVoucher paych.SignedVoucher - paymentVoucherRecorder := func(v *paych.SignedVoucher) { - createdVoucher = *v - } - cids := tut.GenerateCids(2) - clientNode := testnodes.NewTestRetrievalClientNode(testnodes.TestRetrievalClientNodeParams{ - AddFundsOnly: addFunds, - PayCh: clientPaymentChannel, - Lane: expectedVoucher.Lane, - Voucher: expectedVoucher, - PaymentChannelRecorder: paymentChannelRecorder, - AllocateLaneRecorder: laneRecorder, - PaymentVoucherRecorder: paymentVoucherRecorder, - CreatePaychCID: cids[0], - AddFundsCID: cids[1], - }) - client, err := retrievalimpl.NewClient(nw1, 
testData.Bs1, clientNode, &tut.TestPeerResolver{}, testData.Ds1, testData.RetrievalStoredCounter1) - return &createdChan, &newLaneAddr, &createdVoucher, client, err -} - -func setupProvider(t *testing.T, - testData *tut.Libp2pTestData, - payloadCID cid.Cid, - pieceInfo piecestore.PieceInfo, - expectedQR retrievalmarket.QueryResponse, - providerPaymentAddr address.Address, - providerNode retrievalmarket.RetrievalProviderNode, - decider retrievalimpl.DealDecider, -) retrievalmarket.RetrievalProvider { - nw2 := rmnet.NewFromLibp2pHost(testData.Host2) - pieceStore := tut.NewTestPieceStore() - expectedPiece := tut.GenerateCids(1)[0] - cidInfo := piecestore.CIDInfo{ - PieceBlockLocations: []piecestore.PieceBlockLocation{ - { - PieceCID: expectedPiece, - }, - }, - } - pieceStore.ExpectCID(payloadCID, cidInfo) - pieceStore.ExpectPiece(expectedPiece, pieceInfo) - provider, err := retrievalimpl.NewProvider(providerPaymentAddr, providerNode, nw2, - pieceStore, testData.Bs2, testData.Ds2, - retrievalimpl.DealDeciderOpt(decider)) - require.NoError(t, err) - provider.SetPaymentInterval(expectedQR.MaxPaymentInterval, expectedQR.MaxPaymentIntervalIncrease) - provider.SetPricePerByte(expectedQR.MinPricePerByte) - require.NoError(t, provider.Start()) - return provider -} - -type pmtChan struct { - client, miner address.Address - amt abi.TokenAmount -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider.go deleted file mode 100644 index b362b4be86..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider.go +++ /dev/null @@ -1,427 +0,0 @@ -package retrievalimpl - -import ( - "context" - "errors" - "reflect" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - 
"github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockio" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/blockunsealing" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/shared" -) - -// RetrievalProviderOption is a function that configures a retrieval provider -type RetrievalProviderOption func(p *Provider) - -// DealDecider is a function that makes a decision about whether to accept a deal -type DealDecider func(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) - -// Provider is the production implementation of the RetrievalProvider interface -type Provider struct { - bs blockstore.Blockstore - node retrievalmarket.RetrievalProviderNode - network rmnet.RetrievalMarketNetwork - paymentInterval uint64 - paymentIntervalIncrease uint64 - minerAddress address.Address - pieceStore piecestore.PieceStore - pricePerByte abi.TokenAmount - subscribers []retrievalmarket.ProviderSubscriber - subscribersLk sync.RWMutex - dealStreams map[retrievalmarket.ProviderDealIdentifier]rmnet.RetrievalDealStream - dealStreamsLk sync.Mutex - blockReaders map[retrievalmarket.ProviderDealIdentifier]blockio.BlockReader - blockReadersLk sync.Mutex - stateMachines fsm.Group - dealDecider DealDecider -} - -var _ retrievalmarket.RetrievalProvider = new(Provider) -var _ providerstates.ProviderDealEnvironment = new(providerDealEnvironment) - -// DefaultPricePerByte is the charge per byte retrieved if 
the miner does -// not specifically set it -var DefaultPricePerByte = abi.NewTokenAmount(2) - -// DefaultPaymentInterval is the baseline interval, set to 1Mb -// if the miner does not explicitly set it otherwise -var DefaultPaymentInterval = uint64(1 << 20) - -// DefaultPaymentIntervalIncrease is the amount interval increases on each payment, -// set to to 1Mb if the miner does not explicitly set it otherwise -var DefaultPaymentIntervalIncrease = uint64(1 << 20) - -// DealDeciderOpt sets a custom protocol -func DealDeciderOpt(dd DealDecider) RetrievalProviderOption { - return func(provider *Provider) { - provider.dealDecider = dd - } -} - -// NewProvider returns a new retrieval Provider -func NewProvider(minerAddress address.Address, node retrievalmarket.RetrievalProviderNode, - network rmnet.RetrievalMarketNetwork, pieceStore piecestore.PieceStore, - bs blockstore.Blockstore, ds datastore.Batching, opts ...RetrievalProviderOption, -) (retrievalmarket.RetrievalProvider, error) { - - p := &Provider{ - bs: bs, - node: node, - network: network, - minerAddress: minerAddress, - pieceStore: pieceStore, - pricePerByte: DefaultPricePerByte, // TODO: allow setting - paymentInterval: DefaultPaymentInterval, - paymentIntervalIncrease: DefaultPaymentIntervalIncrease, - dealStreams: make(map[retrievalmarket.ProviderDealIdentifier]rmnet.RetrievalDealStream), - blockReaders: make(map[retrievalmarket.ProviderDealIdentifier]blockio.BlockReader), - } - statemachines, err := fsm.New(ds, fsm.Parameters{ - Environment: &providerDealEnvironment{p}, - StateType: retrievalmarket.ProviderDealState{}, - StateKeyField: "Status", - Events: providerstates.ProviderEvents, - StateEntryFuncs: providerstates.ProviderStateEntryFuncs, - Notifier: p.notifySubscribers, - }) - if err != nil { - return nil, err - } - p.Configure(opts...) - p.stateMachines = statemachines - return p, nil -} - -// Stop stops handling incoming requests. 
-func (p *Provider) Stop() error { - return p.network.StopHandlingRequests() -} - -// Start begins listening for deals on the given host. -// Start must be called in order to accept incoming deals. -func (p *Provider) Start() error { - return p.network.SetDelegate(p) -} - -// V0 - -// SetPricePerByte sets the price per byte a miner charges for retrievals -func (p *Provider) SetPricePerByte(price abi.TokenAmount) { - p.pricePerByte = price -} - -// SetPaymentInterval sets the maximum number of bytes a a Provider will send before -// requesting further payment, and the rate at which that value increases -func (p *Provider) SetPaymentInterval(paymentInterval uint64, paymentIntervalIncrease uint64) { - p.paymentInterval = paymentInterval - p.paymentIntervalIncrease = paymentIntervalIncrease -} - -// unsubscribeAt returns a function that removes an item from the subscribers list by comparing -// their reflect.ValueOf before pulling the item out of the slice. Does not preserve order. -// Subsequent, repeated calls to the func with the same Subscriber are a no-op. 
-func (p *Provider) unsubscribeAt(sub retrievalmarket.ProviderSubscriber) retrievalmarket.Unsubscribe { - return func() { - p.subscribersLk.Lock() - defer p.subscribersLk.Unlock() - curLen := len(p.subscribers) - for i, el := range p.subscribers { - if reflect.ValueOf(sub) == reflect.ValueOf(el) { - p.subscribers[i] = p.subscribers[curLen-1] - p.subscribers = p.subscribers[:curLen-1] - return - } - } - } -} - -func (p *Provider) notifySubscribers(eventName fsm.EventName, state fsm.StateType) { - p.subscribersLk.RLock() - defer p.subscribersLk.RUnlock() - evt := eventName.(retrievalmarket.ProviderEvent) - ds := state.(retrievalmarket.ProviderDealState) - for _, cb := range p.subscribers { - cb(evt, ds) - } -} - -// SubscribeToEvents listens for events that happen related to client retrievals -func (p *Provider) SubscribeToEvents(subscriber retrievalmarket.ProviderSubscriber) retrievalmarket.Unsubscribe { - p.subscribersLk.Lock() - p.subscribers = append(p.subscribers, subscriber) - p.subscribersLk.Unlock() - - return p.unsubscribeAt(subscriber) -} - -// V1 - -func (p *Provider) SetPricePerUnseal(price abi.TokenAmount) { - panic("not implemented") -} - -// ListDeals lists in all known retrieval deals -func (p *Provider) ListDeals() map[retrievalmarket.ProviderDealID]retrievalmarket.ProviderDealState { - var deals []retrievalmarket.ProviderDealState - _ = p.stateMachines.List(&deals) - dealMap := make(map[retrievalmarket.ProviderDealID]retrievalmarket.ProviderDealState) - for _, deal := range deals { - dealMap[retrievalmarket.ProviderDealID{From: deal.Receiver, ID: deal.ID}] = deal - } - return dealMap -} - -/* -HandleQueryStream is called by the network implementation whenever a new message is received on the query protocol - -A Provider handling a retrieval `Query` does the following: - -1. Get the node's chain head in order to get its miner worker address. - -2. Look in its piece store for determine if it can serve the given payload CID. - -3. 
Combine these results with its existing parameters for retrieval deals to construct a `retrievalmarket.QueryResponse` struct. - -4.0 Writes this response to the `Query` stream. - -The connection is kept open only as long as the query-response exchange. -*/ -func (p *Provider) HandleQueryStream(stream rmnet.RetrievalQueryStream) { - defer stream.Close() - query, err := stream.ReadQuery() - if err != nil { - return - } - - answer := retrievalmarket.QueryResponse{ - Status: retrievalmarket.QueryResponseUnavailable, - PieceCIDFound: retrievalmarket.QueryItemUnavailable, - MinPricePerByte: p.pricePerByte, - MaxPaymentInterval: p.paymentInterval, - MaxPaymentIntervalIncrease: p.paymentIntervalIncrease, - } - - ctx := context.TODO() - - tok, _, err := p.node.GetChainHead(ctx) - if err != nil { - log.Errorf("Retrieval query: GetChainHead: %s", err) - return - } - - paymentAddress, err := p.node.GetMinerWorkerAddress(ctx, p.minerAddress, tok) - if err != nil { - log.Errorf("Retrieval query: Lookup Payment Address: %s", err) - answer.Status = retrievalmarket.QueryResponseError - answer.Message = err.Error() - } else { - answer.PaymentAddress = paymentAddress - - pieceCID := cid.Undef - if query.PieceCID != nil { - pieceCID = *query.PieceCID - } - pieceInfo, err := getPieceInfoFromCid(p.pieceStore, query.PayloadCID, pieceCID) - - if err == nil && len(pieceInfo.Deals) > 0 { - answer.Status = retrievalmarket.QueryResponseAvailable - // TODO: get price, look for already unsealed ref to reduce work - answer.Size = uint64(pieceInfo.Deals[0].Length) // TODO: verify on intermediate - answer.PieceCIDFound = retrievalmarket.QueryItemAvailable - } - - if err != nil && !xerrors.Is(err, retrievalmarket.ErrNotFound) { - log.Errorf("Retrieval query: GetRefs: %s", err) - answer.Status = retrievalmarket.QueryResponseError - answer.Message = err.Error() - } - - } - if err := stream.WriteQueryResponse(answer); err != nil { - log.Errorf("Retrieval query: WriteCborRPC: %s", err) - return - } -} 
- -/* -HandleDealStream is called by the network implementation whenever a new message is received on the deal protocol - -When a provider receives a DealProposal of the deal protocol, it takes the following steps: - -1. Tells its statemachine to begin tracking this deal state by dealID. - -2. Constructs a `blockunsealing.LoaderWithUnsealing` that abstracts the process of unsealing pieces as needed when loading blocks - -3. Constructs a `blockio.BlockReader` and adds it to its dealID-keyed map of block readers. - -4. Triggers a `ProviderEventOpen` event on its statemachine. - -From then on, the statemachine controls the deal flow in the client. Other components may listen for events in this flow by calling -`SubscribeToEvents` on the Provider. The Provider handles loading the next block to send to the client.*/ -func (p *Provider) HandleDealStream(stream rmnet.RetrievalDealStream) { - // read deal proposal (or fail) - err := p.newProviderDeal(stream) - if err != nil { - log.Error(err) - stream.Close() - } -} - -// Configure reconfigures a provider after initialization -func (p *Provider) Configure(opts ...RetrievalProviderOption) { - for _, opt := range opts { - opt(p) - } -} - -func (p *Provider) newProviderDeal(stream rmnet.RetrievalDealStream) error { - dealProposal, err := stream.ReadDealProposal() - if err != nil { - return err - } - - pds := retrievalmarket.ProviderDealState{ - DealProposal: dealProposal, - Receiver: stream.Receiver(), - } - - p.dealStreamsLk.Lock() - p.dealStreams[pds.Identifier()] = stream - p.dealStreamsLk.Unlock() - - loaderWithUnsealing := blockunsealing.NewLoaderWithUnsealing(context.TODO(), p.bs, p.pieceStore, cario.NewCarIO(), p.node.UnsealSector, dealProposal.PieceCID) - - // validate the selector, if provided - var sel ipld.Node - if dealProposal.Params.Selector != nil { - sel, err = retrievalmarket.DecodeNode(dealProposal.Params.Selector) - if err != nil { - return xerrors.Errorf("selector is invalid: %w", err) - } - } else { - sel 
= shared.AllSelector() - } - - br := blockio.NewSelectorBlockReader(cidlink.Link{Cid: dealProposal.PayloadCID}, sel, loaderWithUnsealing.Load) - p.blockReadersLk.Lock() - p.blockReaders[pds.Identifier()] = br - p.blockReadersLk.Unlock() - - // start the deal processing, synchronously so we can log the error and close the stream if it doesn't start - err = p.stateMachines.Begin(pds.Identifier(), &pds) - if err != nil { - return err - } - - err = p.stateMachines.Send(pds.Identifier(), retrievalmarket.ProviderEventOpen) - if err != nil { - return err - } - - return nil -} - -type providerDealEnvironment struct { - p *Provider -} - -func (p *providerDealEnvironment) Node() retrievalmarket.RetrievalProviderNode { - return p.p.node -} - -func (p *providerDealEnvironment) DealStream(id retrievalmarket.ProviderDealIdentifier) rmnet.RetrievalDealStream { - p.p.dealStreamsLk.Lock() - defer p.p.dealStreamsLk.Unlock() - return p.p.dealStreams[id] -} - -func (p *providerDealEnvironment) CheckDealParams(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64) error { - if pricePerByte.LessThan(p.p.pricePerByte) { - return errors.New("Price per byte too low") - } - if paymentInterval > p.p.paymentInterval { - return errors.New("Payment interval too large") - } - if paymentIntervalIncrease > p.p.paymentIntervalIncrease { - return errors.New("Payment interval increase too large") - } - return nil -} - -func (p *providerDealEnvironment) NextBlock(ctx context.Context, id retrievalmarket.ProviderDealIdentifier) (retrievalmarket.Block, bool, error) { - p.p.blockReadersLk.Lock() - br, ok := p.p.blockReaders[id] - p.p.blockReadersLk.Unlock() - if !ok { - return retrievalmarket.Block{}, false, errors.New("Could not read block") - } - return br.ReadBlock(ctx) -} - -func (p *providerDealEnvironment) GetPieceSize(c cid.Cid, pieceCID *cid.Cid) (uint64, error) { - inPieceCid := cid.Undef - if pieceCID != nil { - inPieceCid = *pieceCID - } - pieceInfo, err := 
getPieceInfoFromCid(p.p.pieceStore, c, inPieceCid) - if err != nil { - return 0, err - } - if len(pieceInfo.Deals) == 0 { - return 0, errors.New("Not enough piece info") - } - return pieceInfo.Deals[0].Length, nil -} - -func (p *providerDealEnvironment) RunDealDecisioningLogic(ctx context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { - if p.p.dealDecider == nil { - return true, "", nil - } - return p.p.dealDecider(ctx, state) -} - -func getPieceInfoFromCid(pieceStore piecestore.PieceStore, payloadCID, pieceCID cid.Cid) (piecestore.PieceInfo, error) { - cidInfo, err := pieceStore.GetCIDInfo(payloadCID) - if err != nil { - return piecestore.PieceInfoUndefined, xerrors.Errorf("get cid info: %w", err) - } - var lastErr error - for _, pieceBlockLocation := range cidInfo.PieceBlockLocations { - pieceInfo, err := pieceStore.GetPieceInfo(pieceBlockLocation.PieceCID) - if err == nil { - if pieceCID.Equals(cid.Undef) || pieceInfo.PieceCID.Equals(pieceCID) { - return pieceInfo, nil - } - } - lastErr = err - } - if lastErr == nil { - lastErr = xerrors.Errorf("unknown pieceCID %s", pieceCID.String()) - } - return piecestore.PieceInfoUndefined, xerrors.Errorf("could not locate piece: %w", lastErr) -} - -// ProviderFSMParameterSpec is a valid set of parameters for a provider FSM - used in doc generation -var ProviderFSMParameterSpec = fsm.Parameters{ - Environment: &providerDealEnvironment{}, - StateType: retrievalmarket.ProviderDealState{}, - StateKeyField: "Status", - Events: providerstates.ProviderEvents, - StateEntryFuncs: providerstates.ProviderStateEntryFuncs, -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider_test.go deleted file mode 100644 index 687c2c8bde..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/provider_test.go +++ /dev/null @@ -1,281 +0,0 @@ -package 
retrievalimpl_test - -import ( - "context" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - spect "github.com/filecoin-project/specs-actors/support/testing" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestHandleQueryStream(t *testing.T) { - - payloadCID := tut.GenerateCids(1)[0] - expectedPeer := peer.ID("somepeer") - expectedSize := uint64(1234) - - expectedPieceCID := tut.GenerateCids(1)[0] - expectedCIDInfo := piecestore.CIDInfo{ - PieceBlockLocations: []piecestore.PieceBlockLocation{ - { - PieceCID: expectedPieceCID, - }, - }, - } - expectedPiece := piecestore.PieceInfo{ - Deals: []piecestore.DealInfo{ - { - Length: expectedSize, - }, - }, - } - expectedAddress := address.TestAddress2 - expectedPricePerByte := abi.NewTokenAmount(4321) - expectedPaymentInterval := uint64(4567) - expectedPaymentIntervalIncrease := uint64(100) - - readWriteQueryStream := func() network.RetrievalQueryStream { - qRead, qWrite := tut.QueryReadWriter() - qrRead, qrWrite := tut.QueryResponseReadWriter() - qs := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ - PeerID: expectedPeer, - Reader: qRead, - Writer: qWrite, - RespReader: qrRead, - RespWriter: qrWrite, - }) - return qs - } - - receiveStreamOnProvider := func(qs network.RetrievalQueryStream, pieceStore 
piecestore.PieceStore) { - node := testnodes.NewTestRetrievalProviderNode() - ds := dss.MutexWrap(datastore.NewMapDatastore()) - bs := bstore.NewBlockstore(ds) - net := tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}) - c, err := retrievalimpl.NewProvider(expectedAddress, node, net, pieceStore, bs, ds) - require.NoError(t, err) - c.SetPricePerByte(expectedPricePerByte) - c.SetPaymentInterval(expectedPaymentInterval, expectedPaymentIntervalIncrease) - _ = c.Start() - net.ReceiveQueryStream(qs) - } - - testCases := []struct { - name string - query retrievalmarket.Query - expResp retrievalmarket.QueryResponse - expErr string - expFunc func(t *testing.T, pieceStore *tut.TestPieceStore) - }{ - {name: "When PieceCID is not provided and PayloadCID is found", - expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore) { - pieceStore.ExpectCID(payloadCID, expectedCIDInfo) - pieceStore.ExpectPiece(expectedPieceCID, expectedPiece) - }, - query: retrievalmarket.Query{PayloadCID: payloadCID}, - expResp: retrievalmarket.QueryResponse{ - Status: retrievalmarket.QueryResponseAvailable, - PieceCIDFound: retrievalmarket.QueryItemAvailable, - Size: expectedSize, - }, - }, - {name: "When PieceCID is provided and both PieceCID and PayloadCID are found", - expFunc: func(t *testing.T, pieceStore *tut.TestPieceStore) { - loadPieceCIDS(t, pieceStore, payloadCID, expectedPieceCID) - }, - query: retrievalmarket.Query{ - PayloadCID: payloadCID, - QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID}, - }, - expResp: retrievalmarket.QueryResponse{ - Status: retrievalmarket.QueryResponseAvailable, - PieceCIDFound: retrievalmarket.QueryItemAvailable, - Size: expectedSize, - }, - }, - {name: "When QueryParams has PieceCID and is missing", - expFunc: func(t *testing.T, ps *tut.TestPieceStore) { - loadPieceCIDS(t, ps, payloadCID, cid.Undef) - ps.ExpectCID(payloadCID, expectedCIDInfo) - ps.ExpectMissingPiece(expectedPieceCID) - }, - query: retrievalmarket.Query{ - 
PayloadCID: payloadCID, - QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID}, - }, - expResp: retrievalmarket.QueryResponse{ - Status: retrievalmarket.QueryResponseUnavailable, - PieceCIDFound: retrievalmarket.QueryItemUnavailable, - }, - }, - {name: "When CID info not found", - expFunc: func(t *testing.T, ps *tut.TestPieceStore) { - ps.ExpectMissingCID(payloadCID) - }, - query: retrievalmarket.Query{ - PayloadCID: payloadCID, - QueryParams: retrievalmarket.QueryParams{PieceCID: &expectedPieceCID}, - }, - expResp: retrievalmarket.QueryResponse{ - Status: retrievalmarket.QueryResponseUnavailable, - PieceCIDFound: retrievalmarket.QueryItemUnavailable, - }, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - qs := readWriteQueryStream() - err := qs.WriteQuery(tc.query) - require.NoError(t, err) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(payloadCID, expectedCIDInfo) - pieceStore.ExpectMissingPiece(expectedPieceCID) - - tc.expFunc(t, pieceStore) - - receiveStreamOnProvider(qs, pieceStore) - - actualResp, err := qs.ReadQueryResponse() - pieceStore.VerifyExpectations(t) - if tc.expErr == "" { - assert.NoError(t, err) - } else { - assert.EqualError(t, err, tc.expErr) - } - - tc.expResp.PaymentAddress = expectedAddress - tc.expResp.MinPricePerByte = expectedPricePerByte - tc.expResp.MaxPaymentInterval = expectedPaymentInterval - tc.expResp.MaxPaymentIntervalIncrease = expectedPaymentIntervalIncrease - assert.Equal(t, tc.expResp, actualResp) - }) - } - - t.Run("error reading piece", func(t *testing.T) { - qs := readWriteQueryStream() - err := qs.WriteQuery(retrievalmarket.Query{ - PayloadCID: payloadCID, - }) - require.NoError(t, err) - pieceStore := tut.NewTestPieceStore() - - receiveStreamOnProvider(qs, pieceStore) - - response, err := qs.ReadQueryResponse() - require.NoError(t, err) - require.Equal(t, response.Status, retrievalmarket.QueryResponseError) - require.NotEmpty(t, response.Message) - }) - - 
t.Run("when ReadDealStatusRequest fails", func(t *testing.T) { - qs := readWriteQueryStream() - pieceStore := tut.NewTestPieceStore() - - receiveStreamOnProvider(qs, pieceStore) - - response, err := qs.ReadQueryResponse() - require.NotNil(t, err) - require.Equal(t, response, retrievalmarket.QueryResponseUndefined) - }) - - t.Run("when WriteDealStatusResponse fails", func(t *testing.T) { - qRead, qWrite := tut.QueryReadWriter() - qs := tut.NewTestRetrievalQueryStream(tut.TestQueryStreamParams{ - PeerID: expectedPeer, - Reader: qRead, - Writer: qWrite, - RespWriter: tut.FailResponseWriter, - }) - err := qs.WriteQuery(retrievalmarket.Query{ - PayloadCID: payloadCID, - }) - require.NoError(t, err) - pieceStore := tut.NewTestPieceStore() - pieceStore.ExpectCID(payloadCID, expectedCIDInfo) - pieceStore.ExpectPiece(expectedPieceCID, expectedPiece) - - receiveStreamOnProvider(qs, pieceStore) - - pieceStore.VerifyExpectations(t) - }) - -} - -func TestProviderConfigOpts(t *testing.T) { - var sawOpt int - opt1 := func(p *retrievalimpl.Provider) { sawOpt++ } - opt2 := func(p *retrievalimpl.Provider) { sawOpt += 2 } - ds := datastore.NewMapDatastore() - bs := bstore.NewBlockstore(ds) - p, err := retrievalimpl.NewProvider( - spect.NewIDAddr(t, 2344), - testnodes.NewTestRetrievalProviderNode(), - tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}), - tut.NewTestPieceStore(), - bs, ds, opt1, opt2, - ) - require.NoError(t, err) - assert.NotNil(t, p) - assert.Equal(t, 3, sawOpt) - - // just test that we can create a DealDeciderOpt function and that it runs - // successfully in the constructor - ddOpt := retrievalimpl.DealDeciderOpt( - func(_ context.Context, state retrievalmarket.ProviderDealState) (bool, string, error) { - return true, "yes", nil - }) - - p, err = retrievalimpl.NewProvider( - spect.NewIDAddr(t, 2344), - testnodes.NewTestRetrievalProviderNode(), - tut.NewTestRetrievalMarketNetwork(tut.TestNetworkParams{}), - tut.NewTestPieceStore(), - bs, ds, ddOpt) - 
require.NoError(t, err) - require.NotNil(t, p) -} - -// loadPieceCIDS sets expectations to receive expectedPieceCID and 3 other random PieceCIDs to -// disinguish the case of a PayloadCID is found but the PieceCID is not -func loadPieceCIDS(t *testing.T, pieceStore *tut.TestPieceStore, expPayloadCID, expectedPieceCID cid.Cid) { - - otherPieceCIDs := tut.GenerateCids(3) - expectedSize := uint64(1234) - - blockLocs := make([]piecestore.PieceBlockLocation, 4) - expectedPieceInfo := piecestore.PieceInfo{ - PieceCID: expectedPieceCID, - Deals: []piecestore.DealInfo{ - { - Length: expectedSize, - }, - }, - } - - blockLocs[0] = piecestore.PieceBlockLocation{PieceCID: expectedPieceCID} - for i, pieceCID := range otherPieceCIDs { - blockLocs[i+1] = piecestore.PieceBlockLocation{PieceCID: pieceCID} - pi := expectedPieceInfo - pi.PieceCID = pieceCID - } - if expectedPieceCID != cid.Undef { - pieceStore.ExpectPiece(expectedPieceCID, expectedPieceInfo) - } - expectedCIDInfo := piecestore.CIDInfo{PieceBlockLocations: blockLocs} - pieceStore.ExpectCID(expPayloadCID, expectedCIDInfo) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/doc.go deleted file mode 100644 index 8a350ba7cb..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -/* -Package providerstates contains state machine logic relating to the `RetrievalProvider`. - -provider_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. - -provider_states.go contains state handler functions. - -The following diagram illustrates the operation of the provider state machine. 
This diagram is auto-generated from current code and should remain up to date over time: - -https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/retrievalprovider.mmd.svg - -*/ -package providerstates diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_fsm.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_fsm.go deleted file mode 100644 index c421f3bbfb..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_fsm.go +++ /dev/null @@ -1,109 +0,0 @@ -package providerstates - -import ( - "fmt" - - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "golang.org/x/xerrors" - - rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -func recordError(deal *rm.ProviderDealState, err error) error { - deal.Message = err.Error() - return nil -} - -// ProviderEvents are the events that can happen in a retrieval provider -var ProviderEvents = fsm.Events{ - fsm.Event(rm.ProviderEventOpen). - From(rm.DealStatusNew).ToNoChange(). - Action( - func(deal *rm.ProviderDealState) error { - deal.TotalSent = 0 - deal.FundsReceived = abi.NewTokenAmount(0) - return nil - }, - ), - fsm.Event(rm.ProviderEventDealReceived). - From(rm.DealStatusNew).To(rm.DealStatusAwaitingAcceptance), - fsm.Event(rm.ProviderEventWriteResponseFailed). - FromAny().To(rm.DealStatusErrored). - Action(func(deal *rm.ProviderDealState, err error) error { - deal.Message = xerrors.Errorf("writing deal response: %w", err).Error() - return nil - }), - fsm.Event(rm.ProviderEventDecisioningError). - From(rm.DealStatusAwaitingAcceptance).To(rm.DealStatusErrored). - Action(recordError), - fsm.Event(rm.ProviderEventReadPaymentFailed). - FromAny().To(rm.DealStatusErrored). 
- Action(recordError), - fsm.Event(rm.ProviderEventGetPieceSizeErrored). - From(rm.DealStatusNew).To(rm.DealStatusFailed). - Action(recordError), - fsm.Event(rm.ProviderEventDealNotFound). - From(rm.DealStatusNew).To(rm.DealStatusDealNotFound). - Action(func(deal *rm.ProviderDealState) error { - deal.Message = rm.ErrNotFound.Error() - return nil - }), - fsm.Event(rm.ProviderEventDealRejected). - FromMany(rm.DealStatusNew, rm.DealStatusAwaitingAcceptance).To(rm.DealStatusRejected). - Action(recordError), - fsm.Event(rm.ProviderEventDealAccepted). - From(rm.DealStatusAwaitingAcceptance).To(rm.DealStatusAccepted). - Action(func(deal *rm.ProviderDealState, dealProposal rm.DealProposal) error { - deal.DealProposal = dealProposal - deal.CurrentInterval = deal.PaymentInterval - return nil - }), - fsm.Event(rm.ProviderEventBlockErrored). - FromMany(rm.DealStatusAccepted, rm.DealStatusOngoing).To(rm.DealStatusFailed). - Action(recordError), - fsm.Event(rm.ProviderEventBlocksCompleted). - FromMany(rm.DealStatusAccepted, rm.DealStatusOngoing).To(rm.DealStatusBlocksComplete), - fsm.Event(rm.ProviderEventPaymentRequested). - FromMany(rm.DealStatusAccepted, rm.DealStatusOngoing).To(rm.DealStatusFundsNeeded). - From(rm.DealStatusBlocksComplete).To(rm.DealStatusFundsNeededLastPayment). - Action(func(deal *rm.ProviderDealState, totalSent uint64) error { - fmt.Println("Requesting payment") - deal.TotalSent = totalSent - return nil - }), - fsm.Event(rm.ProviderEventSaveVoucherFailed). - FromMany(rm.DealStatusFundsNeeded, rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFailed). - Action(recordError), - fsm.Event(rm.ProviderEventPartialPaymentReceived). - FromMany(rm.DealStatusFundsNeeded, rm.DealStatusFundsNeededLastPayment).ToNoChange(). - Action(func(deal *rm.ProviderDealState, fundsReceived abi.TokenAmount) error { - deal.FundsReceived = big.Add(deal.FundsReceived, fundsReceived) - return nil - }), - fsm.Event(rm.ProviderEventPaymentReceived). 
- From(rm.DealStatusFundsNeeded).To(rm.DealStatusOngoing). - From(rm.DealStatusFundsNeededLastPayment).To(rm.DealStatusFinalizing). - Action(func(deal *rm.ProviderDealState, fundsReceived abi.TokenAmount) error { - deal.FundsReceived = big.Add(deal.FundsReceived, fundsReceived) - deal.CurrentInterval += deal.PaymentIntervalIncrease - return nil - }), - fsm.Event(rm.ProviderEventComplete). - From(rm.DealStatusFinalizing).To(rm.DealStatusCompleted), -} - -// ProviderStateEntryFuncs are the handlers for different states in a retrieval provider -var ProviderStateEntryFuncs = fsm.StateEntryFuncs{ - rm.DealStatusNew: ReceiveDeal, - rm.DealStatusFailed: SendFailResponse, - rm.DealStatusRejected: SendFailResponse, - rm.DealStatusDealNotFound: SendFailResponse, - rm.DealStatusOngoing: SendBlocks, - rm.DealStatusAwaitingAcceptance: DecideOnDeal, - rm.DealStatusAccepted: SendBlocks, - rm.DealStatusFundsNeeded: ProcessPayment, - rm.DealStatusFundsNeededLastPayment: ProcessPayment, - rm.DealStatusFinalizing: Finalize, -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_states.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_states.go deleted file mode 100644 index 611add0051..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_states.go +++ /dev/null @@ -1,192 +0,0 @@ -package providerstates - -import ( - "context" - "errors" - - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" -) - -// ProviderDealEnvironment is a bridge to the environment a provider deal is executing in -// It provides access to 
relevant functionality on the retrieval provider -type ProviderDealEnvironment interface { - // Node returns the node interface for this deal - Node() rm.RetrievalProviderNode - // GetPieceSize returns the size of the piece for a given payload CID, - // looking only in the specified PieceCID if given - GetPieceSize(c cid.Cid, pieceCID *cid.Cid) (uint64, error) - // DealStream returns the relevant libp2p interface for this deal - DealStream(id rm.ProviderDealIdentifier) rmnet.RetrievalDealStream - // NextBlock returns the next block for the given payload, unsealing if neccesary - NextBlock(context.Context, rm.ProviderDealIdentifier) (rm.Block, bool, error) - // CheckDealParams verifies the given deal params are acceptable - CheckDealParams(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64) error - // RunDealDecisioningLogic runs custom deal decision logic to decide if a deal is accepted, if present - RunDealDecisioningLogic(ctx context.Context, state rm.ProviderDealState) (bool, string, error) -} - -// ReceiveDeal receives and evaluates a deal proposal -func ReceiveDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { - dealProposal := deal.DealProposal - - // verify we have the piece - _, err := environment.GetPieceSize(dealProposal.PayloadCID, dealProposal.PieceCID) - if err != nil { - if err == rm.ErrNotFound { - return ctx.Trigger(rm.ProviderEventDealNotFound) - } - return ctx.Trigger(rm.ProviderEventGetPieceSizeErrored, err) - } - - // check that the deal parameters match our required parameters or - // reject outright - err = environment.CheckDealParams(dealProposal.PricePerByte, - dealProposal.PaymentInterval, - dealProposal.PaymentIntervalIncrease) - if err != nil { - return ctx.Trigger(rm.ProviderEventDealRejected, err) - } - return ctx.Trigger(rm.ProviderEventDealReceived) -} - -// DecideOnDeal runs any custom deal decider and if it passes, tell client -// it's accepted, and move to 
the next state -func DecideOnDeal(ctx fsm.Context, env ProviderDealEnvironment, state rm.ProviderDealState) error { - accepted, reason, err := env.RunDealDecisioningLogic(ctx.Context(), state) - if err != nil { - return ctx.Trigger(rm.ProviderEventDecisioningError, err) - } - if !accepted { - return ctx.Trigger(rm.ProviderEventDealRejected, errors.New(reason)) - } - err = env.DealStream(state.Identifier()).WriteDealResponse(rm.DealResponse{ - Status: rm.DealStatusAccepted, - ID: state.ID, - }) - if err != nil { - return ctx.Trigger(rm.ProviderEventWriteResponseFailed, err) - } - - return ctx.Trigger(rm.ProviderEventDealAccepted, state.DealProposal) -} - -// SendBlocks sends blocks to the client until funds are needed -func SendBlocks(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { - totalSent := deal.TotalSent - totalPaidFor := big.Div(deal.FundsReceived, deal.PricePerByte).Uint64() - var blocks []rm.Block - - // read blocks until we reach current interval - responseStatus := rm.DealStatusFundsNeeded - for totalSent-totalPaidFor < deal.CurrentInterval { - block, done, err := environment.NextBlock(ctx.Context(), deal.Identifier()) - if err != nil { - return ctx.Trigger(rm.ProviderEventBlockErrored, err) - } - blocks = append(blocks, block) - totalSent += uint64(len(block.Data)) - if done { - err := ctx.Trigger(rm.ProviderEventBlocksCompleted) - if err != nil { - return err - } - responseStatus = rm.DealStatusFundsNeededLastPayment - break - } - } - - // send back response of blocks plus payment owed - paymentOwed := big.Mul(abi.NewTokenAmount(int64(totalSent-totalPaidFor)), deal.PricePerByte) - - err := environment.DealStream(deal.Identifier()).WriteDealResponse(rm.DealResponse{ - ID: deal.ID, - Status: responseStatus, - PaymentOwed: paymentOwed, - Blocks: blocks, - }) - - if err != nil { - return ctx.Trigger(rm.ProviderEventWriteResponseFailed, err) - } - - return ctx.Trigger(rm.ProviderEventPaymentRequested, totalSent) -} - 
-// ProcessPayment processes a payment from the client and resumes the deal if successful -func ProcessPayment(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { - // read payment, or fail - payment, err := environment.DealStream(deal.Identifier()).ReadDealPayment() - if err != nil { - return ctx.Trigger(rm.ProviderEventReadPaymentFailed, xerrors.Errorf("reading payment: %w", err)) - } - - tok, _, err := environment.Node().GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(rm.ProviderEventSaveVoucherFailed, err) - } - - // attempt to redeem voucher - // (totalSent * pricePerbyte) - fundsReceived - paymentOwed := big.Sub(big.Mul(abi.NewTokenAmount(int64(deal.TotalSent)), deal.PricePerByte), deal.FundsReceived) - received, err := environment.Node().SavePaymentVoucher(ctx.Context(), payment.PaymentChannel, payment.PaymentVoucher, nil, paymentOwed, tok) - if err != nil { - return ctx.Trigger(rm.ProviderEventSaveVoucherFailed, err) - } - - // received = 0 / err = nil indicates that the voucher was already saved, but this may be ok - // if we are making a deal with ourself - in this case, we'll instead calculate received - // but subtracting from fund sent - if big.Cmp(received, big.Zero()) == 0 { - received = big.Sub(payment.PaymentVoucher.Amount, deal.FundsReceived) - } - - // check if all payments are received to continue the deal, or send updated required payment - if received.LessThan(paymentOwed) { - err := environment.DealStream(deal.Identifier()).WriteDealResponse(rm.DealResponse{ - ID: deal.ID, - Status: deal.Status, - PaymentOwed: big.Sub(paymentOwed, received), - }) - if err != nil { - return ctx.Trigger(rm.ProviderEventWriteResponseFailed, err) - } - return ctx.Trigger(rm.ProviderEventPartialPaymentReceived, received) - } - - // resume deal - return ctx.Trigger(rm.ProviderEventPaymentReceived, received) -} - -// SendFailResponse sends a failure response before closing the deal -func SendFailResponse(ctx 
fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { - stream := environment.DealStream(deal.Identifier()) - err := stream.WriteDealResponse(rm.DealResponse{ - Status: deal.Status, - Message: deal.Message, - ID: deal.ID, - }) - if err != nil { - return ctx.Trigger(rm.ProviderEventWriteResponseFailed, err) - } - return nil -} - -// Finalize completes a deal -func Finalize(ctx fsm.Context, environment ProviderDealEnvironment, deal rm.ProviderDealState) error { - err := environment.DealStream(deal.Identifier()).WriteDealResponse(rm.DealResponse{ - Status: rm.DealStatusCompleted, - ID: deal.ID, - }) - if err != nil { - return ctx.Trigger(rm.ProviderEventWriteResponseFailed, err) - } - - return ctx.Trigger(rm.ProviderEventComplete) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_states_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_states_test.go deleted file mode 100644 index e3dccf7c46..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates/provider_states_test.go +++ /dev/null @@ -1,485 +0,0 @@ -package providerstates_test - -import ( - "context" - "crypto/rand" - "errors" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-statemachine/fsm" - fsmtest "github.com/filecoin-project/go-statemachine/fsm/testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - 
"github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/providerstates" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - rmtesting "github.com/filecoin-project/go-fil-markets/retrievalmarket/testing" - testnet "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestReceiveDeal(t *testing.T) { - ctx := context.Background() - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ProviderDealState{}, "Status", providerstates.ProviderEvents) - require.NoError(t, err) - runReceiveDeal := func(t *testing.T, - node *testnodes.TestRetrievalProviderNode, - params testnet.TestDealStreamParams, - setupEnv func(e *rmtesting.TestProviderDealEnvironment), - dealState *retrievalmarket.ProviderDealState) { - ds := testnet.NewTestRetrievalDealStream(params) - environment := rmtesting.NewTestProviderDealEnvironment(node, ds, rmtesting.TrivalTestDecider, nil) - setupEnv(environment) - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := providerstates.ReceiveDeal(fsmCtx, environment, *dealState) - require.NoError(t, err) - environment.VerifyExpectations(t) - node.VerifyExpectations(t) - fsmCtx.ReplayEvents(t, dealState) - } - - expectedPiece := testnet.GenerateCids(1)[0] - proposal := retrievalmarket.DealProposal{ - ID: retrievalmarket.DealID(10), - PayloadCID: expectedPiece, - Params: retrievalmarket.NewParamsV0(defaultPricePerByte, defaultCurrentInterval, defaultIntervalIncrease), - } - - blankDealState := func() *retrievalmarket.ProviderDealState { - return &retrievalmarket.ProviderDealState{ - DealProposal: proposal, - Status: retrievalmarket.DealStatusNew, - TotalSent: 0, - FundsReceived: abi.NewTokenAmount(0), - } - } - - t.Run("it works", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - dealState := blankDealState() - dealStreamParams := testnet.TestDealStreamParams{ - ResponseWriter: testnet.ExpectDealResponseWriter(t, retrievalmarket.DealResponse{ - Status: 
retrievalmarket.DealStatusAccepted, - ID: proposal.ID, - }), - } - setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { - fe.ExpectPiece(expectedPiece, 10000) - fe.ExpectParams(defaultPricePerByte, defaultCurrentInterval, defaultIntervalIncrease, nil) - } - runReceiveDeal(t, node, dealStreamParams, setupEnv, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusAwaitingAcceptance) - require.Equal(t, dealState.DealProposal, proposal) - require.Empty(t, dealState.Message) - }) - - t.Run("missing piece", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - dealState := blankDealState() - dealStreamParams := testnet.TestDealStreamParams{ - ProposalReader: testnet.StubbedDealProposalReader(proposal), - ResponseWriter: testnet.ExpectDealResponseWriter(t, retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusDealNotFound, - ID: proposal.ID, - Message: retrievalmarket.ErrNotFound.Error(), - }), - } - setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { - fe.ExpectMissingPiece(expectedPiece) - } - runReceiveDeal(t, node, dealStreamParams, setupEnv, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusDealNotFound) - require.NotEmpty(t, dealState.Message) - }) - - t.Run("deal rejected", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - dealState := blankDealState() - message := "Something Terrible Happened" - dealStreamParams := testnet.TestDealStreamParams{ - ProposalReader: testnet.StubbedDealProposalReader(proposal), - ResponseWriter: testnet.ExpectDealResponseWriter(t, retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusRejected, - ID: proposal.ID, - Message: message, - }), - } - setupEnv := func(fe *rmtesting.TestProviderDealEnvironment) { - fe.ExpectPiece(expectedPiece, 10000) - fe.ExpectParams(defaultPricePerByte, defaultCurrentInterval, defaultIntervalIncrease, errors.New(message)) - } - runReceiveDeal(t, node, dealStreamParams, setupEnv, 
dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusRejected) - require.NotEmpty(t, dealState.Message) - }) - -} - -func TestSendBlocks(t *testing.T) { - ctx := context.Background() - node := testnodes.NewTestRetrievalProviderNode() - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ProviderDealState{}, "Status", providerstates.ProviderEvents) - require.NoError(t, err) - runSendBlocks := func(t *testing.T, - params testnet.TestDealStreamParams, - responses []rmtesting.ReadBlockResponse, - dealState *retrievalmarket.ProviderDealState) { - ds := testnet.NewTestRetrievalDealStream(params) - environment := rmtesting.NewTestProviderDealEnvironment(node, ds, rmtesting.TrivalTestDecider, responses) - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := providerstates.SendBlocks(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - } - - t.Run("it works", func(t *testing.T) { - blocks, responses := generateResponses(10, 100, false, false) - dealState := makeDealState(retrievalmarket.DealStatusAccepted) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseWriter: testnet.ExpectDealResponseWriter(t, retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusFundsNeeded, - PaymentOwed: defaultPaymentPerInterval, - Blocks: blocks, - ID: dealState.ID, - }), - } - runSendBlocks(t, dealStreamParams, responses, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeeded) - require.Equal(t, dealState.TotalSent, defaultTotalSent+defaultCurrentInterval) - require.Empty(t, dealState.Message) - }) - - t.Run("it completes", func(t *testing.T) { - blocks, responses := generateResponses(10, 100, true, false) - dealState := makeDealState(retrievalmarket.DealStatusAccepted) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseWriter: testnet.ExpectDealResponseWriter(t, retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusFundsNeededLastPayment, - 
PaymentOwed: defaultPaymentPerInterval, - Blocks: blocks, - ID: dealState.ID, - }), - } - runSendBlocks(t, dealStreamParams, responses, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeededLastPayment) - require.Equal(t, dealState.TotalSent, defaultTotalSent+defaultCurrentInterval) - require.Empty(t, dealState.Message) - }) - - t.Run("error reading a block", func(t *testing.T) { - _, responses := generateResponses(10, 100, false, true) - dealState := makeDealState(retrievalmarket.DealStatusAccepted) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseWriter: testnet.ExpectDealResponseWriter(t, retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusFailed, - Message: responses[0].Err.Error(), - ID: dealState.ID, - }), - } - runSendBlocks(t, dealStreamParams, responses, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - require.NotEmpty(t, dealState.Message) - }) - - t.Run("error writing response", func(t *testing.T) { - _, responses := generateResponses(10, 100, false, false) - dealState := makeDealState(retrievalmarket.DealStatusAccepted) - dealStreamParams := testnet.TestDealStreamParams{ - ResponseWriter: testnet.FailDealResponseWriter, - } - runSendBlocks(t, dealStreamParams, responses, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusErrored) - require.NotEmpty(t, dealState.Message) - }) -} - -func TestProcessPayment(t *testing.T) { - ctx := context.Background() - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ProviderDealState{}, "Status", providerstates.ProviderEvents) - require.NoError(t, err) - runProcessPayment := func(t *testing.T, node *testnodes.TestRetrievalProviderNode, - params testnet.TestDealStreamParams, - dealState *retrievalmarket.ProviderDealState) { - ds := testnet.NewTestRetrievalDealStream(params) - environment := rmtesting.NewTestProviderDealEnvironment(node, ds, rmtesting.TrivalTestDecider, nil) - fsmCtx := 
fsmtest.NewTestContext(ctx, eventMachine) - err = providerstates.ProcessPayment(fsmCtx, environment, *dealState) - require.NoError(t, err) - node.VerifyExpectations(t) - fsmCtx.ReplayEvents(t, dealState) - } - - payCh := address.TestAddress - voucher := testnet.MakeTestSignedVoucher() - voucher.Amount = big.Add(defaultFundsReceived, defaultPaymentPerInterval) - dealPayment := retrievalmarket.DealPayment{ - ID: dealID, - PaymentChannel: payCh, - PaymentVoucher: voucher, - } - t.Run("it works", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - err := node.ExpectVoucher(payCh, voucher, nil, defaultPaymentPerInterval, defaultPaymentPerInterval, nil) - require.NoError(t, err) - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.TotalSent = defaultTotalSent + defaultCurrentInterval - dealStreamParams := testnet.TestDealStreamParams{ - PaymentReader: testnet.StubbedDealPaymentReader(dealPayment), - } - runProcessPayment(t, node, dealStreamParams, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) - require.Equal(t, dealState.FundsReceived, big.Add(defaultFundsReceived, defaultPaymentPerInterval)) - require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval+defaultIntervalIncrease) - require.Empty(t, dealState.Message) - }) - t.Run("it completes", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - err := node.ExpectVoucher(payCh, voucher, nil, defaultPaymentPerInterval, defaultPaymentPerInterval, nil) - require.NoError(t, err) - dealState := makeDealState(retrievalmarket.DealStatusFundsNeededLastPayment) - dealState.TotalSent = defaultTotalSent + defaultCurrentInterval - dealStreamParams := testnet.TestDealStreamParams{ - PaymentReader: testnet.StubbedDealPaymentReader(dealPayment), - } - runProcessPayment(t, node, dealStreamParams, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFinalizing) - require.Equal(t, dealState.FundsReceived, 
big.Add(defaultFundsReceived, defaultPaymentPerInterval)) - require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval+defaultIntervalIncrease) - require.Empty(t, dealState.Message) - }) - - t.Run("not enough funds sent", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - smallerPayment := abi.NewTokenAmount(400000) - err := node.ExpectVoucher(payCh, voucher, nil, defaultPaymentPerInterval, smallerPayment, nil) - require.NoError(t, err) - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.TotalSent = defaultTotalSent + defaultCurrentInterval - dealStreamParams := testnet.TestDealStreamParams{ - PaymentReader: testnet.StubbedDealPaymentReader(dealPayment), - ResponseWriter: testnet.ExpectDealResponseWriter(t, rm.DealResponse{ - ID: dealState.ID, - Status: retrievalmarket.DealStatusFundsNeeded, - PaymentOwed: big.Sub(defaultPaymentPerInterval, smallerPayment), - }), - } - runProcessPayment(t, node, dealStreamParams, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFundsNeeded) - require.Equal(t, dealState.FundsReceived, big.Add(defaultFundsReceived, smallerPayment)) - require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval) - require.Empty(t, dealState.Message) - }) - - t.Run("voucher already saved", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - err := node.ExpectVoucher(payCh, voucher, nil, defaultPaymentPerInterval, big.Zero(), nil) - require.NoError(t, err) - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.TotalSent = defaultTotalSent + defaultCurrentInterval - dealStreamParams := testnet.TestDealStreamParams{ - PaymentReader: testnet.StubbedDealPaymentReader(dealPayment), - } - runProcessPayment(t, node, dealStreamParams, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusOngoing) - require.Equal(t, dealState.FundsReceived, big.Add(defaultFundsReceived, defaultPaymentPerInterval)) - 
require.Equal(t, dealState.CurrentInterval, defaultCurrentInterval+defaultIntervalIncrease) - require.Empty(t, dealState.Message) - }) - - t.Run("failure processing payment", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - message := "your money's no good here" - err := node.ExpectVoucher(payCh, voucher, nil, defaultPaymentPerInterval, abi.NewTokenAmount(0), errors.New(message)) - require.NoError(t, err) - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.TotalSent = defaultTotalSent + defaultCurrentInterval - dealStreamParams := testnet.TestDealStreamParams{ - PaymentReader: testnet.StubbedDealPaymentReader(dealPayment), - ResponseWriter: testnet.ExpectDealResponseWriter(t, rm.DealResponse{ - ID: dealState.ID, - Status: retrievalmarket.DealStatusFailed, - Message: message, - }), - } - runProcessPayment(t, node, dealStreamParams, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusFailed) - require.NotEmpty(t, dealState.Message) - }) - - t.Run("failure reading payment", func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - dealState := makeDealState(retrievalmarket.DealStatusFundsNeeded) - dealState.TotalSent = defaultTotalSent + defaultCurrentInterval - dealStreamParams := testnet.TestDealStreamParams{ - PaymentReader: testnet.FailDealPaymentReader, - } - runProcessPayment(t, node, dealStreamParams, dealState) - require.Equal(t, dealState.Status, retrievalmarket.DealStatusErrored) - require.NotEmpty(t, dealState.Message) - }) -} - -func TestDecideOnDeal(t *testing.T) { - ctx := context.Background() - eventMachine, err := fsm.NewEventProcessor(retrievalmarket.ProviderDealState{}, "Status", providerstates.ProviderEvents) - require.NoError(t, err) - runDecideDeal := func(t *testing.T, - node *testnodes.TestRetrievalProviderNode, - params testnet.TestDealStreamParams, - setupEnv func(e *rmtesting.TestProviderDealEnvironment), - decider retrievalimpl.DealDecider, - dealState 
*retrievalmarket.ProviderDealState) { - ds := testnet.NewTestRetrievalDealStream(params) - environment := rmtesting.NewTestProviderDealEnvironment(node, ds, decider, nil) - setupEnv(environment) - fsmCtx := fsmtest.NewTestContext(ctx, eventMachine) - err := providerstates.DecideOnDeal(fsmCtx, environment, *dealState) - require.NoError(t, err) - environment.VerifyExpectations(t) - node.VerifyExpectations(t) - fsmCtx.ReplayEvents(t, dealState) - } - - proposal := retrievalmarket.DealProposal{ - ID: retrievalmarket.DealID(10), - PayloadCID: testnet.GenerateCids(1)[0], - Params: retrievalmarket.NewParamsV0(defaultPricePerByte, defaultCurrentInterval, defaultIntervalIncrease), - } - - startingDealState := func() *retrievalmarket.ProviderDealState { - return &retrievalmarket.ProviderDealState{ - DealProposal: proposal, - Status: retrievalmarket.DealStatusAwaitingAcceptance, - FundsReceived: abi.NewTokenAmount(0), - } - } - acceptedDsParams := testnet.TestDealStreamParams{ - ResponseWriter: testnet.ExpectDealResponseWriter(t, retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusAccepted, - ID: proposal.ID, - })} - - type testCases map[string]struct { - dsParams testnet.TestDealStreamParams - decider retrievalimpl.DealDecider - setupEnv func(*rmtesting.TestProviderDealEnvironment) - verify func(*testing.T, *rm.ProviderDealState) - } - tcs := testCases{ - "qapla'": { - dsParams: acceptedDsParams, - setupEnv: func(te *rmtesting.TestProviderDealEnvironment) { - te.ExpectDeciderCalledWith(proposal.ID) - }, - verify: func(t *testing.T, state *rm.ProviderDealState) { - assert.Equal(t, state.Status, retrievalmarket.DealStatusAccepted) - assert.Empty(t, state.Message) - assert.Equal(t, defaultCurrentInterval, state.CurrentInterval) - }, - }, - "if decider fails, deal errors": { - dsParams: acceptedDsParams, - decider: func(ctx context.Context, state rm.ProviderDealState) (bool, string, error) { - return false, "", errors.New("boom") - }, - setupEnv: func(te 
*rmtesting.TestProviderDealEnvironment) { - te.ExpectDeciderCalledWith(proposal.ID) - }, - verify: func(t *testing.T, state *rm.ProviderDealState) { - assert.Equal(t, retrievalmarket.DealStatusErrored, state.Status) - assert.Equal(t, "boom", state.Message) - }, - }, - "if decider rejects, deal is rejected": { - dsParams: acceptedDsParams, - decider: func(ctx context.Context, state rm.ProviderDealState) (bool, string, error) { - return false, "Thursday, I don't care about you", nil - }, - setupEnv: func(te *rmtesting.TestProviderDealEnvironment) { - te.ExpectDeciderCalledWith(proposal.ID) - }, - verify: func(t *testing.T, state *rm.ProviderDealState) { - assert.Equal(t, retrievalmarket.DealStatusRejected, state.Status) - assert.Equal(t, "Thursday, I don't care about you", state.Message) - }, - }, - "if response write error, deal errors": { - dsParams: testnet.TestDealStreamParams{ - ProposalReader: testnet.StubbedDealProposalReader(proposal), - ResponseWriter: testnet.FailDealResponseWriter, - }, - setupEnv: func(te *rmtesting.TestProviderDealEnvironment) { - te.ExpectDeciderCalledWith(proposal.ID) - }, - verify: func(t *testing.T, state *rm.ProviderDealState) { - assert.Equal(t, retrievalmarket.DealStatusErrored, state.Status) - assert.NotEmpty(t, state.Message) - }, - }, - } - for name, tc := range tcs { - t.Run(name, func(t *testing.T) { - node := testnodes.NewTestRetrievalProviderNode() - dealState := startingDealState() - runDecideDeal(t, node, tc.dsParams, tc.setupEnv, tc.decider, dealState) - assert.Equal(t, proposal, dealState.DealProposal) - tc.verify(t, dealState) - }) - } -} - -var dealID = retrievalmarket.DealID(10) -var defaultCurrentInterval = uint64(1000) -var defaultIntervalIncrease = uint64(500) -var defaultPricePerByte = abi.NewTokenAmount(500) -var defaultPaymentPerInterval = big.Mul(defaultPricePerByte, abi.NewTokenAmount(int64(defaultCurrentInterval))) -var defaultTotalSent = uint64(5000) -var defaultFundsReceived = abi.NewTokenAmount(2500000) - 
-func makeDealState(status retrievalmarket.DealStatus) *retrievalmarket.ProviderDealState { - return &retrievalmarket.ProviderDealState{ - Status: status, - TotalSent: defaultTotalSent, - CurrentInterval: defaultCurrentInterval, - FundsReceived: defaultFundsReceived, - DealProposal: retrievalmarket.DealProposal{ - ID: dealID, - Params: retrievalmarket.NewParamsV0(defaultPricePerByte, defaultCurrentInterval, defaultIntervalIncrease), - }, - } -} - -func generateResponses(count uint64, blockSize uint64, completeOnLast bool, - errorOnFirst bool) ([]retrievalmarket.Block, []rmtesting.ReadBlockResponse) { - responses := make([]rmtesting.ReadBlockResponse, count) - blocks := make([]retrievalmarket.Block, count) - var i uint64 = 0 - for ; i < count; i++ { - data := make([]byte, blockSize) - var err error - _, err = rand.Read(data) - complete := false - if i == 0 && errorOnFirst { - err = errors.New("something went wrong") - } - - if i == count-1 && completeOnLast { - complete = true - } - block := retrievalmarket.Block{ - Prefix: cid.NewPrefixV1(cid.Raw, mh.SHA2_256).Bytes(), - Data: data, - } - blocks[i] = block - responses[i] = rmtesting.ReadBlockResponse{ - Block: block, Done: complete, Err: err} - } - return blocks, responses -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/doc.go deleted file mode 100644 index 2ce77caabd..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Package testnodes contains stubbed implementations of the RetrievalProviderNode -// and RetrievalClientNode interface to simulate communications with a filecoin -// node -package testnodes diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/test_retrieval_client_node.go 
b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/test_retrieval_client_node.go deleted file mode 100644 index e596f2eb84..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/test_retrieval_client_node.go +++ /dev/null @@ -1,120 +0,0 @@ -package testnodes - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" -) - -// TestRetrievalClientNode is a node adapter for a retrieval client whose responses -// are stubbed -type TestRetrievalClientNode struct { - addFundsOnly bool // set this to true to test adding funds to an existing payment channel - payCh address.Address - payChErr error - createPaychMsgCID, addFundsMsgCID cid.Cid - lane uint64 - laneError error - voucher *paych.SignedVoucher - voucherError, waitCreateErr, waitAddErr error - - allocateLaneRecorder func(address.Address) - createPaymentVoucherRecorder func(voucher *paych.SignedVoucher) - getCreatePaymentChannelRecorder func(address.Address, address.Address, abi.TokenAmount) -} - -// TestRetrievalClientNodeParams are parameters for initializing a TestRetrievalClientNode -type TestRetrievalClientNodeParams struct { - PayCh address.Address - PayChErr error - CreatePaychCID, AddFundsCID cid.Cid - Lane uint64 - LaneError error - Voucher *paych.SignedVoucher - VoucherError error - AllocateLaneRecorder func(address.Address) - PaymentVoucherRecorder func(voucher *paych.SignedVoucher) - PaymentChannelRecorder func(address.Address, address.Address, abi.TokenAmount) - AddFundsOnly bool - WaitForAddFundsErr, WaitForChCreateErr error -} - -var _ retrievalmarket.RetrievalClientNode = &TestRetrievalClientNode{} - -// NewTestRetrievalClientNode initializes a 
new TestRetrievalClientNode based on the given params -func NewTestRetrievalClientNode(params TestRetrievalClientNodeParams) *TestRetrievalClientNode { - return &TestRetrievalClientNode{ - addFundsOnly: params.AddFundsOnly, - payCh: params.PayCh, - payChErr: params.PayChErr, - waitCreateErr: params.WaitForChCreateErr, - waitAddErr: params.WaitForAddFundsErr, - lane: params.Lane, - laneError: params.LaneError, - voucher: params.Voucher, - voucherError: params.VoucherError, - allocateLaneRecorder: params.AllocateLaneRecorder, - createPaymentVoucherRecorder: params.PaymentVoucherRecorder, - getCreatePaymentChannelRecorder: params.PaymentChannelRecorder, - createPaychMsgCID: params.CreatePaychCID, - addFundsMsgCID: params.AddFundsCID, - } -} - -// GetOrCreatePaymentChannel returns a mocked payment channel -func (trcn *TestRetrievalClientNode) GetOrCreatePaymentChannel(ctx context.Context, clientAddress address.Address, minerAddress address.Address, clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) { - if trcn.getCreatePaymentChannelRecorder != nil { - trcn.getCreatePaymentChannelRecorder(clientAddress, minerAddress, clientFundsAvailable) - } - var payCh address.Address - msgCID := trcn.createPaychMsgCID - if trcn.addFundsOnly { - payCh = trcn.payCh - msgCID = trcn.addFundsMsgCID - } - return payCh, msgCID, trcn.payChErr -} - -// AllocateLane creates a mock lane on a payment channel -func (trcn *TestRetrievalClientNode) AllocateLane(paymentChannel address.Address) (uint64, error) { - if trcn.allocateLaneRecorder != nil { - trcn.allocateLaneRecorder(paymentChannel) - } - return trcn.lane, trcn.laneError -} - -// CreatePaymentVoucher creates a mock payment voucher based on a channel and lane -func (trcn *TestRetrievalClientNode) CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, amount abi.TokenAmount, lane uint64, tok shared.TipSetToken) (*paych.SignedVoucher, error) { - if 
trcn.createPaymentVoucherRecorder != nil { - trcn.createPaymentVoucherRecorder(trcn.voucher) - } - return trcn.voucher, trcn.voucherError -} - -// GetChainHead returns a mock value for the chain head -func (trcn *TestRetrievalClientNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - return shared.TipSetToken{}, 0, nil -} - -// WaitForPaymentChannelAddFunds simulates waiting for a payment channel add funds message to complete -func (trcn *TestRetrievalClientNode) WaitForPaymentChannelAddFunds(messageCID cid.Cid) error { - if messageCID != trcn.addFundsMsgCID { - return fmt.Errorf("expected messageCID: %s does not match actual: %s", trcn.addFundsMsgCID, messageCID) - } - return trcn.waitAddErr -} - -// WaitForPaymentChannelCreation simulates waiting for a payment channel creation message to complete -func (trcn *TestRetrievalClientNode) WaitForPaymentChannelCreation(messageCID cid.Cid) (address.Address, error) { - if messageCID != trcn.createPaychMsgCID { - return address.Undef, fmt.Errorf("expected messageCID: %s does not match actual: %s", trcn.createPaychMsgCID, messageCID) - } - return trcn.payCh, trcn.waitCreateErr -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/test_retrieval_provider_node.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/test_retrieval_provider_node.go deleted file mode 100644 index 2e16fa4652..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes/test_retrieval_provider_node.go +++ /dev/null @@ -1,163 +0,0 @@ -package testnodes - -import ( - "bytes" - "context" - "encoding/base64" - "errors" - "io" - "io/ioutil" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/stretchr/testify/require" - - 
"github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" -) - -type expectedVoucherKey struct { - paymentChannel string - voucher string - proof string - expectedAmount string -} - -type sectorKey struct { - sectorID uint64 - offset uint64 - length uint64 -} - -type voucherResult struct { - amount abi.TokenAmount - err error -} - -// TestRetrievalProviderNode is a node adapter for a retrieval provider whose -// responses are mocked -type TestRetrievalProviderNode struct { - sectorStubs map[sectorKey][]byte - expectations map[sectorKey]struct{} - received map[sectorKey]struct{} - expectedVouchers map[expectedVoucherKey]voucherResult - receivedVouchers map[expectedVoucherKey]struct{} -} - -var _ retrievalmarket.RetrievalProviderNode = &TestRetrievalProviderNode{} - -// NewTestRetrievalProviderNode instantiates a new TestRetrievalProviderNode -func NewTestRetrievalProviderNode() *TestRetrievalProviderNode { - return &TestRetrievalProviderNode{ - sectorStubs: make(map[sectorKey][]byte), - expectations: make(map[sectorKey]struct{}), - received: make(map[sectorKey]struct{}), - expectedVouchers: make(map[expectedVoucherKey]voucherResult), - receivedVouchers: make(map[expectedVoucherKey]struct{}), - } -} - -// StubUnseal stubs a response to attempting to unseal a sector with the given paramters -func (trpn *TestRetrievalProviderNode) StubUnseal(sectorID uint64, offset uint64, length uint64, data []byte) { - trpn.sectorStubs[sectorKey{sectorID, offset, length}] = data -} - -// ExpectFailedUnseal indicates an expectation that a call will be made to unseal -// a sector with the given params and should fail -func (trpn *TestRetrievalProviderNode) ExpectFailedUnseal(sectorID uint64, offset uint64, length uint64) { - trpn.expectations[sectorKey{sectorID, offset, length}] = struct{}{} -} - -// ExpectUnseal indicates an expectation that a call will be made to unseal -// a sector with the given params and should return the 
given data -func (trpn *TestRetrievalProviderNode) ExpectUnseal(sectorID uint64, offset uint64, length uint64, data []byte) { - trpn.expectations[sectorKey{sectorID, offset, length}] = struct{}{} - trpn.StubUnseal(sectorID, offset, length, data) -} - -// UnsealSector simulates unsealing a sector by returning a stubbed response -// or erroring -func (trpn *TestRetrievalProviderNode) UnsealSector(ctx context.Context, sectorID uint64, offset uint64, length uint64) (io.ReadCloser, error) { - trpn.received[sectorKey{sectorID, offset, length}] = struct{}{} - data, ok := trpn.sectorStubs[sectorKey{sectorID, offset, length}] - if !ok { - return nil, errors.New("Could not unseal") - } - return ioutil.NopCloser(bytes.NewReader(data)), nil -} - -// VerifyExpectations verifies that all expected calls were made and no other calls -// were made -func (trpn *TestRetrievalProviderNode) VerifyExpectations(t *testing.T) { - require.Equal(t, len(trpn.expectedVouchers), len(trpn.receivedVouchers)) - require.Equal(t, trpn.expectations, trpn.received) -} - -// SavePaymentVoucher simulates saving a payment voucher with a stubbed result -func (trpn *TestRetrievalProviderNode) SavePaymentVoucher( - ctx context.Context, - paymentChannel address.Address, - voucher *paych.SignedVoucher, - proof []byte, - expectedAmount abi.TokenAmount, - tok shared.TipSetToken) (abi.TokenAmount, error) { - key, err := trpn.toExpectedVoucherKey(paymentChannel, voucher, proof, expectedAmount) - if err != nil { - return abi.TokenAmount{}, err - } - result, ok := trpn.expectedVouchers[key] - if ok { - trpn.receivedVouchers[key] = struct{}{} - return result.amount, result.err - } - return abi.TokenAmount{}, errors.New("SavePaymentVoucher failed") -} - -// GetMinerWorkerAddress translates an address -func (trpn *TestRetrievalProviderNode) GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken) (address.Address, error) { - return addr, nil -} - -// GetChainHead returns a mock value 
for the chain head -func (trpn *TestRetrievalProviderNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - return []byte{42}, 0, nil -} - -// --- Non-interface Functions - -// to ExpectedVoucherKey creates a lookup key for expected vouchers. -func (trpn *TestRetrievalProviderNode) toExpectedVoucherKey(paymentChannel address.Address, voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount) (expectedVoucherKey, error) { - pcString := paymentChannel.String() - buf := new(bytes.Buffer) - if err := voucher.MarshalCBOR(buf); err != nil { - return expectedVoucherKey{}, err - } - voucherString := base64.RawURLEncoding.EncodeToString(buf.Bytes()) - proofString := string(proof) - expectedAmountString := expectedAmount.String() - return expectedVoucherKey{pcString, voucherString, proofString, expectedAmountString}, nil -} - -// ExpectVoucher sets a voucher to be expected by SavePaymentVoucher -// paymentChannel: the address of the payment channel the client creates -// voucher: the voucher to match -// proof: the proof to use (can be blank) -// expectedAmount: the expected tokenamount for this voucher -// actualAmount: the actual amount to use. 
use same as expectedAmount unless you want to trigger an error -// expectedErr: an error message to expect -func (trpn *TestRetrievalProviderNode) ExpectVoucher( - paymentChannel address.Address, - voucher *paych.SignedVoucher, - proof []byte, - expectedAmount abi.TokenAmount, - actualAmount abi.TokenAmount, // the actual amount it should have (same unless you want to trigger an error) - expectedErr error) error { - key, err := trpn.toExpectedVoucherKey(paymentChannel, voucher, proof, expectedAmount) - if err != nil { - return err - } - trpn.expectedVouchers[key] = voucherResult{actualAmount, expectedErr} - return nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/deal_stream.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/deal_stream.go deleted file mode 100644 index 769654e872..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/deal_stream.go +++ /dev/null @@ -1,67 +0,0 @@ -package network - -import ( - "bufio" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/libp2p/go-libp2p-core/mux" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -type dealStream struct { - p peer.ID - rw mux.MuxedStream - buffered *bufio.Reader -} - -var _ RetrievalDealStream = (*dealStream)(nil) - -func (d *dealStream) ReadDealProposal() (retrievalmarket.DealProposal, error) { - var ds retrievalmarket.DealProposal - - if err := ds.UnmarshalCBOR(d.buffered); err != nil { - log.Warn(err) - return retrievalmarket.DealProposalUndefined, err - } - return ds, nil -} - -func (d *dealStream) WriteDealProposal(dp retrievalmarket.DealProposal) error { - return cborutil.WriteCborRPC(d.rw, &dp) -} - -func (d *dealStream) ReadDealResponse() (retrievalmarket.DealResponse, error) { - var dr retrievalmarket.DealResponse - - if err := dr.UnmarshalCBOR(d.buffered); err != nil { - return 
retrievalmarket.DealResponseUndefined, err - } - return dr, nil -} - -func (d *dealStream) WriteDealResponse(dr retrievalmarket.DealResponse) error { - return cborutil.WriteCborRPC(d.rw, &dr) -} - -func (d *dealStream) ReadDealPayment() (retrievalmarket.DealPayment, error) { - var ds retrievalmarket.DealPayment - - if err := ds.UnmarshalCBOR(d.rw); err != nil { - return retrievalmarket.DealPaymentUndefined, err - } - return ds, nil -} - -func (d *dealStream) WriteDealPayment(dpy retrievalmarket.DealPayment) error { - return cborutil.WriteCborRPC(d.rw, &dpy) -} - -func (d *dealStream) Receiver() peer.ID { - return d.p -} - -func (d *dealStream) Close() error { - return d.rw.Close() -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/doc.go deleted file mode 100644 index 92e82a269a..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package network providers an abstraction over a libp2p host for managing retrieval's Libp2p protocols: - -network.go - defines the interfaces that must be implemented to serve as a retrieval network -deal-stream.go - implements the `RetrievalDealStream` interface, a data stream for retrieval deal traffic only -query-stream.go - implements the `RetrievalQueryStream` interface, a data stream for retrieval query traffic only -libp2p_impl.go - provides the production implementation of the `RetrievalMarketNetwork` interface. 
-*/ -package network diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/libp2p_impl.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/libp2p_impl.go deleted file mode 100644 index 6fda324f94..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/libp2p_impl.go +++ /dev/null @@ -1,93 +0,0 @@ -package network - -import ( - "bufio" - "context" - - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -var log = logging.Logger("retrieval_network") -var _ RetrievalMarketNetwork = new(libp2pRetrievalMarketNetwork) - -// NewFromLibp2pHost constructs a new instance of the RetrievalMarketNetwork from a -// libp2p host -func NewFromLibp2pHost(h host.Host) RetrievalMarketNetwork { - return &libp2pRetrievalMarketNetwork{host: h} -} - -// libp2pRetrievalMarketNetwork transforms the libp2p host interface, which sends and receives -// NetMessage objects, into the graphsync network interface. -// It implements the RetrievalMarketNetwork API. 
-type libp2pRetrievalMarketNetwork struct { - host host.Host - // inbound messages from the network are forwarded to the receiver - receiver RetrievalReceiver -} - -// NewQueryStream creates a new RetrievalQueryStream using the provided peer.ID -func (impl *libp2pRetrievalMarketNetwork) NewQueryStream(id peer.ID) (RetrievalQueryStream, error) { - s, err := impl.host.NewStream(context.Background(), id, retrievalmarket.QueryProtocolID) - if err != nil { - log.Warn(err) - return nil, err - } - buffered := bufio.NewReaderSize(s, 16) - return &queryStream{p: id, rw: s, buffered: buffered}, nil -} - -// NewDealStream creates a new RetrievalDealStream using the provided peer.ID -func (impl *libp2pRetrievalMarketNetwork) NewDealStream(id peer.ID) (RetrievalDealStream, error) { - s, err := impl.host.NewStream(context.Background(), id, retrievalmarket.ProtocolID) - if err != nil { - return nil, err - } - buffered := bufio.NewReaderSize(s, 16) - return &dealStream{p: id, rw: s, buffered: buffered}, nil -} - -// SetDelegate sets a RetrievalReceiver to handle stream data -func (impl *libp2pRetrievalMarketNetwork) SetDelegate(r RetrievalReceiver) error { - impl.receiver = r - impl.host.SetStreamHandler(retrievalmarket.ProtocolID, impl.handleNewDealStream) - impl.host.SetStreamHandler(retrievalmarket.QueryProtocolID, impl.handleNewQueryStream) - return nil -} - -// StopHandlingRequests unsets the RetrievalReceiver and would perform any other necessary -// shutdown logic. 
-func (impl *libp2pRetrievalMarketNetwork) StopHandlingRequests() error { - impl.receiver = nil - impl.host.RemoveStreamHandler(retrievalmarket.ProtocolID) - impl.host.RemoveStreamHandler(retrievalmarket.QueryProtocolID) - return nil -} - -func (impl *libp2pRetrievalMarketNetwork) handleNewQueryStream(s network.Stream) { - if impl.receiver == nil { - log.Warn("no receiver set") - s.Reset() // nolint: errcheck,gosec - return - } - remotePID := s.Conn().RemotePeer() - buffered := bufio.NewReaderSize(s, 16) - qs := &queryStream{remotePID, s, buffered} - impl.receiver.HandleQueryStream(qs) -} - -func (impl *libp2pRetrievalMarketNetwork) handleNewDealStream(s network.Stream) { - if impl.receiver == nil { - log.Warn("no receiver set") - s.Reset() // nolint: errcheck,gosec - return - } - remotePID := s.Conn().RemotePeer() - buffered := bufio.NewReaderSize(s, 16) - ds := &dealStream{remotePID, s, buffered} - impl.receiver.HandleDealStream(ds) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/libp2p_impl_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/libp2p_impl_test.go deleted file mode 100644 index 759db86cc5..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/libp2p_impl_test.go +++ /dev/null @@ -1,424 +0,0 @@ -package network_test - -import ( - "context" - "math/big" - "math/rand" - "testing" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -type testReceiver struct { - t *testing.T - dealStreamHandler func(network.RetrievalDealStream) - queryStreamHandler 
func(network.RetrievalQueryStream) -} - -func (tr *testReceiver) HandleDealStream(s network.RetrievalDealStream) { - defer s.Close() - if tr.dealStreamHandler != nil { - tr.dealStreamHandler(s) - } -} -func (tr *testReceiver) HandleQueryStream(s network.RetrievalQueryStream) { - defer s.Close() - if tr.queryStreamHandler != nil { - tr.queryStreamHandler(s) - } -} - -func TestQueryStreamSendReceiveQuery(t *testing.T) { - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - qchan := make(chan retrievalmarket.Query) - tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { - readq, err := s.ReadQuery() - require.NoError(t, err) - qchan <- readq - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - // setup query stream host1 --> host 2 - assertQueryReceived(ctx, t, fromNetwork, toHost, qchan) -} - -func TestQueryStreamSendReceiveQueryResponse(t *testing.T) { - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - qchan := make(chan retrievalmarket.QueryResponse) - tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { - q, err := s.ReadQueryResponse() - require.NoError(t, err) - qchan <- q - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - assertQueryResponseReceived(ctx, t, fromNetwork, toHost, qchan) - -} - -func TestQueryStreamSendReceiveMultipleSuccessful(t *testing.T) { - // send query, read in handler, send 
response back, read response - ctxBg := context.Background() - td := shared_testutil.NewLibp2pTestData(ctxBg, t) - nw1 := network.NewFromLibp2pHost(td.Host1) - nw2 := network.NewFromLibp2pHost(td.Host2) - require.NoError(t, td.Host1.Connect(ctxBg, peer.AddrInfo{ID: td.Host2.ID()})) - - // host2 gets a query and sends a response - qr := shared_testutil.MakeTestQueryResponse() - done := make(chan bool) - tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { - _, err := s.ReadQuery() - require.NoError(t, err) - - require.NoError(t, s.WriteQueryResponse(qr)) - done <- true - }} - require.NoError(t, nw2.SetDelegate(tr2)) - - ctx, cancel := context.WithTimeout(ctxBg, 10*time.Second) - defer cancel() - - qs, err := nw1.NewQueryStream(td.Host2.ID()) - require.NoError(t, err) - - testCid := shared_testutil.GenerateCids(1)[0] - - var resp retrievalmarket.QueryResponse - go require.NoError(t, qs.WriteQuery(retrievalmarket.Query{PayloadCID: testCid})) - resp, err = qs.ReadQueryResponse() - require.NoError(t, err) - - select { - case <-ctx.Done(): - t.Error("response not received") - case <-done: - } - - assert.Equal(t, qr, resp) -} - -func TestDealStreamSendReceiveDealProposal(t *testing.T) { - // send proposal, read in handler - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - dchan := make(chan retrievalmarket.DealProposal) - tr2 := &testReceiver{ - t: t, - dealStreamHandler: func(s network.RetrievalDealStream) { - readD, err := s.ReadDealProposal() - require.NoError(t, err) - dchan <- readD - }, - } - require.NoError(t, toNetwork.SetDelegate(tr2)) - - assertDealProposalReceived(ctx, t, fromNetwork, toHost, dchan) -} - -func TestDealStreamSendReceiveDealResponse(t *testing.T) { - ctx := 
context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toPeer := td.Host2.ID() - - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - drChan := make(chan retrievalmarket.DealResponse) - tr2 := &testReceiver{ - t: t, - dealStreamHandler: func(s network.RetrievalDealStream) { - readDP, err := s.ReadDealResponse() - require.NoError(t, err) - drChan <- readDP - }, - } - require.NoError(t, toNetwork.SetDelegate(tr2)) - assertDealResponseReceived(ctx, t, fromNetwork, toPeer, drChan) -} - -func TestDealStreamSendReceiveDealPayment(t *testing.T) { - // send payment, read in handler - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toPeer := td.Host2.ID() - - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - dpyChan := make(chan retrievalmarket.DealPayment) - tr2 := &testReceiver{ - t: t, - dealStreamHandler: func(s network.RetrievalDealStream) { - readDpy, err := s.ReadDealPayment() - require.NoError(t, err) - dpyChan <- readDpy - }, - } - require.NoError(t, toNetwork.SetDelegate(tr2)) - assertDealPaymentReceived(ctx, t, fromNetwork, toPeer, dpyChan) -} - -func TestDealStreamSendReceiveMultipleSuccessful(t *testing.T) { - // send proposal, read in handler, send response back, - // read response, - // send payment, read farther in handler - - bgCtx := context.Background() - td := shared_testutil.NewLibp2pTestData(bgCtx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toPeer := td.Host2.ID() - - // set up stream handler, channels, and response - // dpChan := make(chan retrievalmarket.DealProposal) - dpyChan := make(chan retrievalmarket.DealPayment) - dr := shared_testutil.MakeTestDealResponse() - - tr2 
:= &testReceiver{t: t, dealStreamHandler: func(s network.RetrievalDealStream) { - _, err := s.ReadDealProposal() - require.NoError(t, err) - - require.NoError(t, s.WriteDealResponse(dr)) - - readDp, err := s.ReadDealPayment() - require.NoError(t, err) - dpyChan <- readDp - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - // start sending deal proposal - ds1, err := fromNetwork.NewDealStream(toPeer) - require.NoError(t, err) - - dp := shared_testutil.MakeTestDealProposal() - - var receivedPayment retrievalmarket.DealPayment - - ctx, cancel := context.WithTimeout(bgCtx, 10*time.Second) - defer cancel() - - // write proposal - require.NoError(t, ds1.WriteDealProposal(dp)) - - // read response and verify it's the one we told toNetwork to send - responseReceived, err := ds1.ReadDealResponse() - require.NoError(t, err) - assert.Equal(t, dr.ID, responseReceived.ID) - assert.Equal(t, dr.Message, responseReceived.Message) - assert.Equal(t, dr.Status, responseReceived.Status) - - // send payment - dpy := retrievalmarket.DealPayment{ - ID: dp.ID, - PaymentChannel: address.TestAddress, - PaymentVoucher: shared_testutil.MakeTestSignedVoucher(), - } - require.NoError(t, ds1.WriteDealPayment(dpy)) - - select { - case <-ctx.Done(): - t.Errorf("failed to receive messages") - case receivedPayment = <-dpyChan: - } - - assert.Equal(t, dpy, receivedPayment) -} - -func TestLibp2pRetrievalMarketNetwork_StopHandlingRequests(t *testing.T) { - bgCtx := context.Background() - td := shared_testutil.NewLibp2pTestData(bgCtx, t) - - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - qchan := make(chan retrievalmarket.Query) - tr2 := &testReceiver{t: t, queryStreamHandler: func(s network.RetrievalQueryStream) { - readq, err := s.ReadQuery() - require.NoError(t, err) - qchan <- 
readq - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - require.NoError(t, toNetwork.StopHandlingRequests()) - - _, err := fromNetwork.NewQueryStream(toHost) - require.Error(t, err, "protocol not supported") -} - -// assertDealProposalReceived performs the verification that a deal proposal is received -func assertDealProposalReceived(inCtx context.Context, t *testing.T, fromNetwork network.RetrievalMarketNetwork, toPeer peer.ID, inChan chan retrievalmarket.DealProposal) { - ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - qs1, err := fromNetwork.NewDealStream(toPeer) - require.NoError(t, err) - - // send query to host2 - dp := shared_testutil.MakeTestDealProposal() - require.NoError(t, qs1.WriteDealProposal(dp)) - - var dealReceived retrievalmarket.DealProposal - select { - case <-ctx.Done(): - t.Error("deal proposal not received") - case dealReceived = <-inChan: - } - require.NotNil(t, dealReceived) - assert.Equal(t, dp, dealReceived) -} - -func assertDealResponseReceived(parentCtx context.Context, t *testing.T, fromNetwork network.RetrievalMarketNetwork, toPeer peer.ID, inChan chan retrievalmarket.DealResponse) { - ctx, cancel := context.WithTimeout(parentCtx, 10*time.Second) - defer cancel() - - ds1, err := fromNetwork.NewDealStream(toPeer) - require.NoError(t, err) - - fakeBlk := retrievalmarket.Block{ - Prefix: []byte("prefix"), - Data: []byte("data"), - } - - dr := retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusCompleted, - ID: retrievalmarket.DealID(rand.Uint64()), - PaymentOwed: abi.TokenAmount{Int: big.NewInt(rand.Int63())}, - Message: "some message", - Blocks: []retrievalmarket.Block{fakeBlk}, - } - require.NoError(t, ds1.WriteDealResponse(dr)) - - var responseReceived retrievalmarket.DealResponse - select { - case <-ctx.Done(): - t.Error("response not received") - case responseReceived = <-inChan: - } - require.NotNil(t, responseReceived) - assert.Equal(t, dr, responseReceived) -} - -func 
assertDealPaymentReceived(parentCtx context.Context, t *testing.T, fromNetwork network.RetrievalMarketNetwork, toPeer peer.ID, inChan chan retrievalmarket.DealPayment) { - ctx, cancel := context.WithTimeout(parentCtx, 10*time.Second) - defer cancel() - - ds1, err := fromNetwork.NewDealStream(toPeer) - require.NoError(t, err) - - dp := retrievalmarket.DealPayment{ - ID: retrievalmarket.DealID(rand.Uint64()), - PaymentChannel: address.TestAddress, - PaymentVoucher: shared_testutil.MakeTestSignedVoucher(), - } - require.NoError(t, ds1.WriteDealPayment(dp)) - - var responseReceived retrievalmarket.DealPayment - select { - case <-ctx.Done(): - t.Error("response not received") - case responseReceived = <-inChan: - } - require.NotNil(t, responseReceived) - assert.Equal(t, dp.ID, responseReceived.ID) - assert.Equal(t, dp.PaymentChannel, responseReceived.PaymentChannel) - assert.Equal(t, *dp.PaymentVoucher, *responseReceived.PaymentVoucher) -} - -// assertQueryReceived performs the verification that a DealStatusRequest is received -func assertQueryReceived(inCtx context.Context, t *testing.T, fromNetwork network.RetrievalMarketNetwork, toHost peer.ID, qchan chan retrievalmarket.Query) { - ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - qs1, err := fromNetwork.NewQueryStream(toHost) - require.NoError(t, err) - - // send query to host2 - cid := shared_testutil.GenerateCids(1)[0] - q := retrievalmarket.NewQueryV0(cid) - require.NoError(t, qs1.WriteQuery(q)) - - var inq retrievalmarket.Query - select { - case <-ctx.Done(): - t.Error("msg not received") - case inq = <-qchan: - } - require.NotNil(t, inq) - assert.Equal(t, q.PayloadCID, inq.PayloadCID) -} - -// assertQueryResponseReceived performs the verification that a DealStatusResponse is received -func assertQueryResponseReceived(inCtx context.Context, t *testing.T, - fromNetwork network.RetrievalMarketNetwork, - toHost peer.ID, - qchan chan retrievalmarket.QueryResponse) { - ctx, cancel := 
context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - // setup query stream host1 --> host 2 - qs1, err := fromNetwork.NewQueryStream(toHost) - require.NoError(t, err) - - // send queryresponse to host2 - qr := shared_testutil.MakeTestQueryResponse() - require.NoError(t, qs1.WriteQueryResponse(qr)) - - // read queryresponse - var inqr retrievalmarket.QueryResponse - select { - case <-ctx.Done(): - t.Error("msg not received") - case inqr = <-qchan: - } - - require.NotNil(t, inqr) - assert.Equal(t, qr, inqr) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/network.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/network.go deleted file mode 100644 index eae456e7fe..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/network.go +++ /dev/null @@ -1,62 +0,0 @@ -package network - -import ( - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -// These are the required interfaces that must be implemented to send and receive data -// for retrieval queries and deals. - -// RetrievalQueryStream is the API needed to send and receive retrieval query -// data over data-transfer network. -type RetrievalQueryStream interface { - ReadQuery() (retrievalmarket.Query, error) - WriteQuery(retrievalmarket.Query) error - ReadQueryResponse() (retrievalmarket.QueryResponse, error) - WriteQueryResponse(retrievalmarket.QueryResponse) error - Close() error -} - -// RetrievalDealStream is the API needed to send and receive retrieval deal -// data over data-transfer network. 
-type RetrievalDealStream interface { - ReadDealProposal() (retrievalmarket.DealProposal, error) - WriteDealProposal(retrievalmarket.DealProposal) error - ReadDealResponse() (retrievalmarket.DealResponse, error) - WriteDealResponse(retrievalmarket.DealResponse) error - ReadDealPayment() (retrievalmarket.DealPayment, error) - WriteDealPayment(retrievalmarket.DealPayment) error - Receiver() peer.ID - Close() error -} - -// RetrievalReceiver is the API for handling data coming in on -// both query and deal streams -type RetrievalReceiver interface { - // HandleQueryStream sends and receives data-transfer data via the - // RetrievalQueryStream provided - HandleQueryStream(RetrievalQueryStream) - - // HandleDealStream sends and receives data-transfer data via the - // RetrievalDealStream provided - HandleDealStream(RetrievalDealStream) -} - -// RetrievalMarketNetwork is the API for creating query and deal streams and -// delegating responders to those streams. -type RetrievalMarketNetwork interface { - // NewQueryStream creates a new RetrievalQueryStream implementer using the provided peer.ID - NewQueryStream(peer.ID) (RetrievalQueryStream, error) - - // NewDealStream creates a new RetrievalDealStream implementer using the provided peer.ID - NewDealStream(peer.ID) (RetrievalDealStream, error) - - // SetDelegate sets a RetrievalReceiver implementer to handle stream data - SetDelegate(RetrievalReceiver) error - - // StopHandlingRequests unsets the RetrievalReceiver and would perform any other necessary - // shutdown logic. 
- StopHandlingRequests() error -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/query_stream.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/query_stream.go deleted file mode 100644 index 6cd121899c..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/network/query_stream.go +++ /dev/null @@ -1,54 +0,0 @@ -package network - -import ( - "bufio" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/libp2p/go-libp2p-core/mux" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -type queryStream struct { - p peer.ID - rw mux.MuxedStream - buffered *bufio.Reader -} - -var _ RetrievalQueryStream = (*queryStream)(nil) - -func (qs *queryStream) ReadQuery() (retrievalmarket.Query, error) { - var q retrievalmarket.Query - - if err := q.UnmarshalCBOR(qs.buffered); err != nil { - log.Warn(err) - return retrievalmarket.QueryUndefined, err - - } - - return q, nil -} - -func (qs *queryStream) WriteQuery(q retrievalmarket.Query) error { - return cborutil.WriteCborRPC(qs.rw, &q) -} - -func (qs *queryStream) ReadQueryResponse() (retrievalmarket.QueryResponse, error) { - var resp retrievalmarket.QueryResponse - - if err := resp.UnmarshalCBOR(qs.buffered); err != nil { - log.Warn(err) - return retrievalmarket.QueryResponseUndefined, err - } - - return resp, nil -} - -func (qs *queryStream) WriteQueryResponse(qr retrievalmarket.QueryResponse) error { - return cborutil.WriteCborRPC(qs.rw, &qr) -} - -func (qs *queryStream) Close() error { - return qs.rw.Close() -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/storage_retrieval_integration_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/storage_retrieval_integration_test.go deleted file mode 100644 index 326d6bbca8..0000000000 --- 
a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/storage_retrieval_integration_test.go +++ /dev/null @@ -1,423 +0,0 @@ -package retrievalmarket_test - -import ( - "bytes" - "context" - "io/ioutil" - "math/rand" - "path/filepath" - "testing" - "time" - - "github.com/filecoin-project/go-address" - datatransfer "github.com/filecoin-project/go-data-transfer" - graphsyncimpl "github.com/filecoin-project/go-data-transfer/impl/graphsync" - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - testnodes2 "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" - stormkt "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" - 
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - stornet "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" -) - -func TestStorageRetrieval(t *testing.T) { - bgCtx := context.Background() - sh := newStorageHarness(bgCtx, t) - require.NoError(t, sh.Client.Start(bgCtx)) - require.NoError(t, sh.Provider.Start(bgCtx)) - - // set up a subscriber - providerDealChan := make(chan storagemarket.MinerDeal) - subscriber := func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - providerDealChan <- deal - } - _ = sh.Provider.SubscribeToEvents(subscriber) - - clientDealChan := make(chan storagemarket.ClientDeal) - clientSubscriber := func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - clientDealChan <- deal - } - _ = sh.Client.SubscribeToEvents(clientSubscriber) - - // set ask price where we'll accept any price - err := sh.Provider.SetAsk(big.NewInt(0), 50_000) - assert.NoError(t, err) - - result := sh.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: sh.PayloadCid}) - require.False(t, result.ProposalCid.Equals(cid.Undef)) - - time.Sleep(time.Millisecond * 200) - - ctxTimeout, canc := context.WithTimeout(bgCtx, 100*time.Millisecond) - defer canc() - - var storageProviderSeenDeal storagemarket.MinerDeal - var storageClientSeenDeal storagemarket.ClientDeal - for storageProviderSeenDeal.State != storagemarket.StorageDealExpired || - storageClientSeenDeal.State != storagemarket.StorageDealExpired { - select { - case storageProviderSeenDeal = <-providerDealChan: - case storageClientSeenDeal = <-clientDealChan: - case <-ctxTimeout.Done(): - t.Fatalf("never saw completed deal, client deal state: %s (%d), provider deal state: %s (%d)", - storagemarket.DealStates[storageClientSeenDeal.State], - storageClientSeenDeal.State, - storagemarket.DealStates[storageProviderSeenDeal.State], - 
storageProviderSeenDeal.State, - ) - } - } - - rh := newRetrievalHarness(ctxTimeout, t, sh, storageClientSeenDeal) - - clientDealStateChan := make(chan retrievalmarket.ClientDealState) - rh.Client.SubscribeToEvents(func(event retrievalmarket.ClientEvent, state retrievalmarket.ClientDealState) { - switch event { - case retrievalmarket.ClientEventComplete: - clientDealStateChan <- state - } - }) - - providerDealStateChan := make(chan retrievalmarket.ProviderDealState) - rh.Provider.SubscribeToEvents(func(event retrievalmarket.ProviderEvent, state retrievalmarket.ProviderDealState) { - switch event { - case retrievalmarket.ProviderEventComplete: - providerDealStateChan <- state - } - }) - - // **** Send the query for the Piece - // set up retrieval params - retrievalPeer := &retrievalmarket.RetrievalPeer{Address: sh.ProviderAddr, ID: sh.TestData.Host2.ID()} - - resp, err := rh.Client.Query(bgCtx, *retrievalPeer, sh.PayloadCid, retrievalmarket.QueryParams{}) - require.NoError(t, err) - require.Equal(t, retrievalmarket.QueryResponseAvailable, resp.Status) - - // testing V1 only - rmParams := retrievalmarket.NewParamsV1(rh.RetrievalParams.PricePerByte, - rh.RetrievalParams.PaymentInterval, - rh.RetrievalParams.PaymentIntervalIncrease, shared.AllSelector(), nil) - - voucherAmts := []abi.TokenAmount{abi.NewTokenAmount(10136000), abi.NewTokenAmount(9784000)} - proof := []byte("") - for _, voucherAmt := range voucherAmts { - require.NoError(t, rh.ProviderNode.ExpectVoucher(*rh.ExpPaych, rh.ExpVoucher, proof, voucherAmt, voucherAmt, nil)) - } - // just make sure there is enough to cover the transfer - fsize := 19000 // this is the known file size of the test file lorem.txt - expectedTotal := big.Mul(rh.RetrievalParams.PricePerByte, abi.NewTokenAmount(int64(fsize*2))) - - // *** Retrieve the piece - - did, err := rh.Client.Retrieve(bgCtx, sh.PayloadCid, rmParams, expectedTotal, retrievalPeer.ID, *rh.ExpPaych, retrievalPeer.Address) - assert.Equal(t, did, 
retrievalmarket.DealID(0)) - require.NoError(t, err) - - ctxTimeout, cancel := context.WithTimeout(bgCtx, 10*time.Second) - defer cancel() - - // verify that client subscribers will be notified of state changes - var clientDealState retrievalmarket.ClientDealState - select { - case <-ctxTimeout.Done(): - t.Error("deal never completed") - t.FailNow() - case clientDealState = <-clientDealStateChan: - } - - ctxTimeout, cancel = context.WithTimeout(bgCtx, 5*time.Second) - defer cancel() - var providerDealState retrievalmarket.ProviderDealState - select { - case <-ctxTimeout.Done(): - t.Error("provider never saw completed deal") - t.FailNow() - case providerDealState = <-providerDealStateChan: - } - - require.Equal(t, retrievalmarket.DealStatusCompleted, providerDealState.Status) - require.Equal(t, retrievalmarket.DealStatusCompleted, clientDealState.Status) - - sh.TestData.VerifyFileTransferred(t, sh.PieceLink, false, uint64(fsize)) - -} - -type storageHarness struct { - Ctx context.Context - Epoch abi.ChainEpoch - PieceLink ipld.Link - PayloadCid cid.Cid - ProviderAddr address.Address - Client storagemarket.StorageClient - ClientNode *testnodes.FakeClientNode - Provider storagemarket.StorageProvider - ProviderNode *testnodes.FakeProviderNode - ProviderInfo storagemarket.StorageProviderInfo - TestData *shared_testutil.Libp2pTestData - PieceStore piecestore.PieceStore -} - -func newStorageHarness(ctx context.Context, t *testing.T) *storageHarness { - epoch := abi.ChainEpoch(100) - td := shared_testutil.NewLibp2pTestData(ctx, t) - fpath := filepath.Join("retrievalmarket", "impl", "fixtures", "lorem.txt") - rootLink := td.LoadUnixFSFile(t, fpath, false) - payloadCid := rootLink.(cidlink.Link).Cid - clientAddr := address.TestAddress - providerAddr := address.TestAddress2 - - smState := testnodes.NewStorageMarketState() - clientNode := testnodes.FakeClientNode{ - FakeCommonNode: testnodes.FakeCommonNode{SMState: smState}, - ClientAddr: clientAddr, - MinerAddr: providerAddr, 
- WorkerAddr: providerAddr, - } - - expDealID := abi.DealID(rand.Uint64()) - psdReturn := market.PublishStorageDealsReturn{IDs: []abi.DealID{expDealID}} - psdReturnBytes := bytes.NewBuffer([]byte{}) - require.NoError(t, psdReturn.MarshalCBOR(psdReturnBytes)) - - tempPath, err := ioutil.TempDir("", "storagemarket_test") - require.NoError(t, err) - ps := piecestore.NewPieceStore(td.Ds2) - providerNode := &testnodes.FakeProviderNode{ - FakeCommonNode: testnodes.FakeCommonNode{ - SMState: smState, - WaitForMessageRetBytes: psdReturnBytes.Bytes(), - }, - MinerAddr: providerAddr, - } - fs, err := filestore.NewLocalFileStore(filestore.OsPath(tempPath)) - require.NoError(t, err) - - // create provider and client - dt1 := graphsyncimpl.NewGraphSyncDataTransfer(td.Host1, td.GraphSync1, td.DTStoredCounter1) - rv1 := requestvalidation.NewUnifiedRequestValidator(nil, statestore.New(td.Ds1)) - require.NoError(t, dt1.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, rv1)) - - client, err := stormkt.NewClient( - stornet.NewFromLibp2pHost(td.Host1), - td.Bs1, - dt1, - discovery.NewLocal(td.Ds1), - td.Ds1, - &clientNode, - stormkt.DealPollingInterval(0), - ) - require.NoError(t, err) - - dt2 := graphsyncimpl.NewGraphSyncDataTransfer(td.Host2, td.GraphSync2, td.DTStoredCounter2) - rv2 := requestvalidation.NewUnifiedRequestValidator(statestore.New(td.Ds2), nil) - require.NoError(t, dt2.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, rv2)) - - storedAsk, err := storedask.NewStoredAsk(td.Ds2, datastore.NewKey("latest-ask"), providerNode, providerAddr) - require.NoError(t, err) - provider, err := stormkt.NewProvider( - stornet.NewFromLibp2pHost(td.Host2), - td.Ds2, - td.Bs2, - fs, - ps, - dt2, - providerNode, - providerAddr, - abi.RegisteredSealProof_StackedDrg2KiBV1, - storedAsk, - ) - require.NoError(t, err) - - // set ask price where we'll accept any price - require.NoError(t, provider.SetAsk(big.NewInt(0), 50_000)) - require.NoError(t, 
provider.Start(ctx)) - - // Closely follows the MinerInfo struct in the spec - providerInfo := storagemarket.StorageProviderInfo{ - Address: providerAddr, - Owner: providerAddr, - Worker: providerAddr, - SectorSize: 1 << 20, - PeerID: td.Host2.ID(), - } - - smState.Providers = []*storagemarket.StorageProviderInfo{&providerInfo} - return &storageHarness{ - Ctx: ctx, - Epoch: epoch, - PayloadCid: payloadCid, - ProviderAddr: providerAddr, - Client: client, - ClientNode: &clientNode, - PieceLink: rootLink, - PieceStore: ps, - Provider: provider, - ProviderNode: providerNode, - ProviderInfo: providerInfo, - TestData: td, - } -} - -func (sh *storageHarness) ProposeStorageDeal(t *testing.T, dataRef *storagemarket.DataRef) *storagemarket.ProposeStorageDealResult { - result, err := sh.Client.ProposeStorageDeal(sh.Ctx, sh.ProviderAddr, &sh.ProviderInfo, dataRef, sh.Epoch+100, sh.Epoch+20100, big.NewInt(1), big.NewInt(0), abi.RegisteredSealProof_StackedDrg2KiBV1, false, false) - assert.NoError(t, err) - return result -} - -var _ datatransfer.RequestValidator = (*fakeDTValidator)(nil) - -type retrievalHarness struct { - Ctx context.Context - Epoch abi.ChainEpoch - Client retrievalmarket.RetrievalClient - ClientNode *testnodes2.TestRetrievalClientNode - Provider retrievalmarket.RetrievalProvider - ProviderNode *testnodes2.TestRetrievalProviderNode - PieceStore piecestore.PieceStore - ExpPaych, NewLaneAddr *address.Address - ExpPaychAmt, ActualPaychAmt *abi.TokenAmount - ExpVoucher, ActualVoucher *paych.SignedVoucher - RetrievalParams retrievalmarket.Params -} - -func newRetrievalHarness(ctx context.Context, t *testing.T, sh *storageHarness, deal storagemarket.ClientDeal) *retrievalHarness { - - var newPaychAmt abi.TokenAmount - paymentChannelRecorder := func(client, miner address.Address, amt abi.TokenAmount) { - newPaychAmt = amt - } - - var newLaneAddr address.Address - laneRecorder := func(paymentChannel address.Address) { - newLaneAddr = paymentChannel - } - - var 
newVoucher paych.SignedVoucher - paymentVoucherRecorder := func(v *paych.SignedVoucher) { - newVoucher = *v - } - - cids := tut.GenerateCids(2) - clientPaymentChannel, err := address.NewActorAddress([]byte("a")) - - expectedVoucher := tut.MakeTestSignedVoucher() - require.NoError(t, err) - clientNode := testnodes2.NewTestRetrievalClientNode(testnodes2.TestRetrievalClientNodeParams{ - Lane: expectedVoucher.Lane, - PayCh: clientPaymentChannel, - Voucher: expectedVoucher, - PaymentChannelRecorder: paymentChannelRecorder, - AllocateLaneRecorder: laneRecorder, - PaymentVoucherRecorder: paymentVoucherRecorder, - CreatePaychCID: cids[0], - AddFundsCID: cids[1], - }) - - nw1 := rmnet.NewFromLibp2pHost(sh.TestData.Host1) - client, err := retrievalimpl.NewClient(nw1, sh.TestData.Bs1, clientNode, &tut.TestPeerResolver{}, sh.TestData.Ds1, sh.TestData.RetrievalStoredCounter1) - require.NoError(t, err) - - payloadCID := deal.DataRef.Root - providerPaymentAddr := deal.MinerWorker - providerNode := testnodes2.NewTestRetrievalProviderNode() - cio := cario.NewCarIO() - - var buf bytes.Buffer - require.NoError(t, cio.WriteCar(sh.Ctx, sh.TestData.Bs2, payloadCID, shared.AllSelector(), &buf)) - carData := buf.Bytes() - sectorID := uint64(100000) - offset := uint64(1000) - pieceInfo := piecestore.PieceInfo{ - Deals: []piecestore.DealInfo{ - { - SectorID: sectorID, - Offset: offset, - Length: uint64(len(carData)), - }, - }, - } - providerNode.ExpectUnseal(sectorID, offset, uint64(len(carData)), carData) - // clear out provider blockstore - allCids, err := sh.TestData.Bs2.AllKeysChan(sh.Ctx) - require.NoError(t, err) - for c := range allCids { - err = sh.TestData.Bs2.DeleteBlock(c) - require.NoError(t, err) - } - - nw2 := rmnet.NewFromLibp2pHost(sh.TestData.Host2) - pieceStore := tut.NewTestPieceStore() - expectedPiece := tut.GenerateCids(1)[0] - cidInfo := piecestore.CIDInfo{ - PieceBlockLocations: []piecestore.PieceBlockLocation{ - { - PieceCID: expectedPiece, - }, - }, - } - 
pieceStore.ExpectCID(payloadCID, cidInfo) - pieceStore.ExpectPiece(expectedPiece, pieceInfo) - provider, err := retrievalimpl.NewProvider(providerPaymentAddr, providerNode, nw2, pieceStore, sh.TestData.Bs2, sh.TestData.Ds2) - require.NoError(t, err) - - params := retrievalmarket.Params{ - PricePerByte: abi.NewTokenAmount(1000), - PaymentInterval: uint64(10000), - PaymentIntervalIncrease: uint64(1000), - } - - provider.SetPaymentInterval(params.PaymentInterval, params.PaymentIntervalIncrease) - provider.SetPricePerByte(params.PricePerByte) - require.NoError(t, provider.Start()) - - return &retrievalHarness{ - Ctx: ctx, - Client: client, - ClientNode: clientNode, - Epoch: sh.Epoch, - ExpPaych: &clientPaymentChannel, - NewLaneAddr: &newLaneAddr, - ActualPaychAmt: &newPaychAmt, - ExpVoucher: expectedVoucher, - ActualVoucher: &newVoucher, - Provider: provider, - ProviderNode: providerNode, - PieceStore: sh.PieceStore, - RetrievalParams: params, - } -} - -type fakeDTValidator struct{} - -func (v *fakeDTValidator) ValidatePush(sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { - return nil -} - -func (v *fakeDTValidator) ValidatePull(receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { - return nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/testing/test_provider_deal_environment.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/testing/test_provider_deal_environment.go deleted file mode 100644 index 3a86b47c55..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/testing/test_provider_deal_environment.go +++ /dev/null @@ -1,160 +0,0 @@ -// Package testing provides test implementations of retieval market interfaces -package testing - -import ( - "context" - "errors" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - 
rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" - retrievalimpl "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" -) - -// TestProviderDealEnvironment is a test implementation of ProviderDealEnvironment used -// by the provider state machine. -type TestProviderDealEnvironment struct { - decider retrievalimpl.DealDecider - node rm.RetrievalProviderNode - ds rmnet.RetrievalDealStream - nextResponse int - responses []ReadBlockResponse - expectedParams map[dealParamsKey]error - receivedParams map[dealParamsKey]bool - expectedCIDs map[cid.Cid]uint64 - expectedMissingCIDs map[cid.Cid]struct{} - receivedCIDs map[cid.Cid]struct{} - receivedMissingCIDs map[cid.Cid]struct{} - expectedDeciderCalls map[string]struct{} - receivedDeciderCalls map[string]struct{} -} - -// NewTestProviderDealEnvironment returns a new TestProviderDealEnvironment instance -func NewTestProviderDealEnvironment(node rm.RetrievalProviderNode, - ds rmnet.RetrievalDealStream, - decider retrievalimpl.DealDecider, - responses []ReadBlockResponse) *TestProviderDealEnvironment { - return &TestProviderDealEnvironment{ - node: node, - ds: ds, - nextResponse: 0, - responses: responses, - expectedParams: make(map[dealParamsKey]error), - receivedParams: make(map[dealParamsKey]bool), - expectedCIDs: make(map[cid.Cid]uint64), - expectedMissingCIDs: make(map[cid.Cid]struct{}), - receivedCIDs: make(map[cid.Cid]struct{}), - receivedMissingCIDs: make(map[cid.Cid]struct{}), - expectedDeciderCalls: make(map[string]struct{}), - receivedDeciderCalls: make(map[string]struct{}), - decider: decider, - } -} - -// ExpectPiece records a piece being expected to be queried and return the given piece info -func (te *TestProviderDealEnvironment) ExpectPiece(c cid.Cid, size uint64) { - te.expectedCIDs[c] = size -} - -// ExpectMissingPiece records a piece being expected to be queried and should fail -func (te 
*TestProviderDealEnvironment) ExpectMissingPiece(c cid.Cid) { - te.expectedMissingCIDs[c] = struct{}{} -} - -// ExpectParams expects a given call for CheckDealParams and stubbs a response -func (te *TestProviderDealEnvironment) ExpectParams(pricePerByte abi.TokenAmount, - paymentInterval uint64, - paymentIntervalIncrease uint64, - response error) { - te.expectedParams[dealParamsKey{pricePerByte.String(), paymentInterval, paymentIntervalIncrease}] = response -} - -// ExpectDeciderCalledWith expects that the deal decision logic will be run on the given deal ID -func (te *TestProviderDealEnvironment) ExpectDeciderCalledWith(dealid rm.DealID) { - te.expectedDeciderCalls[dealid.String()] = struct{}{} -} - -// VerifyExpectations checks that the expected calls were made on the TestProviderDealEnvironment -func (te *TestProviderDealEnvironment) VerifyExpectations(t *testing.T) { - require.Equal(t, len(te.expectedParams), len(te.receivedParams)) - require.Equal(t, len(te.expectedCIDs), len(te.receivedCIDs)) - require.Equal(t, len(te.expectedMissingCIDs), len(te.receivedMissingCIDs)) - require.Equal(t, len(te.expectedDeciderCalls), len(te.receivedDeciderCalls)) -} - -// Node returns a provider node instance -func (te *TestProviderDealEnvironment) Node() rm.RetrievalProviderNode { - return te.node -} - -// DealStream returns a provided RetrievalDealStream instance -func (te *TestProviderDealEnvironment) DealStream(_ rm.ProviderDealIdentifier) rmnet.RetrievalDealStream { - return te.ds -} - -// GetPieceSize returns a stubbed response for a piece -func (te *TestProviderDealEnvironment) GetPieceSize(c cid.Cid, pieceCID *cid.Cid) (uint64, error) { - pio, ok := te.expectedCIDs[c] - if ok { - te.receivedCIDs[c] = struct{}{} - return pio, nil - } - _, ok = te.expectedMissingCIDs[c] - if ok { - te.receivedMissingCIDs[c] = struct{}{} - return 0, rm.ErrNotFound - } - return 0, errors.New("GetPieceSize failed") -} - -// CheckDealParams returns a stubbed response for the given parameters 
-func (te *TestProviderDealEnvironment) CheckDealParams(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64) error { - key := dealParamsKey{pricePerByte.String(), paymentInterval, paymentIntervalIncrease} - err, ok := te.expectedParams[key] - if !ok { - return errors.New("CheckDealParamsFailed") - } - te.receivedParams[key] = true - return err -} - -// NextBlock returns a series of stubbed responses -func (te *TestProviderDealEnvironment) NextBlock(_ context.Context, _ rm.ProviderDealIdentifier) (rm.Block, bool, error) { - if te.nextResponse >= len(te.responses) { - return rm.EmptyBlock, false, errors.New("Something went wrong") - } - response := te.responses[te.nextResponse] - te.nextResponse++ - return response.Block, response.Done, response.Err -} - -// RunDealDecisioningLogic simulates running deal decision logic -func (te *TestProviderDealEnvironment) RunDealDecisioningLogic(ctx context.Context, state rm.ProviderDealState) (bool, string, error) { - te.receivedDeciderCalls[state.ID.String()] = struct{}{} - if te.decider == nil { - return TrivalTestDecider(ctx, state) - } - return te.decider(ctx, state) -} - -// TrivalTestDecider is a shortest possible DealDecider that accepts all deals -var TrivalTestDecider retrievalimpl.DealDecider = func(_ context.Context, _ rm.ProviderDealState) (bool, string, error) { - return true, "", nil -} - -type dealParamsKey struct { - pricePerByte string - paymentInterval uint64 - paymentIntervalIncrease uint64 -} - -// ReadBlockResponse is a stubbed response to calling NextBlock -type ReadBlockResponse struct { - Block rm.Block - Done bool - Err error -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go deleted file mode 100644 index 5e5c43aa1c..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types.go +++ /dev/null @@ -1,717 +0,0 @@ -package 
retrievalmarket - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - "github.com/ipld/go-ipld-prime/codec/dagcbor" - "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/go-fil-markets/shared" -) - -//go:generate cbor-gen-for Query QueryResponse DealProposal DealResponse Params QueryParams DealPayment Block ClientDealState ProviderDealState PaymentInfo - -// ProtocolID is the protocol for proposing / responding to retrieval deals -const ProtocolID = "/fil/retrieval/0.0.1" - -// QueryProtocolID is the protocol for querying information about retrieval -// deal parameters -const QueryProtocolID = "/fil/retrieval/qry/0.0.1" - -// Unsubscribe is a function that unsubscribes a subscriber for either the -// client or the provider -type Unsubscribe func() - -// PaymentInfo is the payment channel and lane for a deal, once it is setup -type PaymentInfo struct { - PayCh address.Address - Lane uint64 -} - -// ClientDealState is the current state of a deal from the point of view -// of a retrieval client -type ClientDealState struct { - DealProposal - TotalFunds abi.TokenAmount - ClientWallet address.Address - MinerWallet address.Address - PaymentInfo *PaymentInfo - Status DealStatus - Sender peer.ID - TotalReceived uint64 - Message string - BytesPaidFor uint64 - CurrentInterval uint64 - PaymentRequested abi.TokenAmount - FundsSpent abi.TokenAmount - WaitMsgCID *cid.Cid // the CID of any message the client deal is waiting for -} - -// ClientEvent is an event that occurs in a deal lifecycle on the client -type ClientEvent uint64 - -const ( - // ClientEventOpen indicates a deal was initiated - ClientEventOpen ClientEvent = iota 
- - // ClientEventPaymentChannelErrored means there was a failure creating a payment channel - ClientEventPaymentChannelErrored - - // ClientEventAllocateLaneErrored means there was a failure creating a lane in a payment channel - ClientEventAllocateLaneErrored - - // ClientEventPaymentChannelCreateInitiated means we are waiting for a message to - // create a payment channel to appear on chain - ClientEventPaymentChannelCreateInitiated - - // ClientEventPaymentChannelReady means the newly created payment channel is ready for the - // deal to resume - ClientEventPaymentChannelReady - - // ClientEventPaymentChannelAddingFunds mean we are waiting for funds to be - // added to a payment channel - ClientEventPaymentChannelAddingFunds - - // ClientEventPaymentChannelAddFundsErrored means that adding funds to the payment channel - // failed - ClientEventPaymentChannelAddFundsErrored - - // ClientEventWriteDealProposalErrored means a network error writing a deal proposal - ClientEventWriteDealProposalErrored - - // ClientEventReadDealResponseErrored means a network error reading a deal response - ClientEventReadDealResponseErrored - - // ClientEventDealRejected means a deal was rejected by the provider - ClientEventDealRejected - - // ClientEventDealNotFound means a provider could not find a piece for a deal - ClientEventDealNotFound - - // ClientEventDealAccepted means a provider accepted a deal - ClientEventDealAccepted - - // ClientEventUnknownResponseReceived means a client received a response it doesn't - // understand from the provider - ClientEventUnknownResponseReceived - - // ClientEventFundsExpended indicates a deal has run out of funds in the payment channel - // forcing the client to add more funds to continue the deal - ClientEventFundsExpended // when totalFunds is expended - - // ClientEventBadPaymentRequested indicates the provider asked for funds - // in a way that does not match the terms of the deal - ClientEventBadPaymentRequested - - // 
ClientEventCreateVoucherFailed indicates an error happened creating a payment voucher - ClientEventCreateVoucherFailed - - // ClientEventWriteDealPaymentErrored indicates a network error trying to write a payment - ClientEventWriteDealPaymentErrored - - // ClientEventPaymentSent indicates a payment was sent to the provider - ClientEventPaymentSent - - // ClientEventConsumeBlockFailed indicates an error occurred while trying to - // read a block from the provider - ClientEventConsumeBlockFailed - - // ClientEventLastPaymentRequested indicates the provider requested a final payment - ClientEventLastPaymentRequested - - // ClientEventAllBlocksReceived indicates the provider has sent all blocks - ClientEventAllBlocksReceived - - // ClientEventEarlyTermination indicates the provider completed the deal without sending all blocks - ClientEventEarlyTermination - - // ClientEventPaymentRequested indicates the provider requested a payment - ClientEventPaymentRequested - - // ClientEventBlocksReceived indicates the provider has sent blocks - ClientEventBlocksReceived - - // ClientEventProgress indicates more data was received for a retrieval - ClientEventProgress - - // ClientEventError indicates an error occurred during a deal - ClientEventError - - // ClientEventComplete indicates a deal has completed - ClientEventComplete -) - -// ClientEvents is a human readable map of client event name -> event description -var ClientEvents = map[ClientEvent]string{ - ClientEventOpen: "ClientEventOpen", - ClientEventPaymentChannelErrored: "ClientEventPaymentChannelErrored", - ClientEventAllocateLaneErrored: "ClientEventAllocateLaneErrored", - ClientEventPaymentChannelCreateInitiated: "ClientEventPaymentChannelCreateInitiated", - ClientEventPaymentChannelReady: "ClientEventPaymentChannelReady", - ClientEventPaymentChannelAddingFunds: "ClientEventPaymentChannelAddingFunds", - ClientEventPaymentChannelAddFundsErrored: "ClientEventPaymentChannelAddFundsErrored", - 
ClientEventWriteDealProposalErrored: "ClientEventWriteDealProposalErrored", - ClientEventReadDealResponseErrored: "ClientEventReadDealResponseErrored", - ClientEventDealRejected: "ClientEventDealRejected", - ClientEventDealNotFound: "ClientEventDealNotFound", - ClientEventDealAccepted: "ClientEventDealAccepted", - ClientEventUnknownResponseReceived: "ClientEventUnknownResponseReceived", - ClientEventFundsExpended: "ClientEventFundsExpended", - ClientEventBadPaymentRequested: "ClientEventBadPaymentRequested", - ClientEventCreateVoucherFailed: "ClientEventCreateVoucherFailed", - ClientEventWriteDealPaymentErrored: "ClientEventWriteDealPaymentErrored", - ClientEventPaymentSent: "ClientEventPaymentSent", - ClientEventConsumeBlockFailed: "ClientEventConsumeBlockFailed", - ClientEventLastPaymentRequested: "ClientEventLastPaymentRequested", - ClientEventAllBlocksReceived: "ClientEventAllBlocksReceived", - ClientEventEarlyTermination: "ClientEventEarlyTermination", - ClientEventPaymentRequested: "ClientEventPaymentRequested", - ClientEventBlocksReceived: "ClientEventBlocksReceived", - ClientEventProgress: "ClientEventProgress", - ClientEventError: "ClientEventError", - ClientEventComplete: "ClientEventComplete", -} - -// ClientSubscriber is a callback that is registered to listen for retrieval events -type ClientSubscriber func(event ClientEvent, state ClientDealState) - -// RetrievalClient is a client interface for making retrieval deals -type RetrievalClient interface { - // V0 - - // Find Providers finds retrieval providers who may be storing a given piece - FindProviders(payloadCID cid.Cid) []RetrievalPeer - - // Query asks a provider for information about a piece it is storing - Query( - ctx context.Context, - p RetrievalPeer, - payloadCID cid.Cid, - params QueryParams, - ) (QueryResponse, error) - - // Retrieve retrieves all or part of a piece with the given retrieval parameters - Retrieve( - ctx context.Context, - payloadCID cid.Cid, - params Params, - totalFunds 
abi.TokenAmount, - miner peer.ID, - clientWallet address.Address, - minerWallet address.Address, - ) (DealID, error) - - // SubscribeToEvents listens for events that happen related to client retrievals - SubscribeToEvents(subscriber ClientSubscriber) Unsubscribe - - // V1 - AddMoreFunds(id DealID, amount abi.TokenAmount) error - CancelDeal(id DealID) error - RetrievalStatus(id DealID) - ListDeals() map[DealID]ClientDealState -} - -// RetrievalClientNode are the node dependencies for a RetrievalClient -type RetrievalClientNode interface { - GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) - - // GetOrCreatePaymentChannel sets up a new payment channel if one does not exist - // between a client and a miner and ensures the client has the given amount of funds available in the channel - GetOrCreatePaymentChannel(ctx context.Context, clientAddress, minerAddress address.Address, - clientFundsAvailable abi.TokenAmount, tok shared.TipSetToken) (address.Address, cid.Cid, error) - - // Allocate late creates a lane within a payment channel so that calls to - // CreatePaymentVoucher will automatically make vouchers only for the difference - // in total - AllocateLane(paymentChannel address.Address) (uint64, error) - - // CreatePaymentVoucher creates a new payment voucher in the given lane for a - // given payment channel so that all the payment vouchers in the lane add up - // to the given amount (so the payment voucher will be for the difference) - CreatePaymentVoucher(ctx context.Context, paymentChannel address.Address, amount abi.TokenAmount, - lane uint64, tok shared.TipSetToken) (*paych.SignedVoucher, error) - - // WaitForPaymentChannelAddFunds waits for a message on chain that funds have - // been sent to a payment channel - WaitForPaymentChannelAddFunds(messageCID cid.Cid) error - - // WaitForPaymentChannelCreation waits for a message on chain that a - // payment channel has been created - WaitForPaymentChannelCreation(messageCID cid.Cid) 
(address.Address, error) -} - -// ProviderDealState is the current state of a deal from the point of view -// of a retrieval provider -type ProviderDealState struct { - DealProposal - Status DealStatus - Receiver peer.ID - TotalSent uint64 - FundsReceived abi.TokenAmount - Message string - CurrentInterval uint64 -} - -// Identifier provides a unique id for this provider deal -func (pds ProviderDealState) Identifier() ProviderDealIdentifier { - return ProviderDealIdentifier{Receiver: pds.Receiver, DealID: pds.ID} -} - -// ProviderDealIdentifier is a value that uniquely identifies a deal -type ProviderDealIdentifier struct { - Receiver peer.ID - DealID DealID -} - -func (p ProviderDealIdentifier) String() string { - return fmt.Sprintf("%v/%v", p.Receiver, p.DealID) -} - -// ProviderEvent is an event that occurs in a deal lifecycle on the provider -type ProviderEvent uint64 - -const ( - // ProviderEventOpen indicates a new deal was received from a client - ProviderEventOpen ProviderEvent = iota - - // ProviderEventDealReceived means the deal has passed initial checks and is - // in custom decisioning logic - ProviderEventDealReceived - - // ProviderEventDecisioningError means the Deciding function returned an error - ProviderEventDecisioningError - - // ProviderEventWriteResponseFailed happens when a network error occurs writing a deal response - ProviderEventWriteResponseFailed - - // ProviderEventReadPaymentFailed happens when a network error occurs trying to read a - // payment from the client - ProviderEventReadPaymentFailed - - // ProviderEventGetPieceSizeErrored happens when the provider encounters an error - // looking up the requested pieces size - ProviderEventGetPieceSizeErrored - - // ProviderEventDealNotFound happens when the provider cannot find the piece for the - // deal proposed by the client - ProviderEventDealNotFound - - // ProviderEventDealRejected happens when a provider rejects a deal proposed - // by the client - ProviderEventDealRejected - - // 
ProviderEventDealAccepted happens when a provider accepts a deal - ProviderEventDealAccepted - - // ProviderEventBlockErrored happens when the provider encounters an error - // trying to read the next block from the piece - ProviderEventBlockErrored - - // ProviderEventBlocksCompleted happens when the provider reads the last block - // in the piece - ProviderEventBlocksCompleted - - // ProviderEventPaymentRequested happens when a provider asks for payment from - // a client for blocks sent - ProviderEventPaymentRequested - - // ProviderEventSaveVoucherFailed happens when an attempt to save a payment - // voucher fails - ProviderEventSaveVoucherFailed - - // ProviderEventPartialPaymentReceived happens when a provider receives and processes - // a payment that is less than what was requested to proceed with the deal - ProviderEventPartialPaymentReceived - - // ProviderEventPaymentReceived happens when a provider receives a payment - // and resumes processing a deal - ProviderEventPaymentReceived - - // ProviderEventComplete indicates a retrieval deal was completed for a client - ProviderEventComplete -) - -// ProviderEvents is a human readable map of provider event name -> event description -var ProviderEvents = map[ProviderEvent]string{ - ProviderEventOpen: "ProviderEventOpen", - ProviderEventDealReceived: "ProviderEventDealReceived", - ProviderEventDecisioningError: "ProviderEventDecisioningError", - ProviderEventWriteResponseFailed: "ProviderEventWriteResponseFailed", - ProviderEventReadPaymentFailed: "ProviderEventReadPaymentFailed", - ProviderEventGetPieceSizeErrored: "ProviderEventGetPieceSizeErrored", - ProviderEventDealNotFound: "ProviderEventDealNotFound", - ProviderEventDealRejected: "ProviderEventDealRejected", - ProviderEventDealAccepted: "ProviderEventDealAccepted", - ProviderEventBlockErrored: "ProviderEventBlockErrored", - ProviderEventBlocksCompleted: "ProviderEventBlocksCompleted", - ProviderEventPaymentRequested: "ProviderEventPaymentRequested", - 
ProviderEventSaveVoucherFailed: "ProviderEventSaveVoucherFailed", - ProviderEventPartialPaymentReceived: "ProviderEventPartialPaymentReceived", - ProviderEventPaymentReceived: "ProviderEventPaymentReceived", - ProviderEventComplete: "ProviderEventComplete", -} - -// ProviderDealID is a unique identifier for a deal on a provider -- it is -// a combination of DealID set by the client and the peer ID of the client -type ProviderDealID struct { - From peer.ID - ID DealID -} - -// ProviderSubscriber is a callback that is registered to listen for retrieval events on a provider -type ProviderSubscriber func(event ProviderEvent, state ProviderDealState) - -// RetrievalProvider is an interface by which a provider configures their -// retrieval operations and monitors deals received and process -type RetrievalProvider interface { - // Start begins listening for deals on the given host - Start() error - - // Stop stops handling incoming requests - Stop() error - - // V0 - - // SetPricePerByte sets the price per byte a miner charges for retrievals - SetPricePerByte(price abi.TokenAmount) - - // SetPaymentInterval sets the maximum number of bytes a a provider will send before - // requesting further payment, and the rate at which that value increases - SetPaymentInterval(paymentInterval uint64, paymentIntervalIncrease uint64) - - // SubscribeToEvents listens for events that happen related to client retrievals - SubscribeToEvents(subscriber ProviderSubscriber) Unsubscribe - - // V1 - SetPricePerUnseal(price abi.TokenAmount) - ListDeals() map[ProviderDealID]ProviderDealState -} - -// RetrievalProviderNode are the node depedencies for a RetrevalProvider -type RetrievalProviderNode interface { - GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) - - // returns the worker address associated with a miner - GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) - UnsealSector(ctx context.Context, 
sectorID uint64, offset uint64, length uint64) (io.ReadCloser, error) - SavePaymentVoucher(ctx context.Context, paymentChannel address.Address, voucher *paych.SignedVoucher, proof []byte, expectedAmount abi.TokenAmount, tok shared.TipSetToken) (abi.TokenAmount, error) -} - -// PeerResolver is an interface for looking up providers that may have a piece -type PeerResolver interface { - GetPeers(payloadCID cid.Cid) ([]RetrievalPeer, error) // TODO: channel -} - -// RetrievalPeer is a provider address/peer.ID pair (everything needed to make -// deals for with a miner) -type RetrievalPeer struct { - Address address.Address - ID peer.ID // optional -} - -// QueryResponseStatus indicates whether a queried piece is available -type QueryResponseStatus uint64 - -const ( - // QueryResponseAvailable indicates a provider has a piece and is prepared to - // return it - QueryResponseAvailable QueryResponseStatus = iota - - // QueryResponseUnavailable indicates a provider either does not have or cannot - // serve the queried piece to the client - QueryResponseUnavailable - - // QueryResponseError indicates something went wrong generating a query response - QueryResponseError -) - -// QueryItemStatus (V1) indicates whether the requested part of a piece (payload or selector) -// is available for retrieval -type QueryItemStatus uint64 - -const ( - // QueryItemAvailable indicates requested part of the piece is available to be - // served - QueryItemAvailable QueryItemStatus = iota - - // QueryItemUnavailable indicates the piece either does not contain the requested - // item or it cannot be served - QueryItemUnavailable - - // QueryItemUnknown indicates the provider cannot determine if the given item - // is part of the requested piece (for example, if the piece is sealed and the - // miner does not maintain a payload CID index) - QueryItemUnknown -) - -// QueryParams - V1 - indicate what specific information about a piece that a retrieval -// client is interested in, as well as 
specific parameters the client is seeking -// for the retrieval deal -type QueryParams struct { - PieceCID *cid.Cid // optional, query if miner has this cid in this piece. some miners may not be able to respond. - //Selector ipld.Node // optional, query if miner has this cid in this piece. some miners may not be able to respond. - //MaxPricePerByte abi.TokenAmount // optional, tell miner uninterested if more expensive than this - //MinPaymentInterval uint64 // optional, tell miner uninterested unless payment interval is greater than this - //MinPaymentIntervalIncrease uint64 // optional, tell miner uninterested unless payment interval increase is greater than this -} - -// Query is a query to a given provider to determine information about a piece -// they may have available for retrieval -type Query struct { - PayloadCID cid.Cid // V0 - QueryParams // V1 -} - -// QueryUndefined is a query with no values -var QueryUndefined = Query{} - -// NewQueryV0 creates a V0 query (which only specifies a piece) -func NewQueryV0(payloadCID cid.Cid) Query { - return Query{PayloadCID: payloadCID} -} - -// QueryResponse is a miners response to a given retrieval query -type QueryResponse struct { - Status QueryResponseStatus - PieceCIDFound QueryItemStatus // V1 - if a PieceCID was requested, the result - //SelectorFound QueryItemStatus // V1 - if a Selector was requested, the result - - Size uint64 // Total size of piece in bytes - //ExpectedPayloadSize uint64 // V1 - optional, if PayloadCID + selector are specified and miner knows, can offer an expected size - - PaymentAddress address.Address // address to send funds to -- may be different than miner addr - MinPricePerByte abi.TokenAmount - MaxPaymentInterval uint64 - MaxPaymentIntervalIncrease uint64 - Message string -} - -// QueryResponseUndefined is an empty QueryResponse -var QueryResponseUndefined = QueryResponse{} - -// PieceRetrievalPrice is the total price to retrieve the piece (size * MinPricePerByte) -func (qr 
QueryResponse) PieceRetrievalPrice() abi.TokenAmount { - return big.Mul(qr.MinPricePerByte, abi.NewTokenAmount(int64(qr.Size))) -} - -// PayloadRetrievalPrice is the expected price to retrieve just the given payload -// & selector (V1) -//func (qr QueryResponse) PayloadRetrievalPrice() abi.TokenAmount { -// return types.BigMul(qr.MinPricePerByte, types.NewInt(qr.ExpectedPayloadSize)) -//} - -// DealStatus is the status of a retrieval deal returned by a provider -// in a DealResponse -type DealStatus uint64 - -const ( - // DealStatusNew is a deal that nothing has happened with yet - DealStatusNew DealStatus = iota - - // DealStatusPaymentChannelCreating is the status set while waiting for the - // payment channel creation to complete - DealStatusPaymentChannelCreating - - // DealStatusPaymentChannelAddingFunds is the status when we are waiting for funds - // to finish being sent to the payment channel - DealStatusPaymentChannelAddingFunds - - // DealStatusPaymentChannelAllocatingLane is the status during lane allocation - DealStatusPaymentChannelAllocatingLane - - // DealStatusPaymentChannelReady is a deal status that has a payment channel - // & lane setup - DealStatusPaymentChannelReady - - // DealStatusAwaitingAcceptance - deal is waiting for the decider function to finish - DealStatusAwaitingAcceptance - - // DealStatusAccepted means a deal has been accepted by a provider - // and its is ready to proceed with retrieval - DealStatusAccepted - - // DealStatusFailed indicates something went wrong during a retrieval - DealStatusFailed - - // DealStatusRejected indicates the provider rejected a client's deal proposal - // for some reason - DealStatusRejected - - // DealStatusFundsNeeded indicates the provider needs a payment voucher to - // continue processing the deal - DealStatusFundsNeeded - - // DealStatusOngoing indicates the provider is continuing to process a deal - DealStatusOngoing - - // DealStatusFundsNeededLastPayment indicates the provider needs a 
payment voucher - // in order to complete a deal - DealStatusFundsNeededLastPayment - - // DealStatusCompleted indicates a deal is complete - DealStatusCompleted - - // DealStatusDealNotFound indicates an update was received for a deal that could - // not be identified - DealStatusDealNotFound - - // DealStatusVerified means a deal has been verified as having the right parameters - DealStatusVerified - - // DealStatusErrored indicates something went wrong with a deal - DealStatusErrored - - // DealStatusBlocksComplete indicates that all blocks have been processed for the piece - DealStatusBlocksComplete - - // DealStatusFinalizing means the last payment has been received and - // we are just confirming the deal is complete - DealStatusFinalizing -) - -// DealStatuses maps deal status to a human readable representation -var DealStatuses = map[DealStatus]string{ - DealStatusNew: "DealStatusNew", - DealStatusPaymentChannelCreating: "DealStatusPaymentChannelCreating", - DealStatusPaymentChannelAddingFunds: "DealStatusPaymentChannelAddingFunds", - DealStatusPaymentChannelAllocatingLane: "DealStatusPaymentChannelAllocatingLane", - DealStatusPaymentChannelReady: "DealStatusPaymentChannelReady", - DealStatusAwaitingAcceptance: "DealStatusAwaitingAcceptance", - DealStatusAccepted: "DealStatusAccepted", - DealStatusFailed: "DealStatusFailed", - DealStatusRejected: "DealStatusRejected", - DealStatusFundsNeeded: "DealStatusFundsNeeded", - DealStatusOngoing: "DealStatusOngoing", - DealStatusFundsNeededLastPayment: "DealStatusFundsNeededLastPayment", - DealStatusCompleted: "DealStatusCompleted", - DealStatusDealNotFound: "DealStatusDealNotFound", - DealStatusVerified: "DealStatusVerified", - DealStatusErrored: "DealStatusErrored", - DealStatusBlocksComplete: "DealStatusBlocksComplete", - DealStatusFinalizing: "DealStatusFinalizing", -} - -// IsTerminalError returns true if this status indicates processing of this deal -// is complete with an error -func IsTerminalError(status 
DealStatus) bool { - return status == DealStatusDealNotFound || - status == DealStatusFailed || - status == DealStatusRejected -} - -// IsTerminalSuccess returns true if this status indicates processing of this deal -// is complete with a success -func IsTerminalSuccess(status DealStatus) bool { - return status == DealStatusCompleted -} - -// IsTerminalStatus returns true if this status indicates processing of a deal is -// complete (either success or error) -func IsTerminalStatus(status DealStatus) bool { - return IsTerminalError(status) || IsTerminalSuccess(status) -} - -// Params are the parameters requested for a retrieval deal proposal -type Params struct { - Selector *cbg.Deferred // V1 - PieceCID *cid.Cid - PricePerByte abi.TokenAmount - PaymentInterval uint64 // when to request payment - PaymentIntervalIncrease uint64 // -} - -// NewParamsV0 generates parameters for a retrieval deal, which is always a whole piece deal -func NewParamsV0(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64) Params { - return Params{ - PricePerByte: pricePerByte, - PaymentInterval: paymentInterval, - PaymentIntervalIncrease: paymentIntervalIncrease, - } -} - -// NewParamsV1 generates parameters for a retrieval deal, including a selector -func NewParamsV1(pricePerByte abi.TokenAmount, paymentInterval uint64, paymentIntervalIncrease uint64, sel ipld.Node, pieceCid *cid.Cid) Params { - var buffer bytes.Buffer - err := dagcbor.Encoder(sel, &buffer) - if err != nil { - return Params{} - } - - return Params{ - Selector: &cbg.Deferred{Raw: buffer.Bytes()}, - PieceCID: pieceCid, - PricePerByte: pricePerByte, - PaymentInterval: paymentInterval, - PaymentIntervalIncrease: paymentIntervalIncrease, - } -} - -// DealID is an identifier for a retrieval deal (unique to a client) -type DealID uint64 - -func (d DealID) String() string { - return fmt.Sprintf("%d", d) -} - -// DealProposal is a proposal for a new retrieval deal -type DealProposal struct { - 
PayloadCID cid.Cid - ID DealID - Params -} - -// DealProposalUndefined is an undefined deal proposal -var DealProposalUndefined = DealProposal{} - -// Block is an IPLD block in bitswap format -type Block struct { - Prefix []byte - Data []byte -} - -// EmptyBlock is just a block with no content -var EmptyBlock = Block{} - -// DealResponse is a response to a retrieval deal proposal -type DealResponse struct { - Status DealStatus - ID DealID - - // payment required to proceed - PaymentOwed abi.TokenAmount - - Message string - Blocks []Block // V0 only -} - -// DealResponseUndefined is an undefined deal response -var DealResponseUndefined = DealResponse{} - -// DealPayment is a payment for an in progress retrieval deal -type DealPayment struct { - ID DealID - PaymentChannel address.Address - PaymentVoucher *paych.SignedVoucher -} - -// DealPaymentUndefined is an undefined deal payment -var DealPaymentUndefined = DealPayment{} - -var ( - // ErrNotFound means a piece was not found during retrieval - ErrNotFound = errors.New("not found") - - // ErrVerification means a retrieval contained a block response that did not verify - ErrVerification = errors.New("Error when verify data") -) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go deleted file mode 100644 index 8e41f8a0f2..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_cbor_gen.go +++ /dev/null @@ -1,1398 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package retrievalmarket - -import ( - "fmt" - "io" - - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *Query) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.PayloadCID (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.PayloadCID); err != nil { - return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) - } - - // t.QueryParams (retrievalmarket.QueryParams) (struct) - if err := t.QueryParams.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *Query) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PayloadCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) - } - - t.PayloadCID = c - - } - // t.QueryParams (retrievalmarket.QueryParams) (struct) - - { - - if err := t.QueryParams.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.QueryParams: %w", err) - } - - } - return nil -} - -func (t *QueryResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{136}); err != nil { - return err - } - - // t.Status (retrievalmarket.QueryResponseStatus) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Status))); err != nil { - return err - } - - // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) - - if _, err := 
w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PieceCIDFound))); err != nil { - return err - } - - // t.Size (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Size))); err != nil { - return err - } - - // t.PaymentAddress (address.Address) (struct) - if err := t.PaymentAddress.MarshalCBOR(w); err != nil { - return err - } - - // t.MinPricePerByte (big.Int) (struct) - if err := t.MinPricePerByte.MarshalCBOR(w); err != nil { - return err - } - - // t.MaxPaymentInterval (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MaxPaymentInterval))); err != nil { - return err - } - - // t.MaxPaymentIntervalIncrease (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MaxPaymentIntervalIncrease))); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Message)); err != nil { - return err - } - return nil -} - -func (t *QueryResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 8 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Status (retrievalmarket.QueryResponseStatus) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = QueryResponseStatus(extra) - - } - // t.PieceCIDFound (retrievalmarket.QueryItemStatus) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err 
- } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PieceCIDFound = QueryItemStatus(extra) - - } - // t.Size (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Size = uint64(extra) - - } - // t.PaymentAddress (address.Address) (struct) - - { - - if err := t.PaymentAddress.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PaymentAddress: %w", err) - } - - } - // t.MinPricePerByte (big.Int) (struct) - - { - - if err := t.MinPricePerByte.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.MinPricePerByte: %w", err) - } - - } - // t.MaxPaymentInterval (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MaxPaymentInterval = uint64(extra) - - } - // t.MaxPaymentIntervalIncrease (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MaxPaymentIntervalIncrease = uint64(extra) - - } - // t.Message (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Message = string(sval) - } - return nil -} - -func (t *DealProposal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.PayloadCID (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.PayloadCID); err != nil { - return xerrors.Errorf("failed to write cid field t.PayloadCID: %w", err) - } - - // t.ID (retrievalmarket.DealID) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ID))); err != nil { - return err - } - - // 
t.Params (retrievalmarket.Params) (struct) - if err := t.Params.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *DealProposal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PayloadCID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PayloadCID: %w", err) - } - - t.PayloadCID = c - - } - // t.ID (retrievalmarket.DealID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ID = DealID(extra) - - } - // t.Params (retrievalmarket.Params) (struct) - - { - - if err := t.Params.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Params: %w", err) - } - - } - return nil -} - -func (t *DealResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{133}); err != nil { - return err - } - - // t.Status (retrievalmarket.DealStatus) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Status))); err != nil { - return err - } - - // t.ID (retrievalmarket.DealID) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ID))); err != nil { - return err - } - - // t.PaymentOwed (big.Int) (struct) - if err := t.PaymentOwed.MarshalCBOR(w); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { - 
return err - } - if _, err := w.Write([]byte(t.Message)); err != nil { - return err - } - - // t.Blocks ([]retrievalmarket.Block) (slice) - if len(t.Blocks) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Blocks was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajArray, uint64(len(t.Blocks)))); err != nil { - return err - } - for _, v := range t.Blocks { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *DealResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Status (retrievalmarket.DealStatus) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = DealStatus(extra) - - } - // t.ID (retrievalmarket.DealID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ID = DealID(extra) - - } - // t.PaymentOwed (big.Int) (struct) - - { - - if err := t.PaymentOwed.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PaymentOwed: %w", err) - } - - } - // t.Message (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Message = string(sval) - } - // t.Blocks ([]retrievalmarket.Block) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Blocks: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Blocks = make([]Block, 
extra) - } - - for i := 0; i < int(extra); i++ { - - var v Block - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Blocks[i] = v - } - - return nil -} - -func (t *Params) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{133}); err != nil { - return err - } - - // t.Selector (typegen.Deferred) (struct) - if err := t.Selector.MarshalCBOR(w); err != nil { - return err - } - - // t.PieceCID (cid.Cid) (struct) - - if t.PieceCID == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(w, *t.PieceCID); err != nil { - return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) - } - } - - // t.PricePerByte (big.Int) (struct) - if err := t.PricePerByte.MarshalCBOR(w); err != nil { - return err - } - - // t.PaymentInterval (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PaymentInterval))); err != nil { - return err - } - - // t.PaymentIntervalIncrease (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PaymentIntervalIncrease))); err != nil { - return err - } - - return nil -} - -func (t *Params) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Selector (typegen.Deferred) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Selector = new(cbg.Deferred) - if err := t.Selector.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Selector pointer: %w", err) - } - } - - } - // t.PieceCID 
(cid.Cid) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) - } - - t.PieceCID = &c - } - - } - // t.PricePerByte (big.Int) (struct) - - { - - if err := t.PricePerByte.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PricePerByte: %w", err) - } - - } - // t.PaymentInterval (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PaymentInterval = uint64(extra) - - } - // t.PaymentIntervalIncrease (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PaymentIntervalIncrease = uint64(extra) - - } - return nil -} - -func (t *QueryParams) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.PieceCID (cid.Cid) (struct) - - if t.PieceCID == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(w, *t.PieceCID); err != nil { - return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) - } - } - - return nil -} - -func (t *QueryParams) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PieceCID (cid.Cid) (struct) - - { - - pb, err := br.PeekByte() - if 
err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) - } - - t.PieceCID = &c - } - - } - return nil -} - -func (t *DealPayment) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.ID (retrievalmarket.DealID) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.ID))); err != nil { - return err - } - - // t.PaymentChannel (address.Address) (struct) - if err := t.PaymentChannel.MarshalCBOR(w); err != nil { - return err - } - - // t.PaymentVoucher (paych.SignedVoucher) (struct) - if err := t.PaymentVoucher.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *DealPayment) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.ID (retrievalmarket.DealID) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.ID = DealID(extra) - - } - // t.PaymentChannel (address.Address) (struct) - - { - - if err := t.PaymentChannel.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PaymentChannel: %w", err) - } - - } - // t.PaymentVoucher (paych.SignedVoucher) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.PaymentVoucher = 
new(paych.SignedVoucher) - if err := t.PaymentVoucher.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PaymentVoucher pointer: %w", err) - } - } - - } - return nil -} - -func (t *Block) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Prefix ([]uint8) (slice) - if len(t.Prefix) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Prefix was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Prefix)))); err != nil { - return err - } - if _, err := w.Write(t.Prefix); err != nil { - return err - } - - // t.Data ([]uint8) (slice) - if len(t.Data) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Data was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(t.Data)))); err != nil { - return err - } - if _, err := w.Write(t.Data); err != nil { - return err - } - return nil -} - -func (t *Block) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Prefix ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Prefix: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Prefix = make([]byte, extra) - if _, err := io.ReadFull(br, t.Prefix); err != nil { - return err - } - // t.Data ([]uint8) (slice) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Data: byte array too large (%d)", 
extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Data = make([]byte, extra) - if _, err := io.ReadFull(br, t.Data); err != nil { - return err - } - return nil -} - -func (t *ClientDealState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{142}); err != nil { - return err - } - - // t.DealProposal (retrievalmarket.DealProposal) (struct) - if err := t.DealProposal.MarshalCBOR(w); err != nil { - return err - } - - // t.TotalFunds (big.Int) (struct) - if err := t.TotalFunds.MarshalCBOR(w); err != nil { - return err - } - - // t.ClientWallet (address.Address) (struct) - if err := t.ClientWallet.MarshalCBOR(w); err != nil { - return err - } - - // t.MinerWallet (address.Address) (struct) - if err := t.MinerWallet.MarshalCBOR(w); err != nil { - return err - } - - // t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) - if err := t.PaymentInfo.MarshalCBOR(w); err != nil { - return err - } - - // t.Status (retrievalmarket.DealStatus) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Status))); err != nil { - return err - } - - // t.Sender (peer.ID) (string) - if len(t.Sender) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Sender was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Sender)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Sender)); err != nil { - return err - } - - // t.TotalReceived (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.TotalReceived))); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { - return err - } - if _, err := 
w.Write([]byte(t.Message)); err != nil { - return err - } - - // t.BytesPaidFor (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.BytesPaidFor))); err != nil { - return err - } - - // t.CurrentInterval (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.CurrentInterval))); err != nil { - return err - } - - // t.PaymentRequested (big.Int) (struct) - if err := t.PaymentRequested.MarshalCBOR(w); err != nil { - return err - } - - // t.FundsSpent (big.Int) (struct) - if err := t.FundsSpent.MarshalCBOR(w); err != nil { - return err - } - - // t.WaitMsgCID (cid.Cid) (struct) - - if t.WaitMsgCID == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(w, *t.WaitMsgCID); err != nil { - return xerrors.Errorf("failed to write cid field t.WaitMsgCID: %w", err) - } - } - - return nil -} - -func (t *ClientDealState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 14 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealProposal (retrievalmarket.DealProposal) (struct) - - { - - if err := t.DealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) - } - - } - // t.TotalFunds (big.Int) (struct) - - { - - if err := t.TotalFunds.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.TotalFunds: %w", err) - } - - } - // t.ClientWallet (address.Address) (struct) - - { - - if err := t.ClientWallet.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ClientWallet: %w", err) - } - - } - // t.MinerWallet (address.Address) (struct) - - { - - if err := t.MinerWallet.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling 
t.MinerWallet: %w", err) - } - - } - // t.PaymentInfo (retrievalmarket.PaymentInfo) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.PaymentInfo = new(PaymentInfo) - if err := t.PaymentInfo.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PaymentInfo pointer: %w", err) - } - } - - } - // t.Status (retrievalmarket.DealStatus) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = DealStatus(extra) - - } - // t.Sender (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Sender = peer.ID(sval) - } - // t.TotalReceived (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TotalReceived = uint64(extra) - - } - // t.Message (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Message = string(sval) - } - // t.BytesPaidFor (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.BytesPaidFor = uint64(extra) - - } - // t.CurrentInterval (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.CurrentInterval = uint64(extra) - - } - // t.PaymentRequested (big.Int) (struct) - - { - - if err := t.PaymentRequested.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PaymentRequested: %w", err) - } - - } - // t.FundsSpent (big.Int) (struct) - 
- { - - if err := t.FundsSpent.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FundsSpent: %w", err) - } - - } - // t.WaitMsgCID (cid.Cid) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.WaitMsgCID: %w", err) - } - - t.WaitMsgCID = &c - } - - } - return nil -} - -func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{135}); err != nil { - return err - } - - // t.DealProposal (retrievalmarket.DealProposal) (struct) - if err := t.DealProposal.MarshalCBOR(w); err != nil { - return err - } - - // t.Status (retrievalmarket.DealStatus) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Status))); err != nil { - return err - } - - // t.Receiver (peer.ID) (string) - if len(t.Receiver) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Receiver was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Receiver)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Receiver)); err != nil { - return err - } - - // t.TotalSent (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.TotalSent))); err != nil { - return err - } - - // t.FundsReceived (big.Int) (struct) - if err := t.FundsReceived.MarshalCBOR(w); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Message)); err != nil { - return err 
- } - - // t.CurrentInterval (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.CurrentInterval))); err != nil { - return err - } - - return nil -} - -func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealProposal (retrievalmarket.DealProposal) (struct) - - { - - if err := t.DealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) - } - - } - // t.Status (retrievalmarket.DealStatus) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = DealStatus(extra) - - } - // t.Receiver (peer.ID) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Receiver = peer.ID(sval) - } - // t.TotalSent (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.TotalSent = uint64(extra) - - } - // t.FundsReceived (big.Int) (struct) - - { - - if err := t.FundsReceived.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.FundsReceived: %w", err) - } - - } - // t.Message (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Message = string(sval) - } - // t.CurrentInterval (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.CurrentInterval = uint64(extra) - - } - return nil -} - -func 
(t *PaymentInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.PayCh (address.Address) (struct) - if err := t.PayCh.MarshalCBOR(w); err != nil { - return err - } - - // t.Lane (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Lane))); err != nil { - return err - } - - return nil -} - -func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.PayCh (address.Address) (struct) - - { - - if err := t.PayCh.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.PayCh: %w", err) - } - - } - // t.Lane (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Lane = uint64(extra) - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_test.go b/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_test.go deleted file mode 100644 index 3dbf20c8cc..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/retrievalmarket/types_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package retrievalmarket_test - -import ( - "bytes" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipld/go-ipld-prime/codec/dagcbor" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/shared" - tut 
"github.com/filecoin-project/go-fil-markets/shared_testutil" -) - -func TestParamsMarshalUnmarshal(t *testing.T) { - pieceCid := tut.GenerateCids(1)[0] - - allSelector := shared.AllSelector() - params := retrievalmarket.NewParamsV1(abi.NewTokenAmount(123), 456, 789, allSelector, &pieceCid) - - buf := new(bytes.Buffer) - err := params.MarshalCBOR(buf) - assert.NoError(t, err) - - unmarshalled := &retrievalmarket.Params{} - err = unmarshalled.UnmarshalCBOR(buf) - assert.NoError(t, err) - - assert.Equal(t, params, *unmarshalled) - - nb := basicnode.Style.Any.NewBuilder() - err = dagcbor.Decoder(nb, bytes.NewBuffer(unmarshalled.Selector.Raw)) - assert.NoError(t, err) - sel := nb.Build() - assert.Equal(t, sel, allSelector) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared/movekey.go b/vendor/github.com/filecoin-project/go-fil-markets/shared/movekey.go new file mode 100644 index 0000000000..e01a4c4ee4 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/shared/movekey.go @@ -0,0 +1,29 @@ +package shared + +import "github.com/ipfs/go-datastore" + +// MoveKey moves a key in a data store +func MoveKey(ds datastore.Datastore, old string, new string) error { + oldKey := datastore.NewKey(old) + newKey := datastore.NewKey(new) + has, err := ds.Has(oldKey) + if err != nil { + return err + } + if !has { + return nil + } + value, err := ds.Get(oldKey) + if err != nil { + return err + } + err = ds.Put(newKey, value) + if err != nil { + return err + } + err = ds.Delete(oldKey) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared/ready.go b/vendor/github.com/filecoin-project/go-fil-markets/shared/ready.go new file mode 100644 index 0000000000..70ccd126de --- /dev/null +++ b/vendor/github.com/filecoin-project/go-fil-markets/shared/ready.go @@ -0,0 +1,24 @@ +package shared + +import ( + "errors" + + "github.com/hannahhoward/go-pubsub" +) + +// ReadyFunc is function that gets 
called once when an event is ready +type ReadyFunc func(error) + +// ReadyDispatcher is just an pubsub dispatcher where the callback is ReadyFunc +func ReadyDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error { + migrateErr, ok := evt.(error) + if !ok && evt != nil { + return errors.New("wrong type of event") + } + cb, ok := fn.(ReadyFunc) + if !ok { + return errors.New("wrong type of event") + } + cb(migrateErr) + return nil +} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared/selectors.go b/vendor/github.com/filecoin-project/go-fil-markets/shared/selectors.go index 17989b13bf..3e3bb91aae 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared/selectors.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/shared/selectors.go @@ -9,7 +9,7 @@ import ( // entire DAG selector func AllSelector() ipld.Node { - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) return ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())). 
Node() diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/generators.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/generators.go deleted file mode 100644 index 8235874d25..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/generators.go +++ /dev/null @@ -1,315 +0,0 @@ -package shared_testutil - -import ( - "math/big" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/libp2p/go-libp2p-core/test" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/storagemarket" - smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -// MakeTestSignedVoucher generates a random SignedVoucher that has all non-zero fields -func MakeTestSignedVoucher() *paych.SignedVoucher { - return &paych.SignedVoucher{ - TimeLockMin: abi.ChainEpoch(rand.Int63()), - TimeLockMax: 0, - SecretPreimage: []byte("secret-preimage"), - Extra: MakeTestModVerifyParams(), - Lane: rand.Uint64(), - Nonce: rand.Uint64(), - Amount: MakeTestTokenAmount(), - Merges: []paych.Merge{MakeTestMerge()}, - Signature: MakeTestSignature(), - } -} - -// MakeTestModVerifyParams generates a random ModVerifyParams that has all non-zero fields -func MakeTestModVerifyParams() *paych.ModVerifyParams { - return &paych.ModVerifyParams{ - Actor: address.TestAddress, - Method: abi.MethodNum(rand.Int63()), - Data: []byte("ModVerifyParams 
data"), - } -} - -// MakeTestMerge generates a random Merge that has all non-zero fields -func MakeTestMerge() paych.Merge { - return paych.Merge{ - Lane: rand.Uint64(), - Nonce: rand.Uint64(), - } -} - -// MakeTestSignature generates a valid yet random Signature with all non-zero fields -func MakeTestSignature() *crypto.Signature { - return &crypto.Signature{ - Type: crypto.SigTypeSecp256k1, - Data: []byte("signature data"), - } -} - -// MakeTestTokenAmount generates a valid yet random TokenAmount with a non-zero value. -func MakeTestTokenAmount() abi.TokenAmount { - return abi.TokenAmount{Int: big.NewInt(rand.Int63())} -} - -// MakeTestQueryResponse generates a valid, random QueryResponse with no non-zero fields -func MakeTestQueryResponse() retrievalmarket.QueryResponse { - return retrievalmarket.QueryResponse{ - Status: retrievalmarket.QueryResponseUnavailable, - Size: rand.Uint64(), - PaymentAddress: address.TestAddress2, - MinPricePerByte: MakeTestTokenAmount(), - MaxPaymentInterval: rand.Uint64(), - MaxPaymentIntervalIncrease: rand.Uint64(), - } -} - -// MakeTestDealProposal generates a valid, random DealProposal -func MakeTestDealProposal() retrievalmarket.DealProposal { - cid := GenerateCids(1)[0] - return retrievalmarket.DealProposal{ - PayloadCID: cid, - ID: retrievalmarket.DealID(rand.Uint64()), - Params: retrievalmarket.NewParamsV0(MakeTestTokenAmount(), rand.Uint64(), rand.Uint64()), - } -} - -// MakeTestDealProposal generates a valid, random DealResponse -func MakeTestDealResponse() retrievalmarket.DealResponse { - fakeBlk := retrievalmarket.Block{ - Prefix: []byte("prefix"), - Data: []byte("data"), - } - - return retrievalmarket.DealResponse{ - Status: retrievalmarket.DealStatusOngoing, - ID: retrievalmarket.DealID(rand.Uint64()), - PaymentOwed: MakeTestTokenAmount(), - Message: "deal response message", - Blocks: []retrievalmarket.Block{fakeBlk}, - } -} - -// MakeTestDealPayment generates a valid, random DealPayment -func MakeTestDealPayment() 
retrievalmarket.DealPayment { - return retrievalmarket.DealPayment{ - ID: retrievalmarket.DealID(rand.Uint64()), - PaymentChannel: address.TestAddress, - PaymentVoucher: MakeTestSignedVoucher(), - } -} - -// MakeTestUnsignedDealProposal generates a deal proposal with no signature -func MakeTestUnsignedDealProposal() market.DealProposal { - start := uint64(rand.Int31()) - end := start + uint64(rand.Int31()) - - return market.DealProposal{ - PieceCID: GenerateCids(1)[0], - PieceSize: abi.PaddedPieceSize(rand.Int63()), - - Client: address.TestAddress, - Provider: address.TestAddress2, - - StartEpoch: abi.ChainEpoch(start), - EndEpoch: abi.ChainEpoch(end), - - StoragePricePerEpoch: MakeTestTokenAmount(), - ProviderCollateral: MakeTestTokenAmount(), - ClientCollateral: MakeTestTokenAmount(), - } -} - -// MakeTestClientDealProposal generates a valid storage deal proposal -func MakeTestClientDealProposal() *market.ClientDealProposal { - return &market.ClientDealProposal{ - Proposal: MakeTestUnsignedDealProposal(), - ClientSignature: *MakeTestSignature(), - } -} - -// MakeTestDataRef returns a storage market data ref -func MakeTestDataRef(manualXfer bool) *storagemarket.DataRef { - out := &storagemarket.DataRef{ - Root: GenerateCids(1)[0], - } - - if manualXfer { - out.TransferType = storagemarket.TTManual - } - - return out -} - -// MakeTestClientDeal returns a storage market client deal -func MakeTestClientDeal(state storagemarket.StorageDealStatus, clientDealProposal *market.ClientDealProposal, manualXfer bool) (*storagemarket.ClientDeal, error) { - proposalNd, err := cborutil.AsIpld(clientDealProposal) - - if err != nil { - return nil, err - } - - p, err := test.RandPeerID() - if err != nil { - return nil, err - } - return &storagemarket.ClientDeal{ - ProposalCid: proposalNd.Cid(), - ClientDealProposal: *clientDealProposal, - State: state, - Miner: p, - MinerWorker: address.TestAddress2, - DataRef: MakeTestDataRef(manualXfer), - }, nil -} - -// MakeTestMinerDeal 
returns a storage market provider deal -func MakeTestMinerDeal(state storagemarket.StorageDealStatus, clientDealProposal *market.ClientDealProposal, dataRef *storagemarket.DataRef) (*storagemarket.MinerDeal, error) { - proposalNd, err := cborutil.AsIpld(clientDealProposal) - - if err != nil { - return nil, err - } - - p, err := test.RandPeerID() - if err != nil { - return nil, err - } - - return &storagemarket.MinerDeal{ - ProposalCid: proposalNd.Cid(), - ClientDealProposal: *clientDealProposal, - State: state, - Client: p, - Ref: dataRef, - }, nil -} - -// MakeTestStorageAsk generates a storage ask -func MakeTestStorageAsk() *storagemarket.StorageAsk { - return &storagemarket.StorageAsk{ - Price: MakeTestTokenAmount(), - MinPieceSize: abi.PaddedPieceSize(rand.Uint64()), - Miner: address.TestAddress2, - Timestamp: abi.ChainEpoch(rand.Int63()), - Expiry: abi.ChainEpoch(rand.Int63()), - SeqNo: rand.Uint64(), - } -} - -// MakeTestSignedStorageAsk generates a signed storage ask -func MakeTestSignedStorageAsk() *storagemarket.SignedStorageAsk { - return &storagemarket.SignedStorageAsk{ - Ask: MakeTestStorageAsk(), - Signature: MakeTestSignature(), - } -} - -// MakeTestStorageNetworkProposal generates a proposal that can be sent over the -// network to a provider -func MakeTestStorageNetworkProposal() smnet.Proposal { - return smnet.Proposal{ - DealProposal: MakeTestClientDealProposal(), - Piece: &storagemarket.DataRef{Root: GenerateCids(1)[0]}, - } -} - -// MakeTestStorageNetworkResponse generates a response to a proposal sent over -// the network -func MakeTestStorageNetworkResponse() smnet.Response { - return smnet.Response{ - State: storagemarket.StorageDealSealing, - Proposal: GenerateCids(1)[0], - PublishMessage: &(GenerateCids(1)[0]), - } -} - -// MakeTestStorageNetworkSignedResponse generates a response to a proposal sent over -// the network that is signed -func MakeTestStorageNetworkSignedResponse() smnet.SignedResponse { - return smnet.SignedResponse{ - 
Response: MakeTestStorageNetworkResponse(), - Signature: MakeTestSignature(), - } -} - -// MakeTestStorageAskRequest generates a request to get a provider's ask -func MakeTestStorageAskRequest() smnet.AskRequest { - return smnet.AskRequest{ - Miner: address.TestAddress2, - } -} - -// MakeTestStorageAskResponse generates a response to an ask request -func MakeTestStorageAskResponse() smnet.AskResponse { - return smnet.AskResponse{ - Ask: MakeTestSignedStorageAsk(), - } -} - -// MakeTestDealStatusRequest generates a request to get a provider's query -func MakeTestDealStatusRequest() smnet.DealStatusRequest { - return smnet.DealStatusRequest{ - Proposal: GenerateCids(1)[0], - Signature: *MakeTestSignature(), - } -} - -// MakeTestDealStatusResponse generates a response to an query request -func MakeTestDealStatusResponse() smnet.DealStatusResponse { - proposal := MakeTestUnsignedDealProposal() - - ds := storagemarket.ProviderDealState{ - Proposal: &proposal, - ProposalCid: &GenerateCids(1)[0], - State: storagemarket.StorageDealActive, - } - - return smnet.DealStatusResponse{ - DealState: ds, - Signature: *MakeTestSignature(), - } -} - -func RequireGenerateRetrievalPeers(t *testing.T, numPeers int) []retrievalmarket.RetrievalPeer { - peers := make([]retrievalmarket.RetrievalPeer, numPeers) - for i := range peers { - pid, err := test.RandPeerID() - require.NoError(t, err) - addr, err := address.NewIDAddress(rand.Uint64()) - require.NoError(t, err) - peers[i] = retrievalmarket.RetrievalPeer{ - Address: addr, - ID: pid, - } - } - return peers -} - -type FakeDTValidator struct{} - -func (v *FakeDTValidator) ValidatePush(sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { - return nil -} - -func (v *FakeDTValidator) ValidatePull(receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { - return nil -} - -var _ datatransfer.RequestValidator = (*FakeDTValidator)(nil) diff --git 
a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/mocknet.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/mocknet.go deleted file mode 100644 index d4d1bb34cd..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/mocknet.go +++ /dev/null @@ -1,227 +0,0 @@ -package shared_testutil - -import ( - "bytes" - "errors" - "io" - "os" - "path" - "path/filepath" - "runtime" - "testing" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - "github.com/ipfs/go-graphsync" - graphsyncimpl "github.com/ipfs/go-graphsync/impl" - "github.com/ipfs/go-graphsync/network" - bstore "github.com/ipfs/go-ipfs-blockstore" - chunk "github.com/ipfs/go-ipfs-chunker" - offline "github.com/ipfs/go-ipfs-exchange-offline" - files "github.com/ipfs/go-ipfs-files" - ipldformat "github.com/ipfs/go-ipld-format" - "github.com/ipfs/go-merkledag" - unixfile "github.com/ipfs/go-unixfs/file" - "github.com/ipfs/go-unixfs/importer/balanced" - "github.com/ipfs/go-unixfs/importer/helpers" - "github.com/ipld/go-ipld-prime" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/libp2p/go-libp2p-core/host" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/require" - "golang.org/x/net/context" - - "github.com/filecoin-project/go-storedcounter" -) - -type Libp2pTestData struct { - Ctx context.Context - Ds1 datastore.Batching - Ds2 datastore.Batching - DTStoredCounter1 *storedcounter.StoredCounter - DTStoredCounter2 *storedcounter.StoredCounter - RetrievalStoredCounter1 *storedcounter.StoredCounter - RetrievalStoredCounter2 *storedcounter.StoredCounter - Bs1 bstore.Blockstore - Bs2 bstore.Blockstore - DagService1 ipldformat.DAGService - DagService2 ipldformat.DAGService - GraphSync1 graphsync.GraphExchange - GraphSync2 graphsync.GraphExchange - Loader1 ipld.Loader - Loader2 ipld.Loader - Storer1 
ipld.Storer - Storer2 ipld.Storer - Host1 host.Host - Host2 host.Host - OrigBytes []byte -} - -func NewLibp2pTestData(ctx context.Context, t *testing.T) *Libp2pTestData { - testData := &Libp2pTestData{} - testData.Ctx = ctx - makeLoader := func(bs bstore.Blockstore) ipld.Loader { - return func(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { - c, ok := lnk.(cidlink.Link) - if !ok { - return nil, errors.New("incorrect Link Type") - } - // read block from one store - block, err := bs.Get(c.Cid) - if err != nil { - return nil, err - } - return bytes.NewReader(block.RawData()), nil - } - } - - makeStorer := func(bs bstore.Blockstore) ipld.Storer { - return func(lnkCtx ipld.LinkContext) (io.Writer, ipld.StoreCommitter, error) { - var buf bytes.Buffer - var committer ipld.StoreCommitter = func(lnk ipld.Link) error { - c, ok := lnk.(cidlink.Link) - if !ok { - return errors.New("incorrect Link Type") - } - block, err := blocks.NewBlockWithCid(buf.Bytes(), c.Cid) - if err != nil { - return err - } - return bs.Put(block) - } - return &buf, committer, nil - } - } - testData.Ds1 = dss.MutexWrap(datastore.NewMapDatastore()) - testData.Ds2 = dss.MutexWrap(datastore.NewMapDatastore()) - - testData.DTStoredCounter1 = storedcounter.New(testData.Ds1, datastore.NewKey("nextDTID")) - testData.DTStoredCounter2 = storedcounter.New(testData.Ds2, datastore.NewKey("nextDTID")) - - testData.RetrievalStoredCounter1 = storedcounter.New(testData.Ds1, datastore.NewKey("nextDealID")) - testData.RetrievalStoredCounter2 = storedcounter.New(testData.Ds2, datastore.NewKey("nextDealID")) - - // make a bstore and dag service - testData.Bs1 = bstore.NewBlockstore(testData.Ds1) - testData.Bs2 = bstore.NewBlockstore(testData.Ds2) - - testData.DagService1 = merkledag.NewDAGService(blockservice.New(testData.Bs1, offline.Exchange(testData.Bs1))) - testData.DagService2 = merkledag.NewDAGService(blockservice.New(testData.Bs2, offline.Exchange(testData.Bs2))) - - // setup an IPLD loader/storer for 
bstore 1 - testData.Loader1 = makeLoader(testData.Bs1) - testData.Storer1 = makeStorer(testData.Bs1) - - // setup an IPLD loader/storer for bstore 2 - testData.Loader2 = makeLoader(testData.Bs2) - testData.Storer2 = makeStorer(testData.Bs2) - - mn := mocknet.New(ctx) - - // setup network - var err error - testData.Host1, err = mn.GenPeer() - require.NoError(t, err) - - testData.Host2, err = mn.GenPeer() - require.NoError(t, err) - - err = mn.LinkAll() - require.NoError(t, err) - - testData.GraphSync1 = graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host1), testData.Loader1, testData.Storer1) - testData.GraphSync2 = graphsyncimpl.New(ctx, network.NewFromLibp2pHost(testData.Host2), testData.Loader2, testData.Storer2) - - return testData -} - -const unixfsChunkSize uint64 = 1 << 10 -const unixfsLinksPerLevel = 1024 - -// LoadUnixFSFile injects the fixture `filename` into the given blockstore from the -// fixtures directory. If useSecondNode is true, fixture is injected to the second node; -// otherwise the first node gets it -func (ltd *Libp2pTestData) LoadUnixFSFile(t *testing.T, fixturesPath string, useSecondNode bool) ipld.Link { - - // read in a fixture file - fpath, err := filepath.Abs(filepath.Join(thisDir(t), "..", fixturesPath)) - require.NoError(t, err) - - f, err := os.Open(fpath) - require.NoError(t, err) - - var buf bytes.Buffer - tr := io.TeeReader(f, &buf) - file := files.NewReaderFile(tr) - - // import to UnixFS - var dagService ipldformat.DAGService - if useSecondNode { - dagService = ltd.DagService2 - } else { - dagService = ltd.DagService1 - } - bufferedDS := ipldformat.NewBufferedDAG(ltd.Ctx, dagService) - - params := helpers.DagBuilderParams{ - Maxlinks: unixfsLinksPerLevel, - RawLeaves: true, - CidBuilder: nil, - Dagserv: bufferedDS, - } - - db, err := params.New(chunk.NewSizeSplitter(file, int64(unixfsChunkSize))) - require.NoError(t, err) - - nd, err := balanced.Layout(db) - require.NoError(t, err) - - err = bufferedDS.Commit() - 
require.NoError(t, err) - - // save the original files bytes - ltd.OrigBytes = buf.Bytes() - - return cidlink.Link{Cid: nd.Cid()} -} - -func thisDir(t *testing.T) string { - _, fname, _, ok := runtime.Caller(1) - require.True(t, ok) - return path.Dir(fname) -} - -// VerifyFileTransferred checks that the fixture file was sent from one node to the other. -func (ltd *Libp2pTestData) VerifyFileTransferred(t *testing.T, link ipld.Link, useSecondNode bool, readLen uint64) { - var dagService ipldformat.DAGService - if useSecondNode { - dagService = ltd.DagService2 - } else { - dagService = ltd.DagService1 - } - - c := link.(cidlink.Link).Cid - - // load the root of the UnixFS DAG from the new blockstore - otherNode, err := dagService.Get(ltd.Ctx, c) - require.NoError(t, err) - - // Setup a UnixFS file reader - n, err := unixfile.NewUnixfsFile(ltd.Ctx, dagService, otherNode) - require.NoError(t, err) - - fn, ok := n.(files.File) - require.True(t, ok) - - // Read the bytes for the UnixFS File - finalBytes := make([]byte, readLen) - _, err = fn.Read(finalBytes) - if err != nil { - require.Equal(t, "EOF", err.Error()) - } - - // verify original bytes match final bytes! 
- require.EqualValues(t, ltd.OrigBytes[:readLen], finalBytes) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_filestore.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_filestore.go deleted file mode 100644 index c0a0f8512a..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_filestore.go +++ /dev/null @@ -1,176 +0,0 @@ -package shared_testutil - -import ( - "bytes" - "errors" - "math/rand" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/filestore" -) - -var TestErrNotFound = errors.New("file not found") -var TestErrTempFile = errors.New("temp file creation failed") - -// TestFileStoreParams are parameters for a test file store -type TestFileStoreParams struct { - Files []filestore.File - AvailableTempFiles []filestore.File - ExpectedDeletions []filestore.Path - ExpectedOpens []filestore.Path -} - -// TestFileStore is a mocked file store that can provide programmed returns -// and test expectations -type TestFileStore struct { - files []filestore.File - availableTempFiles []filestore.File - expectedDeletions map[filestore.Path]struct{} - expectedOpens map[filestore.Path]struct{} - deletedFiles map[filestore.Path]struct{} - openedFiles map[filestore.Path]struct{} -} - -// NewTestFileStore returns a new test file store from the given parameters -func NewTestFileStore(params TestFileStoreParams) *TestFileStore { - fs := &TestFileStore{ - files: params.Files, - availableTempFiles: params.AvailableTempFiles, - expectedDeletions: make(map[filestore.Path]struct{}), - expectedOpens: make(map[filestore.Path]struct{}), - deletedFiles: make(map[filestore.Path]struct{}), - openedFiles: make(map[filestore.Path]struct{}), - } - for _, path := range params.ExpectedDeletions { - fs.expectedDeletions[path] = struct{}{} - } - for _, path := range params.ExpectedOpens { - fs.expectedOpens[path] = struct{}{} - } - return fs -} - 
-// Open will open a file if it's in the file store -func (fs *TestFileStore) Open(p filestore.Path) (filestore.File, error) { - var foundFile filestore.File - for _, file := range fs.files { - if p == file.Path() { - foundFile = file - break - } - } - if foundFile == nil { - return nil, TestErrNotFound - } - fs.openedFiles[p] = struct{}{} - return foundFile, nil -} - -// Create is not implement -func (fs *TestFileStore) Create(p filestore.Path) (filestore.File, error) { - panic("not implemented") -} - -// Store is not implemented -func (fs *TestFileStore) Store(p filestore.Path, f filestore.File) (filestore.Path, error) { - panic("not implemented") -} - -// Delete will delete a file if it is in the file store -func (fs *TestFileStore) Delete(p filestore.Path) error { - var foundFile filestore.File - for i, file := range fs.files { - if p == file.Path() { - foundFile = file - fs.files[i] = fs.files[len(fs.files)-1] - fs.files[len(fs.files)-1] = nil - fs.files = fs.files[:len(fs.files)-1] - break - } - } - if foundFile == nil { - return TestErrNotFound - } - fs.deletedFiles[p] = struct{}{} - return nil -} - -// CreateTemp will create a temporary file from the provided set of temporary files -func (fs *TestFileStore) CreateTemp() (filestore.File, error) { - if len(fs.availableTempFiles) == 0 { - return nil, TestErrTempFile - } - var tempFile filestore.File - tempFile, fs.availableTempFiles = fs.availableTempFiles[0], fs.availableTempFiles[1:] - fs.files = append(fs.files, tempFile) - return tempFile, nil -} - -// VerifyExpectations will verify that the correct files were opened and deleted -func (fs *TestFileStore) VerifyExpectations(t *testing.T) { - require.Equal(t, fs.openedFiles, fs.expectedOpens) - require.Equal(t, fs.deletedFiles, fs.expectedDeletions) -} - -// TestFileParams are parameters for a test file -type TestFileParams struct { - Buffer *bytes.Buffer - Size int64 - Path filestore.Path -} - -// NewTestFile generates a mocked filestore.File that has 
programmed returns -func NewTestFile(params TestFileParams) *TestFile { - tf := &TestFile{ - Buffer: params.Buffer, - size: params.Size, - path: params.Path, - } - if tf.Buffer == nil { - tf.Buffer = new(bytes.Buffer) - } - if tf.size == 0 { - tf.size = rand.Int63() - } - if tf.path == filestore.Path("") { - buf := make([]byte, 16) - _, _ = rand.Read(buf) - tf.path = filestore.Path(buf) - } - return tf -} - -// TestFile is a mocked version of filestore.File with preset returns -// and a byte buffer for read/writes -type TestFile struct { - *bytes.Buffer - size int64 - path filestore.Path -} - -// Path returns the preset path -func (f *TestFile) Path() filestore.Path { - return f.path -} - -// OsPath is not implemented -func (f *TestFile) OsPath() filestore.OsPath { - panic("not implemented") -} - -// Size returns the preset size -func (f *TestFile) Size() int64 { - return f.size -} - -// Close does nothing -func (f *TestFile) Close() error { - return nil -} - -// Seek is not implemented -func (f *TestFile) Seek(offset int64, whence int) (int64, error) { - panic("not implemented") -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_ipld_tree.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_ipld_tree.go deleted file mode 100644 index 4674fd0b09..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_ipld_tree.go +++ /dev/null @@ -1,147 +0,0 @@ -package shared_testutil - -import ( - "bytes" - "context" - "errors" - "io" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - "github.com/ipld/go-ipld-prime" - - // to register multicodec - _ "github.com/ipld/go-ipld-prime/codec/dagjson" - "github.com/ipld/go-ipld-prime/fluent" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - - "github.com/filecoin-project/go-fil-markets/shared" -) - -// TestIPLDTree is a set of IPLD Data 
that forms a tree spread across some blocks -// with a serialized in memory representation -type TestIPLDTree struct { - Storage map[ipld.Link][]byte - LeafAlpha ipld.Node - LeafAlphaLnk ipld.Link - LeafAlphaBlock blocks.Block - LeafBeta ipld.Node - LeafBetaLnk ipld.Link - LeafBetaBlock blocks.Block - MiddleMapNode ipld.Node - MiddleMapNodeLnk ipld.Link - MiddleMapBlock blocks.Block - MiddleListNode ipld.Node - MiddleListNodeLnk ipld.Link - MiddleListBlock blocks.Block - RootNode ipld.Node - RootNodeLnk ipld.Link - RootBlock blocks.Block -} - -// NewTestIPLDTree returns a fake tree of nodes, spread across 5 blocks -func NewTestIPLDTree() TestIPLDTree { - var storage = make(map[ipld.Link][]byte) - encode := func(n ipld.Node) (ipld.Node, ipld.Link) { - lb := cidlink.LinkBuilder{Prefix: cid.Prefix{ - Version: 1, - Codec: 0x0129, - MhType: 0x17, - MhLength: 4, - }} - lnk, err := lb.Build(context.Background(), ipld.LinkContext{}, n, - func(ipld.LinkContext) (io.Writer, ipld.StoreCommitter, error) { - buf := bytes.Buffer{} - return &buf, func(lnk ipld.Link) error { - storage[lnk] = buf.Bytes() - return nil - }, nil - }, - ) - if err != nil { - panic(err) - } - return n, lnk - } - - var ( - leafAlpha, leafAlphaLnk = encode(fluent.MustBuild(basicnode.Style.String, func(na fluent.NodeAssembler) { na.AssignString("alpha") })) - leafAlphaBlock, _ = blocks.NewBlockWithCid(storage[leafAlphaLnk], leafAlphaLnk.(cidlink.Link).Cid) - leafBeta, leafBetaLnk = encode(fluent.MustBuild(basicnode.Style.String, func(na fluent.NodeAssembler) { na.AssignString("beta") })) - leafBetaBlock, _ = blocks.NewBlockWithCid(storage[leafBetaLnk], leafBetaLnk.(cidlink.Link).Cid) - middleMapNode, middleMapNodeLnk = encode(fluent.MustBuildMap(basicnode.Style.Map, 3, func(ma fluent.MapAssembler) { - ma.AssembleEntry("foo").AssignBool(true) - ma.AssembleEntry("bar").AssignBool(false) - ma.AssembleEntry("nested").CreateMap(2, func(ma fluent.MapAssembler) { - 
ma.AssembleEntry("alink").AssignLink(leafAlphaLnk) - ma.AssembleEntry("nonlink").AssignString("zoo") - }) - })) - middleMapBlock, _ = blocks.NewBlockWithCid(storage[middleMapNodeLnk], middleMapNodeLnk.(cidlink.Link).Cid) - middleListNode, middleListNodeLnk = encode(fluent.MustBuildList(basicnode.Style.List, 4, func(la fluent.ListAssembler) { - la.AssembleValue().AssignLink(leafAlphaLnk) - la.AssembleValue().AssignLink(leafAlphaLnk) - la.AssembleValue().AssignLink(leafBetaLnk) - la.AssembleValue().AssignLink(leafAlphaLnk) - })) - middleListBlock, _ = blocks.NewBlockWithCid(storage[middleListNodeLnk], middleListNodeLnk.(cidlink.Link).Cid) - rootNode, rootNodeLnk = encode(fluent.MustBuildMap(basicnode.Style.Map, 4, func(ma fluent.MapAssembler) { - ma.AssembleEntry("plain").AssignString("olde string") - ma.AssembleEntry("linkedString").AssignLink(leafAlphaLnk) - ma.AssembleEntry("linkedMap").AssignLink(middleMapNodeLnk) - ma.AssembleEntry("linkedList").AssignLink(middleListNodeLnk) - })) - rootBlock, _ = blocks.NewBlockWithCid(storage[rootNodeLnk], rootNodeLnk.(cidlink.Link).Cid) - ) - return TestIPLDTree{ - Storage: storage, - LeafAlpha: leafAlpha, - LeafAlphaLnk: leafAlphaLnk, - LeafAlphaBlock: leafAlphaBlock, - LeafBeta: leafBeta, - LeafBetaLnk: leafBetaLnk, - LeafBetaBlock: leafBetaBlock, - MiddleMapNode: middleMapNode, - MiddleMapNodeLnk: middleMapNodeLnk, - MiddleMapBlock: middleMapBlock, - MiddleListNode: middleListNode, - MiddleListNodeLnk: middleListNodeLnk, - MiddleListBlock: middleListBlock, - RootNode: rootNode, - RootNodeLnk: rootNodeLnk, - RootBlock: rootBlock, - } -} - -// Loader is an IPLD comptabile loader for the "storage" part of the tree -func (tt TestIPLDTree) Loader(lnk ipld.Link, lnkCtx ipld.LinkContext) (io.Reader, error) { - data, ok := tt.Storage[lnk] - if !ok { - return nil, errors.New("No block found") - } - return bytes.NewBuffer(data), nil -} - -// Get makes a test tree behave like a block read store -func (tt TestIPLDTree) Get(c cid.Cid) 
(blocks.Block, error) { - data, ok := tt.Storage[cidlink.Link{Cid: c}] - if !ok { - return nil, errors.New("No block found") - } - return blocks.NewBlockWithCid(data, c) -} - -// DumpToCar puts the tree into a car file, with user configured functions -func (tt TestIPLDTree) DumpToCar(out io.Writer, userOnNewCarBlocks ...car.OnNewCarBlockFunc) error { - ctx := context.Background() - sc := car.NewSelectiveCar(ctx, tt, []car.Dag{ - { - Root: tt.RootNodeLnk.(cidlink.Link).Cid, - Selector: shared.AllSelector(), - }, - }) - - return sc.Write(out, userOnNewCarBlocks...) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_network_types.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_network_types.go deleted file mode 100644 index 51d21b7155..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_network_types.go +++ /dev/null @@ -1,679 +0,0 @@ -package shared_testutil - -import ( - "errors" - "testing" - - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - - rm "github.com/filecoin-project/go-fil-markets/retrievalmarket" - rmnet "github.com/filecoin-project/go-fil-markets/retrievalmarket/network" - smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -// QueryReader is a function to mock reading queries. -type QueryReader func() (rm.Query, error) - -// QueryResponseReader is a function to mock reading query responses. -type QueryResponseReader func() (rm.QueryResponse, error) - -// QueryResponseWriter is a function to mock writing query responses. -type QueryResponseWriter func(rm.QueryResponse) error - -// QueryWriter is a function to mock writing queries. -type QueryWriter func(rm.Query) error - -// TestRetrievalQueryStream is a retrieval query stream with predefined -// stubbed behavior. 
-type TestRetrievalQueryStream struct { - p peer.ID - reader QueryReader - respReader QueryResponseReader - respWriter QueryResponseWriter - writer QueryWriter -} - -// TestQueryStreamParams are parameters used to setup a TestRetrievalQueryStream. -// All parameters except the peer ID are optional. -type TestQueryStreamParams struct { - PeerID peer.ID - Reader QueryReader - RespReader QueryResponseReader - RespWriter QueryResponseWriter - Writer QueryWriter -} - -// NewTestRetrievalQueryStream returns a new TestRetrievalQueryStream with the -// behavior specified by the paramaters, or default behaviors if not specified. -func NewTestRetrievalQueryStream(params TestQueryStreamParams) rmnet.RetrievalQueryStream { - stream := TestRetrievalQueryStream{ - p: params.PeerID, - reader: TrivialQueryReader, - respReader: TrivialQueryResponseReader, - respWriter: TrivialQueryResponseWriter, - writer: TrivialQueryWriter, - } - if params.Reader != nil { - stream.reader = params.Reader - } - if params.Writer != nil { - stream.writer = params.Writer - } - if params.RespReader != nil { - stream.respReader = params.RespReader - } - if params.RespWriter != nil { - stream.respWriter = params.RespWriter - } - return &stream -} - -// ReadDealStatusRequest calls the mocked query reader. -func (trqs *TestRetrievalQueryStream) ReadQuery() (rm.Query, error) { - return trqs.reader() -} - -// WriteDealStatusRequest calls the mocked query writer. -func (trqs *TestRetrievalQueryStream) WriteQuery(newQuery rm.Query) error { - return trqs.writer(newQuery) -} - -// ReadDealStatusResponse calls the mocked query response reader. -func (trqs *TestRetrievalQueryStream) ReadQueryResponse() (rm.QueryResponse, error) { - return trqs.respReader() -} - -// WriteDealStatusResponse calls the mocked query response writer. -func (trqs *TestRetrievalQueryStream) WriteQueryResponse(newResp rm.QueryResponse) error { - return trqs.respWriter(newResp) -} - -// Close closes the stream (does nothing for test). 
-func (trqs *TestRetrievalQueryStream) Close() error { return nil } - -// DealProposalReader is a function to mock reading deal proposals. -type DealProposalReader func() (rm.DealProposal, error) - -// DealResponseReader is a function to mock reading deal responses. -type DealResponseReader func() (rm.DealResponse, error) - -// DealResponseWriter is a function to mock writing deal responses. -type DealResponseWriter func(rm.DealResponse) error - -// DealProposalWriter is a function to mock writing deal proposals. -type DealProposalWriter func(rm.DealProposal) error - -// DealPaymentReader is a function to mock reading deal payments. -type DealPaymentReader func() (rm.DealPayment, error) - -// DealPaymentWriter is a function to mock writing deal payments. -type DealPaymentWriter func(rm.DealPayment) error - -// TestRetrievalDealStream is a retrieval deal stream with predefined -// stubbed behavior. -type TestRetrievalDealStream struct { - p peer.ID - proposalReader DealProposalReader - proposalWriter DealProposalWriter - responseReader DealResponseReader - responseWriter DealResponseWriter - paymentReader DealPaymentReader - paymentWriter DealPaymentWriter -} - -// TestDealStreamParams are parameters used to setup a TestRetrievalDealStream. -// All parameters except the peer ID are optional. -type TestDealStreamParams struct { - PeerID peer.ID - ProposalReader DealProposalReader - ProposalWriter DealProposalWriter - ResponseReader DealResponseReader - ResponseWriter DealResponseWriter - PaymentReader DealPaymentReader - PaymentWriter DealPaymentWriter -} - -// NewTestRetrievalDealStream returns a new TestRetrievalDealStream with the -// behavior specified by the paramaters, or default behaviors if not specified. 
-func NewTestRetrievalDealStream(params TestDealStreamParams) rmnet.RetrievalDealStream { - stream := TestRetrievalDealStream{ - p: params.PeerID, - proposalReader: TrivialDealProposalReader, - proposalWriter: TrivialDealProposalWriter, - responseReader: TrivialDealResponseReader, - responseWriter: TrivialDealResponseWriter, - paymentReader: TrivialDealPaymentReader, - paymentWriter: TrivialDealPaymentWriter, - } - if params.ProposalReader != nil { - stream.proposalReader = params.ProposalReader - } - if params.ProposalWriter != nil { - stream.proposalWriter = params.ProposalWriter - } - if params.ResponseReader != nil { - stream.responseReader = params.ResponseReader - } - if params.ResponseWriter != nil { - stream.responseWriter = params.ResponseWriter - } - if params.PaymentReader != nil { - stream.paymentReader = params.PaymentReader - } - if params.PaymentWriter != nil { - stream.paymentWriter = params.PaymentWriter - } - return &stream -} - -// ReadDealProposal calls the mocked deal proposal reader function. -func (trds *TestRetrievalDealStream) ReadDealProposal() (rm.DealProposal, error) { - return trds.proposalReader() -} - -// WriteDealProposal calls the mocked deal proposal writer function. -func (trds *TestRetrievalDealStream) WriteDealProposal(dealProposal rm.DealProposal) error { - return trds.proposalWriter(dealProposal) -} - -// ReadDealResponse calls the mocked deal response reader function. -func (trds *TestRetrievalDealStream) ReadDealResponse() (rm.DealResponse, error) { - return trds.responseReader() -} - -// WriteDealResponse calls the mocked deal response writer function. -func (trds *TestRetrievalDealStream) WriteDealResponse(dealResponse rm.DealResponse) error { - return trds.responseWriter(dealResponse) -} - -// ReadDealPayment calls the mocked deal payment reader function. 
-func (trds *TestRetrievalDealStream) ReadDealPayment() (rm.DealPayment, error) { - return trds.paymentReader() -} - -// WriteDealPayment calls the mocked deal payment writer function. -func (trds *TestRetrievalDealStream) WriteDealPayment(dealPayment rm.DealPayment) error { - return trds.paymentWriter(dealPayment) -} - -// Receiver returns the other peer -func (trds TestRetrievalDealStream) Receiver() peer.ID { return trds.p } - -// Close closes the stream (does nothing for mocked stream) -func (trds TestRetrievalDealStream) Close() error { return nil } - -// QueryStreamBuilder is a function that builds retrieval query streams. -type QueryStreamBuilder func(peer.ID) (rmnet.RetrievalQueryStream, error) - -// DealStreamBuilder if a function that builds retrieval deal streams -type DealStreamBuilder func(peer.ID) (rmnet.RetrievalDealStream, error) - -// TestRetrievalMarketNetwork is a test network that has stubbed behavior -// for testing the retrieval market implementation -type TestRetrievalMarketNetwork struct { - receiver rmnet.RetrievalReceiver - dsbuilder DealStreamBuilder - qsbuilder QueryStreamBuilder -} - -// TestNetworkParams are parameters for setting up a test network. All -// parameters other than the receiver are optional -type TestNetworkParams struct { - DealStreamBuilder DealStreamBuilder - QueryStreamBuilder QueryStreamBuilder - Receiver rmnet.RetrievalReceiver -} - -// NewTestRetrievalMarketNetwork returns a new TestRetrievalMarketNetwork with the -// behavior specified by the paramaters, or default behaviors if not specified. 
-func NewTestRetrievalMarketNetwork(params TestNetworkParams) *TestRetrievalMarketNetwork { - trmn := TestRetrievalMarketNetwork{ - dsbuilder: TrivialNewDealStream, - qsbuilder: TrivialNewQueryStream, - receiver: params.Receiver, - } - if params.DealStreamBuilder != nil { - trmn.dsbuilder = params.DealStreamBuilder - } - if params.QueryStreamBuilder != nil { - trmn.qsbuilder = params.QueryStreamBuilder - } - return &trmn -} - -// NewDealStatusStream returns a query stream. -// Note this always returns the same stream. This is fine for testing for now. -func (trmn *TestRetrievalMarketNetwork) NewQueryStream(id peer.ID) (rmnet.RetrievalQueryStream, error) { - return trmn.qsbuilder(id) -} - -// NewDealStream returns a deal stream -// Note this always returns the same stream. This is fine for testing for now. -func (trmn *TestRetrievalMarketNetwork) NewDealStream(id peer.ID) (rmnet.RetrievalDealStream, error) { - return trmn.dsbuilder(id) -} - -// SetDelegate sets the market receiver -func (trmn *TestRetrievalMarketNetwork) SetDelegate(r rmnet.RetrievalReceiver) error { - trmn.receiver = r - return nil -} - -// ReceiveQueryStream simulates receiving a query stream -func (trmn *TestRetrievalMarketNetwork) ReceiveQueryStream(qs rmnet.RetrievalQueryStream) { - trmn.receiver.HandleQueryStream(qs) -} - -// ReceiveDealStream simulates receiving a deal stream -func (trmn *TestRetrievalMarketNetwork) ReceiveDealStream(ds rmnet.RetrievalDealStream) { - trmn.receiver.HandleDealStream(ds) -} - -// StopHandlingRequests sets receiver to nil -func (trmn *TestRetrievalMarketNetwork) StopHandlingRequests() error { - trmn.receiver = nil - return nil -} - -var _ rmnet.RetrievalMarketNetwork = &TestRetrievalMarketNetwork{} - -// Some convenience builders - -// FailNewQueryStream always fails -func FailNewQueryStream(peer.ID) (rmnet.RetrievalQueryStream, error) { - return nil, errors.New("new query stream failed") -} - -// FailNewDealStream always fails -func FailNewDealStream(peer.ID) 
(rmnet.RetrievalDealStream, error) { - return nil, errors.New("new deal stream failed") -} - -// FailQueryReader always fails -func FailQueryReader() (rm.Query, error) { - return rm.QueryUndefined, errors.New("read query failed") -} - -// FailQueryWriter always fails -func FailQueryWriter(rm.Query) error { - return errors.New("write query failed") -} - -// FailResponseReader always fails -func FailResponseReader() (rm.QueryResponse, error) { - return rm.QueryResponseUndefined, errors.New("query response failed") -} - -// FailResponseWriter always fails -func FailResponseWriter(rm.QueryResponse) error { - return errors.New("write query response failed") -} - -// FailDealProposalWriter always fails -func FailDealProposalWriter(rm.DealProposal) error { - return errors.New("write proposal failed") -} - -// FailDealProposalReader always fails -func FailDealProposalReader() (rm.DealProposal, error) { - return rm.DealProposalUndefined, errors.New("read proposal failed") -} - -// FailDealResponseWriter always fails -func FailDealResponseWriter(rm.DealResponse) error { - return errors.New("write proposal failed") -} - -// FailDealResponseReader always fails -func FailDealResponseReader() (rm.DealResponse, error) { - return rm.DealResponseUndefined, errors.New("write proposal failed") -} - -// FailDealPaymentWriter always fails -func FailDealPaymentWriter(rm.DealPayment) error { - return errors.New("write proposal failed") -} - -// FailDealPaymentReader always fails -func FailDealPaymentReader() (rm.DealPayment, error) { - return rm.DealPaymentUndefined, errors.New("write proposal failed") -} - -// TrivialNewQueryStream succeeds trivially, returning an empty query stream. 
-func TrivialNewQueryStream(p peer.ID) (rmnet.RetrievalQueryStream, error) { - return NewTestRetrievalQueryStream(TestQueryStreamParams{PeerID: p}), nil -} - -// ExpectPeerOnQueryStreamBuilder fails if the peer used does not match the expected peer -func ExpectPeerOnQueryStreamBuilder(t *testing.T, expectedPeer peer.ID, qb QueryStreamBuilder, msgAndArgs ...interface{}) QueryStreamBuilder { - return func(p peer.ID) (rmnet.RetrievalQueryStream, error) { - require.Equal(t, expectedPeer, p, msgAndArgs...) - return qb(p) - } -} - -// TrivialNewDealStream succeeds trivially, returning an empty deal stream. -func TrivialNewDealStream(p peer.ID) (rmnet.RetrievalDealStream, error) { - return NewTestRetrievalDealStream(TestDealStreamParams{PeerID: p}), nil -} - -// TrivialQueryReader succeeds trivially, returning an empty query. -func TrivialQueryReader() (rm.Query, error) { - return rm.Query{}, nil -} - -// TrivialQueryResponseReader succeeds trivially, returning an empty query response. -func TrivialQueryResponseReader() (rm.QueryResponse, error) { - return rm.QueryResponse{}, nil -} - -// TrivialQueryWriter succeeds trivially, returning no error. -func TrivialQueryWriter(rm.Query) error { - return nil -} - -// TrivialQueryResponseWriter succeeds trivially, returning no error. -func TrivialQueryResponseWriter(rm.QueryResponse) error { - return nil -} - -// TrivialDealProposalReader succeeds trivially, returning an empty proposal. -func TrivialDealProposalReader() (rm.DealProposal, error) { - return rm.DealProposal{}, nil -} - -// TrivialDealResponseReader succeeds trivially, returning an empty deal response. -func TrivialDealResponseReader() (rm.DealResponse, error) { - return rm.DealResponse{}, nil -} - -// TrivialDealPaymentReader succeeds trivially, returning an empty deal payment. -func TrivialDealPaymentReader() (rm.DealPayment, error) { - return rm.DealPayment{}, nil -} - -// TrivialDealProposalWriter succeeds trivially, returning no error. 
-func TrivialDealProposalWriter(rm.DealProposal) error { - return nil -} - -// TrivialDealResponseWriter succeeds trivially, returning no error. -func TrivialDealResponseWriter(rm.DealResponse) error { - return nil -} - -// TrivialDealPaymentWriter succeeds trivially, returning no error. -func TrivialDealPaymentWriter(rm.DealPayment) error { - return nil -} - -// StubbedQueryReader returns the given query when called -func StubbedQueryReader(query rm.Query) QueryReader { - return func() (rm.Query, error) { - return query, nil - } -} - -// StubbedQueryResponseReader returns the given query response when called -func StubbedQueryResponseReader(queryResponse rm.QueryResponse) QueryResponseReader { - return func() (rm.QueryResponse, error) { - return queryResponse, nil - } -} - -// ExpectQueryWriter will fail if the written query and expected query don't match -func ExpectQueryWriter(t *testing.T, expectedQuery rm.Query, msgAndArgs ...interface{}) QueryWriter { - return func(query rm.Query) error { - require.Equal(t, expectedQuery, query, msgAndArgs...) - return nil - } -} - -// ExpectQueryResponseWriter will fail if the written query response and expected query response don't match -func ExpectQueryResponseWriter(t *testing.T, expectedQueryResponse rm.QueryResponse, msgAndArgs ...interface{}) QueryResponseWriter { - return func(queryResponse rm.QueryResponse) error { - require.Equal(t, expectedQueryResponse, queryResponse, msgAndArgs...) - return nil - } -} - -// ExpectDealResponseWriter will fail if the written query and expected query don't match -func ExpectDealResponseWriter(t *testing.T, expectedDealResponse rm.DealResponse, msgAndArgs ...interface{}) DealResponseWriter { - return func(dealResponse rm.DealResponse) error { - require.Equal(t, expectedDealResponse, dealResponse, msgAndArgs...) 
- return nil - } -} - -// QueryReadWriter will read only if something is written, otherwise it errors -func QueryReadWriter() (QueryReader, QueryWriter) { - var q rm.Query - var written bool - queryRead := func() (rm.Query, error) { - if written { - return q, nil - } - return rm.QueryUndefined, errors.New("Unable to read value") - } - queryWrite := func(wq rm.Query) error { - q = wq - written = true - return nil - } - return queryRead, queryWrite -} - -// QueryResponseReadWriter will read only if something is written, otherwise it errors -func QueryResponseReadWriter() (QueryResponseReader, QueryResponseWriter) { - var q rm.QueryResponse - var written bool - queryResponseRead := func() (rm.QueryResponse, error) { - if written { - return q, nil - } - return rm.QueryResponseUndefined, errors.New("Unable to read value") - } - queryResponseWrite := func(wq rm.QueryResponse) error { - q = wq - written = true - return nil - } - return queryResponseRead, queryResponseWrite -} - -// StubbedDealProposalReader returns the given proposal when called -func StubbedDealProposalReader(proposal rm.DealProposal) DealProposalReader { - return func() (rm.DealProposal, error) { - return proposal, nil - } -} - -// StubbedDealResponseReader returns the given deal response when called -func StubbedDealResponseReader(response rm.DealResponse) DealResponseReader { - return func() (rm.DealResponse, error) { - return response, nil - } -} - -// StubbedDealPaymentReader returns the given deal payment when called -func StubbedDealPaymentReader(payment rm.DealPayment) DealPaymentReader { - return func() (rm.DealPayment, error) { - return payment, nil - } -} - -// StorageDealProposalReader is a function to mock reading deal proposals. -type StorageDealProposalReader func() (smnet.Proposal, error) - -// StorageDealResponseReader is a function to mock reading deal responses. 
-type StorageDealResponseReader func() (smnet.SignedResponse, error) - -// StorageDealResponseWriter is a function to mock writing deal responses. -type StorageDealResponseWriter func(smnet.SignedResponse) error - -// StorageDealProposalWriter is a function to mock writing deal proposals. -type StorageDealProposalWriter func(smnet.Proposal) error - -// TestStorageDealStream is a retrieval deal stream with predefined -// stubbed behavior. -type TestStorageDealStream struct { - p peer.ID - proposalReader StorageDealProposalReader - proposalWriter StorageDealProposalWriter - responseReader StorageDealResponseReader - responseWriter StorageDealResponseWriter - tags map[string]struct{} - - CloseCount int - CloseError error -} - -// TestStorageDealStreamParams are parameters used to setup a TestStorageDealStream. -// All parameters except the peer ID are optional. -type TestStorageDealStreamParams struct { - PeerID peer.ID - ProposalReader StorageDealProposalReader - ProposalWriter StorageDealProposalWriter - ResponseReader StorageDealResponseReader - ResponseWriter StorageDealResponseWriter -} - -// NewTestStorageDealStream returns a new TestStorageDealStream with the -// behavior specified by the paramaters, or default behaviors if not specified. 
-func NewTestStorageDealStream(params TestStorageDealStreamParams) *TestStorageDealStream { - stream := TestStorageDealStream{ - p: params.PeerID, - proposalReader: TrivialStorageDealProposalReader, - proposalWriter: TrivialStorageDealProposalWriter, - responseReader: TrivialStorageDealResponseReader, - responseWriter: TrivialStorageDealResponseWriter, - tags: make(map[string]struct{}), - } - if params.ProposalReader != nil { - stream.proposalReader = params.ProposalReader - } - if params.ProposalWriter != nil { - stream.proposalWriter = params.ProposalWriter - } - if params.ResponseReader != nil { - stream.responseReader = params.ResponseReader - } - if params.ResponseWriter != nil { - stream.responseWriter = params.ResponseWriter - } - return &stream -} - -// ReadDealProposal calls the mocked deal proposal reader function. -func (tsds *TestStorageDealStream) ReadDealProposal() (smnet.Proposal, error) { - return tsds.proposalReader() -} - -// WriteDealProposal calls the mocked deal proposal writer function. -func (tsds *TestStorageDealStream) WriteDealProposal(dealProposal smnet.Proposal) error { - return tsds.proposalWriter(dealProposal) -} - -// ReadDealResponse calls the mocked deal response reader function. -func (tsds *TestStorageDealStream) ReadDealResponse() (smnet.SignedResponse, error) { - return tsds.responseReader() -} - -// WriteDealResponse calls the mocked deal response writer function. 
-func (tsds *TestStorageDealStream) WriteDealResponse(dealResponse smnet.SignedResponse) error { - return tsds.responseWriter(dealResponse) -} - -// RemotePeer returns the other peer -func (tsds TestStorageDealStream) RemotePeer() peer.ID { return tsds.p } - -// Close closes the stream (does nothing for mocked stream) -func (tsds *TestStorageDealStream) Close() error { - tsds.CloseCount += 1 - return tsds.CloseError -} - -// TagProtectedConnection preserves this connection as higher priority than others -func (tsds TestStorageDealStream) TagProtectedConnection(identifier string) { - tsds.tags[identifier] = struct{}{} -} - -// UntagProtectedConnection removes the given tag on this connection, increasing -// the likelyhood it will be cleaned up -func (tsds TestStorageDealStream) UntagProtectedConnection(identifier string) { - delete(tsds.tags, identifier) -} - -// AssertConnectionTagged verifies a connection was tagged with the given identifier -func (tsds TestStorageDealStream) AssertConnectionTagged(t *testing.T, identifier string) { - _, ok := tsds.tags[identifier] - require.True(t, ok) -} - -// TrivialStorageDealProposalReader succeeds trivially, returning an empty proposal. -func TrivialStorageDealProposalReader() (smnet.Proposal, error) { - return smnet.Proposal{}, nil -} - -// TrivialStorageDealResponseReader succeeds trivially, returning an empty deal response. -func TrivialStorageDealResponseReader() (smnet.SignedResponse, error) { - return smnet.SignedResponse{}, nil -} - -// TrivialStorageDealProposalWriter succeeds trivially, returning no error. -func TrivialStorageDealProposalWriter(smnet.Proposal) error { - return nil -} - -// TrivialStorageDealResponseWriter succeeds trivially, returning no error. 
-func TrivialStorageDealResponseWriter(smnet.SignedResponse) error { - return nil -} - -// StubbedStorageProposalReader returns the given proposal when called -func StubbedStorageProposalReader(proposal smnet.Proposal) StorageDealProposalReader { - return func() (smnet.Proposal, error) { - return proposal, nil - } -} - -// StubbedStorageResponseReader returns the given deal response when called -func StubbedStorageResponseReader(response smnet.SignedResponse) StorageDealResponseReader { - return func() (smnet.SignedResponse, error) { - return response, nil - } -} - -// FailStorageProposalWriter always fails -func FailStorageProposalWriter(smnet.Proposal) error { - return errors.New("write proposal failed") -} - -// FailStorageProposalReader always fails -func FailStorageProposalReader() (smnet.Proposal, error) { - return smnet.ProposalUndefined, errors.New("read proposal failed") -} - -// FailStorageResponseWriter always fails -func FailStorageResponseWriter(smnet.SignedResponse) error { - return errors.New("write proposal failed") -} - -// FailStorageResponseReader always fails -func FailStorageResponseReader() (smnet.SignedResponse, error) { - return smnet.SignedResponseUndefined, errors.New("read response failed") -} - -// TestPeerResolver provides a fake retrievalmarket PeerResolver -type TestPeerResolver struct { - Peers []rm.RetrievalPeer - ResolverError error -} - -func (tpr TestPeerResolver) GetPeers(cid.Cid) ([]rm.RetrievalPeer, error) { - return tpr.Peers, tpr.ResolverError -} - -var _ rm.PeerResolver = &TestPeerResolver{} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_piecestore.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_piecestore.go deleted file mode 100644 index 6ef138b9d9..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/test_piecestore.go +++ /dev/null @@ -1,138 +0,0 @@ -package shared_testutil - -import ( - "errors" - "testing" - - 
"github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" -) - -// TestPieceStore is piecestore who's query results are mocked -type TestPieceStore struct { - addPieceBlockLocationsError error - addDealForPieceError error - getPieceInfoError error - piecesStubbed map[cid.Cid]piecestore.PieceInfo - piecesExpected map[cid.Cid]struct{} - piecesReceived map[cid.Cid]struct{} - cidInfosStubbed map[cid.Cid]piecestore.CIDInfo - cidInfosExpected map[cid.Cid]struct{} - cidInfosReceived map[cid.Cid]struct{} -} - -// TestPieceStoreParams sets parameters for a piece store -type TestPieceStoreParams struct { - AddDealForPieceError error - AddPieceBlockLocationsError error - GetPieceInfoError error -} - -var _ piecestore.PieceStore = &TestPieceStore{} - -// NewTestPieceStore creates a TestPieceStore -func NewTestPieceStore() *TestPieceStore { - return NewTestPieceStoreWithParams(TestPieceStoreParams{}) -} - -// NewTestPieceStoreWithParams creates a TestPieceStore with the given parameters -func NewTestPieceStoreWithParams(params TestPieceStoreParams) *TestPieceStore { - return &TestPieceStore{ - addDealForPieceError: params.AddDealForPieceError, - addPieceBlockLocationsError: params.AddPieceBlockLocationsError, - getPieceInfoError: params.GetPieceInfoError, - piecesStubbed: make(map[cid.Cid]piecestore.PieceInfo), - piecesExpected: make(map[cid.Cid]struct{}), - piecesReceived: make(map[cid.Cid]struct{}), - cidInfosStubbed: make(map[cid.Cid]piecestore.CIDInfo), - cidInfosExpected: make(map[cid.Cid]struct{}), - cidInfosReceived: make(map[cid.Cid]struct{}), - } -} - -// StubPiece creates a return value for the given piece cid without expecting it -// to be called -func (tps *TestPieceStore) StubPiece(pieceCid cid.Cid, pieceInfo piecestore.PieceInfo) { - tps.piecesStubbed[pieceCid] = pieceInfo -} - -// ExpectPiece records a piece being expected to be 
queried and return the given piece info -func (tps *TestPieceStore) ExpectPiece(pieceCid cid.Cid, pieceInfo piecestore.PieceInfo) { - tps.piecesExpected[pieceCid] = struct{}{} - tps.StubPiece(pieceCid, pieceInfo) -} - -// ExpectMissingPiece records a piece being expected to be queried and should fail -func (tps *TestPieceStore) ExpectMissingPiece(pieceCid cid.Cid) { - tps.piecesExpected[pieceCid] = struct{}{} -} - -// StubCID creates a return value for the given CID without expecting it -// to be called -func (tps *TestPieceStore) StubCID(c cid.Cid, cidInfo piecestore.CIDInfo) { - tps.cidInfosStubbed[c] = cidInfo -} - -// ExpectCID records a CID being expected to be queried and return the given CID info -func (tps *TestPieceStore) ExpectCID(c cid.Cid, cidInfo piecestore.CIDInfo) { - tps.cidInfosExpected[c] = struct{}{} - tps.StubCID(c, cidInfo) -} - -// ExpectMissingCID records a CID being expected to be queried and should fail -func (tps *TestPieceStore) ExpectMissingCID(c cid.Cid) { - tps.cidInfosExpected[c] = struct{}{} -} - -// VerifyExpectations verifies that the piecestore was queried in the expected ways -func (tps *TestPieceStore) VerifyExpectations(t *testing.T) { - require.Equal(t, tps.piecesExpected, tps.piecesReceived) - require.Equal(t, tps.cidInfosExpected, tps.cidInfosReceived) -} - -// AddDealForPiece returns a preprogrammed error -func (tps *TestPieceStore) AddDealForPiece(pieceCID cid.Cid, dealInfo piecestore.DealInfo) error { - return tps.addDealForPieceError -} - -// AddPieceBlockLocations returns a preprogrammed error -func (tps *TestPieceStore) AddPieceBlockLocations(pieceCID cid.Cid, blockLocations map[cid.Cid]piecestore.BlockLocation) error { - return tps.addPieceBlockLocationsError -} - -// GetPieceInfo returns a piece info if it's been stubbed -func (tps *TestPieceStore) GetPieceInfo(pieceCID cid.Cid) (piecestore.PieceInfo, error) { - if tps.getPieceInfoError != nil { - return piecestore.PieceInfoUndefined, tps.getPieceInfoError - } - - 
tps.piecesReceived[pieceCID] = struct{}{} - - pio, ok := tps.piecesStubbed[pieceCID] - if ok { - return pio, nil - } - _, ok = tps.piecesExpected[pieceCID] - if ok { - return piecestore.PieceInfoUndefined, retrievalmarket.ErrNotFound - } - return piecestore.PieceInfoUndefined, errors.New("GetPieceInfo failed") -} - -// GetCIDInfo returns cid info if it's been stubbed -func (tps *TestPieceStore) GetCIDInfo(c cid.Cid) (piecestore.CIDInfo, error) { - tps.cidInfosReceived[c] = struct{}{} - - cio, ok := tps.cidInfosStubbed[c] - if ok { - return cio, nil - } - _, ok = tps.cidInfosExpected[c] - if ok { - return piecestore.CIDInfoUndefined, retrievalmarket.ErrNotFound - } - return piecestore.CIDInfoUndefined, errors.New("GetCIDInfo failed") -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/testutil.go b/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/testutil.go deleted file mode 100644 index 67ca0cf930..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/shared_testutil/testutil.go +++ /dev/null @@ -1,114 +0,0 @@ -package shared_testutil - -import ( - "bytes" - "testing" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - "github.com/jbenet/go-random" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/storagemarket" -) - -var blockGenerator = blocksutil.NewBlockGenerator() - -//var prioritySeq int -var seedSeq int64 - -// RandomBytes returns a byte array of the given size with random values. 
-func RandomBytes(n int64) []byte { - data := new(bytes.Buffer) - random.WritePseudoRandomBytes(n, data, seedSeq) // nolint: gosec,errcheck - seedSeq++ - return data.Bytes() -} - -// GenerateBlocksOfSize generates a series of blocks of the given byte size -func GenerateBlocksOfSize(n int, size int64) []blocks.Block { - generatedBlocks := make([]blocks.Block, 0, n) - for i := 0; i < n; i++ { - b := blocks.NewBlock(RandomBytes(size)) - generatedBlocks = append(generatedBlocks, b) - - } - return generatedBlocks -} - -// GenerateCids produces n content identifiers. -func GenerateCids(n int) []cid.Cid { - cids := make([]cid.Cid, 0, n) - for i := 0; i < n; i++ { - c := blockGenerator.Next().Cid() - cids = append(cids, c) - } - return cids -} - -var peerSeq int - -// GeneratePeers creates n peer ids. -func GeneratePeers(n int) []peer.ID { - peerIds := make([]peer.ID, 0, n) - for i := 0; i < n; i++ { - peerSeq++ - p := peer.ID(peerSeq) - peerIds = append(peerIds, p) - } - return peerIds -} - -// ContainsPeer returns true if a peer is found n a list of peers. 
-func ContainsPeer(peers []peer.ID, p peer.ID) bool { - for _, n := range peers { - if p == n { - return true - } - } - return false -} - -// IndexOf returns the index of a given cid in an array of blocks -func IndexOf(blks []blocks.Block, c cid.Cid) int { - for i, n := range blks { - if n.Cid() == c { - return i - } - } - return -1 -} - -// ContainsBlock returns true if a block is found n a list of blocks -func ContainsBlock(blks []blocks.Block, block blocks.Block) bool { - return IndexOf(blks, block.Cid()) != -1 -} - -// TestVoucherEquality verifies that two vouchers are equal to one another -func TestVoucherEquality(t *testing.T, a, b *paych.SignedVoucher) { - aB, err := cborutil.Dump(a) - require.NoError(t, err) - bB, err := cborutil.Dump(b) - require.NoError(t, err) - require.True(t, bytes.Equal(aB, bB)) -} - -// AssertDealState asserts equality of StorageDealStatus but with better error messaging -func AssertDealState(t *testing.T, expected storagemarket.StorageDealStatus, actual storagemarket.StorageDealStatus) { - assert.Equal(t, expected, actual, - "Unexpected deal status\nexpected: %s (%d)\nactual : %s (%d)", - storagemarket.DealStates[expected], expected, - storagemarket.DealStates[actual], actual, - ) -} - -func GenerateCid(t *testing.T, o interface{}) cid.Cid { - node, err := cborutil.AsIpld(o) - assert.NoError(t, err) - return node.Cid() -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/README.md b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/README.md index ec2b20d5ab..d89c02c40b 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/README.md +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/README.md @@ -144,16 +144,6 @@ func PublishDeals(ctx context.Context, deal MinerDeal) (cid.Cid, error) ``` Post the deal to chain, returning the posted message CID. 
-#### ListProviderDeals -```go -func ListProviderDeals(ctx context.Context, addr address.Address, tok shared.TipSetToken, - ) ([]StorageDeal, error) -``` - -List all storage deals for storage provider `addr`, as of `tok`. Return a slice of `StorageDeal`. -`StorageDeal` is a local combination of a storage deal proposal and a current deal -state. See [storagemarket/types.go](./types.go) - #### OnDealComplete ```go func OnDealComplete(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, @@ -203,7 +193,6 @@ Register callbacks to be called when a deal expires or is slashed. * [`GetDefaultWalletAddress`](#GetDefaultWalletAddress) * [`OnDealSectorCommitted`](#OnDealSectorCommitted) * [`OnDealExpiredOrSlashed`](#OnDealExpiredOrSlashed) -* [`ValidateAskSignature`](#ValidateAskSignature) #### StorageCommon `StorageClientNode` implements `StorageCommon`, described above. @@ -214,13 +203,6 @@ func GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, erro ``` Get the current chain head. Return its TipSetToken and its abi.ChainEpoch. -#### ListClientDeals -```go -func ListClientDeals(ctx context.Context, addr address.Address, tok shared.TipSetToken, - ) ([]StorageDeal, error) -``` -List all deals associated with storage client `addr`, as of `tok`. Return a slice of `StorageDeal`. - #### ListStorageProviders ```go func ListStorageProviders(ctx context.Context, tok shared.TipSetToken @@ -270,14 +252,6 @@ func OnDealExpiredOrSlashed( Register callbacks to be called when a deal expires or is slashed. -#### ValidateAskSignature -```go -func ValidateAskSignature(ctx context.Context, ask *SignedStorageAsk, tok shared.TipSetToken, - ) (bool, error) -``` -Verify the signature in `ask`, returning true (valid) or false (invalid). - - #### GetMinerInfo ```go func GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken, @@ -370,7 +344,7 @@ See this repo's [piecestore module](../piecestore). that was written for your node. 
* `minerAddress address.Address` is the miner owner address. * `rt abi.RegisteredProof` is an int64 indicating the type of proof to use when generating a piece commitment (CommP). - see [github.com/filecoin-project/specs-actors/actors/abi/sector.go](https://github.com/filecoin-project/specs-actors/blob/master/actors/abi/sector.go) + see [github.com/filecoin-project/go-state-types/abi/sector.go](https://github.com/filecoin-project/specs-actors/blob/master/actors/abi/sector.go) for the list and meaning of accepted values. * `storedAsk StoredAsk` is an interface for getting and adding storage Asks. It is implemented in storagemarket. To create a `StoredAsk`: diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/client.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/client.go index 5998ff1c94..e31ddd7e9a 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/client.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/client.go @@ -3,10 +3,12 @@ package storagemarket import ( "context" + "github.com/ipfs/go-cid" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" ) // ClientSubscriber is a callback that is run when events are emitted on a StorageClient @@ -19,15 +21,15 @@ type StorageClient interface { // in progress deals Start(ctx context.Context) error + // OnReady registers a listener for when the client comes on line + OnReady(shared.ReadyFunc) + // Stop ends deal processing on a StorageClient Stop() error // ListProviders queries chain state and returns active storage providers ListProviders(ctx context.Context) (<-chan StorageProviderInfo, error) - // ListDeals lists on-chain deals associated with this storage client - ListDeals(ctx context.Context, addr address.Address) ([]StorageDeal, error) - // 
ListLocalDeals lists deals initiated by this storage client ListLocalDeals(ctx context.Context) ([]ClientDeal, error) @@ -35,13 +37,13 @@ type StorageClient interface { GetLocalDeal(ctx context.Context, cid cid.Cid) (ClientDeal, error) // GetAsk returns the current ask for a storage provider - GetAsk(ctx context.Context, info StorageProviderInfo) (*SignedStorageAsk, error) + GetAsk(ctx context.Context, info StorageProviderInfo) (*StorageAsk, error) // GetProviderDealState queries a provider for the current state of a client's deal GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*ProviderDealState, error) // ProposeStorageDeal initiates deal negotiation with a Storage Provider - ProposeStorageDeal(ctx context.Context, addr address.Address, info *StorageProviderInfo, data *DataRef, startEpoch abi.ChainEpoch, endEpoch abi.ChainEpoch, price abi.TokenAmount, collateral abi.TokenAmount, rt abi.RegisteredSealProof, fastRetrieval bool, verifiedDeal bool) (*ProposeStorageDealResult, error) + ProposeStorageDeal(ctx context.Context, params ProposeStorageDealParams) (*ProposeStorageDealResult, error) // GetPaymentEscrow returns the current funds available for deal payment GetPaymentEscrow(ctx context.Context, addr address.Address) (Balance, error) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/dealstatus.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/dealstatus.go index 494129978a..156c335746 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/dealstatus.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/dealstatus.go @@ -26,9 +26,9 @@ const ( // StorageDealSealing means a deal is in a sector that is being sealed StorageDealSealing - // StorageDealRecordPiece means a deal is in a sealed sector and the piece - // is being added to the piece store - StorageDealRecordPiece + // StorageDealFinalizing means a deal is in a sealed sector and we're doing final + // 
housekeeping before marking it active + StorageDealFinalizing // StorageDealActive means a deal is in a sealed sector and the miner is proving the data // for the deal @@ -94,34 +94,45 @@ const ( // StorageDealError means the deal has failed due to an error, and no further updates will occur StorageDealError + + // StorageDealProviderTransferRestart means a storage deal data transfer from client to provider will be restarted + // by the provider + StorageDealProviderTransferRestart + + // StorageDealClientTransferRestart means a storage deal data transfer from client to provider will be restarted + // by the client + StorageDealClientTransferRestart ) // DealStates maps StorageDealStatus codes to string names var DealStates = map[StorageDealStatus]string{ - StorageDealUnknown: "StorageDealUnknown", - StorageDealProposalNotFound: "StorageDealProposalNotFound", - StorageDealProposalRejected: "StorageDealProposalRejected", - StorageDealProposalAccepted: "StorageDealProposalAccepted", - StorageDealAcceptWait: "StorageDealAcceptWait", - StorageDealStartDataTransfer: "StorageDealStartDataTransfer", - StorageDealStaged: "StorageDealStaged", - StorageDealSealing: "StorageDealSealing", - StorageDealActive: "StorageDealActive", - StorageDealExpired: "StorageDealExpired", - StorageDealSlashed: "StorageDealSlashed", - StorageDealRejecting: "StorageDealRejecting", - StorageDealFailing: "StorageDealFailing", - StorageDealFundsEnsured: "StorageDealFundsEnsured", - StorageDealCheckForAcceptance: "StorageDealCheckForAcceptance", - StorageDealValidating: "StorageDealValidating", - StorageDealTransferring: "StorageDealTransferring", - StorageDealWaitingForData: "StorageDealWaitingForData", - StorageDealVerifyData: "StorageDealVerifyData", - StorageDealEnsureProviderFunds: "StorageDealEnsureProviderFunds", - StorageDealEnsureClientFunds: "StorageDealEnsureClientFunds", - StorageDealProviderFunding: "StorageDealProviderFunding", - StorageDealClientFunding: "StorageDealClientFunding", - 
StorageDealPublish: "StorageDealPublish", - StorageDealPublishing: "StorageDealPublishing", - StorageDealError: "StorageDealError", + StorageDealUnknown: "StorageDealUnknown", + StorageDealProposalNotFound: "StorageDealProposalNotFound", + StorageDealProposalRejected: "StorageDealProposalRejected", + StorageDealProposalAccepted: "StorageDealProposalAccepted", + StorageDealAcceptWait: "StorageDealAcceptWait", + StorageDealStartDataTransfer: "StorageDealStartDataTransfer", + StorageDealStaged: "StorageDealStaged", + StorageDealSealing: "StorageDealSealing", + StorageDealActive: "StorageDealActive", + StorageDealExpired: "StorageDealExpired", + StorageDealSlashed: "StorageDealSlashed", + StorageDealRejecting: "StorageDealRejecting", + StorageDealFailing: "StorageDealFailing", + StorageDealFundsEnsured: "StorageDealFundsEnsured", + StorageDealCheckForAcceptance: "StorageDealCheckForAcceptance", + StorageDealValidating: "StorageDealValidating", + StorageDealTransferring: "StorageDealTransferring", + StorageDealWaitingForData: "StorageDealWaitingForData", + StorageDealVerifyData: "StorageDealVerifyData", + StorageDealEnsureProviderFunds: "StorageDealEnsureProviderFunds", + StorageDealEnsureClientFunds: "StorageDealEnsureClientFunds", + StorageDealProviderFunding: "StorageDealProviderFunding", + StorageDealClientFunding: "StorageDealClientFunding", + StorageDealPublish: "StorageDealPublish", + StorageDealPublishing: "StorageDealPublishing", + StorageDealError: "StorageDealError", + StorageDealFinalizing: "StorageDealFinalizing", + StorageDealClientTransferRestart: "StorageDealClientTransferRestart", + StorageDealProviderTransferRestart: "StorageDealProviderTransferRestart", } diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/events.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/events.go index 5a1d126224..391ecbe5a0 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/events.go +++ 
b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/events.go @@ -13,6 +13,12 @@ const ( // ClientEventFundingInitiated happens when a client has sent a message adding funds to its balance ClientEventFundingInitiated + // ClientEventFundsReserved happens when a client reserves funds for a deal (updating our tracked funds) + ClientEventFundsReserved + + // ClientEventFundsReleased happens when a client released funds for a deal (updating our tracked funds) + ClientEventFundsReleased + // ClientEventFundsEnsured happens when a client successfully ensures it has funds for a deal ClientEventFundsEnsured @@ -25,6 +31,9 @@ const ( // ClientEventDataTransferInitiated happens when piece data transfer has started ClientEventDataTransferInitiated + // ClientEventDataTransferRestarted happens when a data transfer from client to provider is restarted by the client + ClientEventDataTransferRestarted + // ClientEventDataTransferComplete happens when piece data transfer has been completed ClientEventDataTransferComplete @@ -34,6 +43,9 @@ const ( // ClientEventDataTransferFailed happens the client can't initiate a push data transfer to the provider ClientEventDataTransferFailed + // ClientEventDataTransferRestartFailed happens when the client can't restart an existing data transfer + ClientEventDataTransferRestartFailed + // ClientEventReadResponseFailed means a network error occurred reading a deal response ClientEventReadResponseFailed @@ -81,6 +93,9 @@ const ( // ClientEventRestart is used to resume the deal after a state machine shutdown ClientEventRestart + + // ClientEventDataTransferStalled happens when the clients data transfer experiences a disconnect + ClientEventDataTransferStalled ) // ClientEvents maps client event codes to string names @@ -88,6 +103,8 @@ var ClientEvents = map[ClientEvent]string{ ClientEventOpen: "ClientEventOpen", ClientEventEnsureFundsFailed: "ClientEventEnsureFundsFailed", ClientEventFundingInitiated: "ClientEventFundingInitiated", + 
ClientEventFundsReserved: "ClientEventFundsReserved", + ClientEventFundsReleased: "ClientEventFundsReleased", ClientEventFundsEnsured: "ClientEventFundsEnsured", ClientEventWriteProposalFailed: "ClientEventWriteProposalFailed", ClientEventInitiateDataTransfer: "ClientEventInitiateDataTransfer", @@ -111,6 +128,9 @@ var ClientEvents = map[ClientEvent]string{ ClientEventDealSlashed: "ClientEventDealSlashed", ClientEventFailed: "ClientEventFailed", ClientEventRestart: "ClientEventRestart", + ClientEventDataTransferRestarted: "ClientEventDataTransferRestarted", + ClientEventDataTransferRestartFailed: "ClientEventDataTransferRestartFailed", + ClientEventDataTransferStalled: "ClientEventDataTransferStalled", } // ProviderEvent is an event that happens in the provider's deal state machine @@ -138,6 +158,12 @@ const ( // ProviderEventInsufficientFunds indicates not enough funds available for a deal ProviderEventInsufficientFunds + // ProviderEventFundsReserved indicates we've reserved funds for a deal, adding to our overall total + ProviderEventFundsReserved + + // ProviderEventFundsReleased indicates we've released funds for a deal + ProviderEventFundsReleased + // ProviderEventFundingInitiated indicates provider collateral funding has been initiated ProviderEventFundingInitiated @@ -153,6 +179,9 @@ const ( // ProviderEventDataTransferInitiated happens when a data transfer starts ProviderEventDataTransferInitiated + // ProviderEventDataTransferRestarted happens when a data transfer restarts + ProviderEventDataTransferRestarted + // ProviderEventDataTransferCompleted happens when a data transfer is successful ProviderEventDataTransferCompleted @@ -202,12 +231,15 @@ const ( // ProviderEventReadMetadataErrored happens when an error occurs reading recorded piece metadata ProviderEventReadMetadataErrored - // ProviderEventPieceRecorded happens when a piece is successfully recorded - ProviderEventPieceRecorded + // ProviderEventFinalized happens when final housekeeping is 
complete and a deal is active + ProviderEventFinalized // ProviderEventDealCompletionFailed happens when a miner cannot verify a deal expired or was slashed ProviderEventDealCompletionFailed + // ProviderEventMultistoreErrored indicates an error happened with a store for a deal + ProviderEventMultistoreErrored + // ProviderEventDealExpired happens when a deal expires ProviderEventDealExpired @@ -217,44 +249,58 @@ const ( // ProviderEventFailed indicates a deal has failed and should no longer be processed ProviderEventFailed + // ProviderEventTrackFundsFailed indicates a failure trying to locally track funds needed for deals + ProviderEventTrackFundsFailed + // ProviderEventRestart is used to resume the deal after a state machine shutdown ProviderEventRestart + + // ProviderEventDataTransferRestartFailed means a data transfer that was restarted by the provider failed + ProviderEventDataTransferRestartFailed + + // ProviderEventDataTransferStalled happens when the providers data transfer experiences a disconnect + ProviderEventDataTransferStalled ) // ProviderEvents maps provider event codes to string names var ProviderEvents = map[ProviderEvent]string{ - ProviderEventOpen: "ProviderEventOpen", - ProviderEventNodeErrored: "ProviderEventNodeErrored", - ProviderEventDealRejected: "ProviderEventDealRejected", - ProviderEventRejectionSent: "ProviderEventRejectionSent", - ProviderEventDealAccepted: "ProviderEventDealAccepted", - ProviderEventDealDeciding: "ProviderEventDealDeciding", - ProviderEventInsufficientFunds: "ProviderEventInsufficientFunds", - ProviderEventFundingInitiated: "ProviderEventFundingInitiated", - ProviderEventFunded: "ProviderEventFunded", - ProviderEventDataTransferFailed: "ProviderEventDataTransferFailed", - ProviderEventDataRequested: "ProviderEventDataRequested", - ProviderEventDataTransferInitiated: "ProviderEventDataTransferInitiated", - ProviderEventDataTransferCompleted: "ProviderEventDataTransferCompleted", - ProviderEventManualDataReceived: 
"ProviderEventManualDataReceived", - ProviderEventDataVerificationFailed: "ProviderEventDataVerificationFailed", - ProviderEventVerifiedData: "ProviderEventVerifiedData", - ProviderEventSendResponseFailed: "ProviderEventSendResponseFailed", - ProviderEventDealPublishInitiated: "ProviderEventDealPublishInitiated", - ProviderEventDealPublished: "ProviderEventDealPublished", - ProviderEventDealPublishError: "ProviderEventDealPublishError", - ProviderEventFileStoreErrored: "ProviderEventFileStoreErrored", - ProviderEventDealHandoffFailed: "ProviderEventDealHandoffFailed", - ProviderEventDealHandedOff: "ProviderEventDealHandedOff", - ProviderEventDealActivationFailed: "ProviderEventDealActivationFailed", - ProviderEventUnableToLocatePiece: "ProviderEventUnableToLocatePiece", - ProviderEventDealActivated: "ProviderEventDealActivated", - ProviderEventPieceStoreErrored: "ProviderEventPieceStoreErrored", - ProviderEventReadMetadataErrored: "ProviderEventReadMetadataErrored", - ProviderEventPieceRecorded: "ProviderEventPieceRecorded", - ProviderEventDealCompletionFailed: "ProviderEventDealCompletionFailed", - ProviderEventDealExpired: "ProviderEventDealExpired", - ProviderEventDealSlashed: "ProviderEventDealSlashed", - ProviderEventFailed: "ProviderEventFailed", - ProviderEventRestart: "ProviderEventRestart", + ProviderEventOpen: "ProviderEventOpen", + ProviderEventNodeErrored: "ProviderEventNodeErrored", + ProviderEventDealRejected: "ProviderEventDealRejected", + ProviderEventRejectionSent: "ProviderEventRejectionSent", + ProviderEventDealAccepted: "ProviderEventDealAccepted", + ProviderEventDealDeciding: "ProviderEventDealDeciding", + ProviderEventInsufficientFunds: "ProviderEventInsufficientFunds", + ProviderEventFundsReserved: "ProviderEventFundsReserved", + ProviderEventFundsReleased: "ProviderEventFundsReleased", + ProviderEventFundingInitiated: "ProviderEventFundingInitiated", + ProviderEventFunded: "ProviderEventFunded", + ProviderEventDataTransferFailed: 
"ProviderEventDataTransferFailed", + ProviderEventDataRequested: "ProviderEventDataRequested", + ProviderEventDataTransferInitiated: "ProviderEventDataTransferInitiated", + ProviderEventDataTransferCompleted: "ProviderEventDataTransferCompleted", + ProviderEventManualDataReceived: "ProviderEventManualDataReceived", + ProviderEventDataVerificationFailed: "ProviderEventDataVerificationFailed", + ProviderEventVerifiedData: "ProviderEventVerifiedData", + ProviderEventSendResponseFailed: "ProviderEventSendResponseFailed", + ProviderEventDealPublishInitiated: "ProviderEventDealPublishInitiated", + ProviderEventDealPublished: "ProviderEventDealPublished", + ProviderEventDealPublishError: "ProviderEventDealPublishError", + ProviderEventFileStoreErrored: "ProviderEventFileStoreErrored", + ProviderEventDealHandoffFailed: "ProviderEventDealHandoffFailed", + ProviderEventDealHandedOff: "ProviderEventDealHandedOff", + ProviderEventDealActivationFailed: "ProviderEventDealActivationFailed", + ProviderEventDealActivated: "ProviderEventDealActivated", + ProviderEventPieceStoreErrored: "ProviderEventPieceStoreErrored", + ProviderEventFinalized: "ProviderEventCleanupFinished", + ProviderEventDealCompletionFailed: "ProviderEventDealCompletionFailed", + ProviderEventMultistoreErrored: "ProviderEventMultistoreErrored", + ProviderEventDealExpired: "ProviderEventDealExpired", + ProviderEventDealSlashed: "ProviderEventDealSlashed", + ProviderEventFailed: "ProviderEventFailed", + ProviderEventTrackFundsFailed: "ProviderEventTrackFundsFailed", + ProviderEventRestart: "ProviderEventRestart", + ProviderEventDataTransferRestarted: "ProviderEventDataTransferRestarted", + ProviderEventDataTransferRestartFailed: "ProviderEventDataTransferRestartFailed", + ProviderEventDataTransferStalled: "ProviderEventDataTransferStalled", } diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/fixtures/payload.txt 
b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/fixtures/payload.txt deleted file mode 100644 index fd4a2f3c1f..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/fixtures/payload.txt +++ /dev/null @@ -1,49 +0,0 @@ -Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Vitae semper quis lectus nulla at volutpat diam ut venenatis. Ac tortor dignissim convallis aenean et tortor at. Faucibus ornare suspendisse sed nisi lacus sed. Commodo ullamcorper a lacus vestibulum sed arcu non. Est pellentesque elit ullamcorper dignissim. Quam quisque id diam vel quam. Pretium aenean pharetra magna ac. In nulla posuere sollicitudin aliquam ultrices. Sed arcu non odio euismod lacinia at. Suspendisse ultrices gravida dictum fusce ut placerat orci nulla pellentesque. Feugiat vivamus at augue eget arcu. - -Pellentesque nec nam aliquam sem et tortor. Vitae tortor condimentum lacinia quis vel. Cras pulvinar mattis nunc sed. In massa tempor nec feugiat. Ornare arcu odio ut sem nulla. Diam maecenas sed enim ut sem. Pretium vulputate sapien nec sagittis. Bibendum arcu vitae elementum curabitur vitae nunc sed velit dignissim. Duis ut diam quam nulla porttitor massa. Viverra mauris in aliquam sem fringilla ut morbi. Ullamcorper eget nulla facilisi etiam dignissim. Vulputate mi sit amet mauris commodo quis imperdiet massa tincidunt. Nunc consequat interdum varius sit. Nunc mi ipsum faucibus vitae aliquet nec ullamcorper. Nunc sed augue lacus viverra. Lobortis scelerisque fermentum dui faucibus in ornare quam. Urna neque viverra justo nec ultrices. Varius vel pharetra vel turpis nunc eget lorem dolor sed. - -Feugiat nisl pretium fusce id velit ut tortor pretium. Lorem dolor sed viverra ipsum nunc aliquet bibendum. Ultrices vitae auctor eu augue ut lectus. Pharetra massa massa ultricies mi quis. Nibh cras pulvinar mattis nunc sed blandit libero. 
Ac felis donec et odio pellentesque diam volutpat. Lectus proin nibh nisl condimentum id venenatis. Quis vel eros donec ac odio. Commodo sed egestas egestas fringilla phasellus faucibus scelerisque eleifend donec. Adipiscing diam donec adipiscing tristique. - -Tempus imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Libero nunc consequat interdum varius sit. Et pharetra pharetra massa massa. Feugiat pretium nibh ipsum consequat. Amet commodo nulla facilisi nullam vehicula. Ornare arcu dui vivamus arcu felis bibendum ut tristique. At erat pellentesque adipiscing commodo elit at imperdiet dui. Auctor neque vitae tempus quam pellentesque nec nam aliquam sem. Eget velit aliquet sagittis id consectetur. Enim diam vulputate ut pharetra sit amet aliquam id diam. Eget velit aliquet sagittis id consectetur purus ut faucibus pulvinar. Amet porttitor eget dolor morbi. Felis eget velit aliquet sagittis id. Facilisis magna etiam tempor orci eu. Lacus suspendisse faucibus interdum posuere lorem. Pharetra et ultrices neque ornare aenean euismod. Platea dictumst quisque sagittis purus. - -Quis varius quam quisque id diam vel quam elementum. Augue mauris augue neque gravida in fermentum et sollicitudin. Sapien nec sagittis aliquam malesuada bibendum arcu. Urna duis convallis convallis tellus id interdum velit. Tellus in hac habitasse platea dictumst vestibulum. Fames ac turpis egestas maecenas pharetra convallis. Diam volutpat commodo sed egestas egestas fringilla phasellus faucibus. Placerat orci nulla pellentesque dignissim enim sit amet venenatis. Sed adipiscing diam donec adipiscing. Praesent elementum facilisis leo vel fringilla est. Sed enim ut sem viverra aliquet eget sit amet tellus. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra. Turpis egestas pretium aenean pharetra magna ac placerat vestibulum. Massa id neque aliquam vestibulum morbi blandit cursus risus. Vitae congue eu consequat ac. 
Egestas erat imperdiet sed euismod nisi porta lorem mollis aliquam. Dolor purus non enim praesent elementum facilisis. Ultrices mi tempus imperdiet nulla malesuada pellentesque elit. In est ante in nibh. - -Facilisis gravida neque convallis a. Urna nunc id cursus metus aliquam eleifend mi. Lacus luctus accumsan tortor posuere ac. Molestie nunc non blandit massa. Iaculis urna id volutpat lacus laoreet non. Cursus vitae congue mauris rhoncus aenean. Nunc vel risus commodo viverra maecenas. A pellentesque sit amet porttitor eget dolor morbi. Leo vel orci porta non pulvinar neque laoreet suspendisse. Sit amet facilisis magna etiam tempor. Consectetur a erat nam at lectus urna duis convallis convallis. Vestibulum morbi blandit cursus risus at ultrices. Dolor purus non enim praesent elementum. Adipiscing elit pellentesque habitant morbi tristique senectus et netus et. Et odio pellentesque diam volutpat commodo sed egestas egestas fringilla. Leo vel fringilla est ullamcorper eget nulla. Dui ut ornare lectus sit amet. Erat pellentesque adipiscing commodo elit at imperdiet dui accumsan sit. - -Tristique senectus et netus et. Pellentesque diam volutpat commodo sed egestas egestas fringilla. Mauris pharetra et ultrices neque ornare aenean. Amet tellus cras adipiscing enim. Convallis aenean et tortor at risus viverra adipiscing at. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. Dictumst vestibulum rhoncus est pellentesque elit. Fringilla ut morbi tincidunt augue interdum velit euismod in pellentesque. Dictum at tempor commodo ullamcorper a lacus vestibulum. Sed viverra tellus in hac habitasse platea. Sed id semper risus in hendrerit. In hendrerit gravida rutrum quisque non tellus orci ac. Sit amet risus nullam eget. Sit amet est placerat in egestas erat imperdiet sed. In nisl nisi scelerisque eu ultrices. Sit amet mattis vulputate enim nulla aliquet. - -Dignissim suspendisse in est ante in nibh mauris cursus. Vitae proin sagittis nisl rhoncus. 
Id leo in vitae turpis massa sed elementum. Lobortis elementum nibh tellus molestie nunc non blandit massa enim. Arcu dictum varius duis at consectetur. Suspendisse faucibus interdum posuere lorem ipsum dolor sit amet consectetur. Imperdiet nulla malesuada pellentesque elit eget gravida cum sociis. Sed adipiscing diam donec adipiscing. Purus sit amet volutpat consequat mauris nunc congue nisi vitae. Elementum nisi quis eleifend quam adipiscing vitae proin sagittis nisl. Mattis ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Sit amet nisl purus in mollis nunc sed. Turpis tincidunt id aliquet risus feugiat in ante. Id diam maecenas ultricies mi eget mauris pharetra et ultrices. - -Aliquam purus sit amet luctus venenatis lectus magna fringilla urna. Id diam vel quam elementum pulvinar. Elementum sagittis vitae et leo duis. Viverra aliquet eget sit amet tellus cras adipiscing enim eu. Et tortor at risus viverra adipiscing at in tellus integer. Purus in massa tempor nec feugiat. Augue neque gravida in fermentum et sollicitudin ac orci. Sodales ut eu sem integer vitae justo eget magna fermentum. Netus et malesuada fames ac. Augue interdum velit euismod in. Sed elementum tempus egestas sed sed risus pretium. Mattis vulputate enim nulla aliquet porttitor lacus luctus. Dui vivamus arcu felis bibendum ut tristique et egestas quis. - -Viverra justo nec ultrices dui sapien. Quisque egestas diam in arcu cursus euismod quis viverra nibh. Nam libero justo laoreet sit amet cursus sit amet. Lacus sed viverra tellus in hac habitasse. Blandit aliquam etiam erat velit scelerisque in. Ut sem nulla pharetra diam sit amet nisl suscipit adipiscing. Diam sollicitudin tempor id eu nisl nunc. Eget duis at tellus at urna condimentum mattis. Urna porttitor rhoncus dolor purus non enim praesent elementum facilisis. Sed turpis tincidunt id aliquet risus feugiat. Est velit egestas dui id ornare arcu odio ut sem. Nibh sit amet commodo nulla facilisi nullam vehicula. 
Sit amet consectetur adipiscing elit duis tristique sollicitudin. Eu facilisis sed odio morbi. Massa id neque aliquam vestibulum morbi. In eu mi bibendum neque egestas congue quisque egestas. Massa sed elementum tempus egestas sed sed risus. Quam elementum pulvinar etiam non. At augue eget arcu dictum varius duis at consectetur lorem. - -Penatibus et magnis dis parturient montes nascetur ridiculus. Dictumst quisque sagittis purus sit amet volutpat consequat. Bibendum at varius vel pharetra. Sed adipiscing diam donec adipiscing tristique risus nec feugiat in. Phasellus faucibus scelerisque eleifend donec pretium. Vitae tortor condimentum lacinia quis vel eros. Ac tincidunt vitae semper quis lectus nulla at volutpat diam. Eget sit amet tellus cras adipiscing. Morbi tristique senectus et netus. Nullam vehicula ipsum a arcu cursus vitae congue mauris rhoncus. Auctor urna nunc id cursus metus aliquam eleifend. Ultrices vitae auctor eu augue. Eu non diam phasellus vestibulum lorem sed risus ultricies. Fames ac turpis egestas sed tempus. Volutpat blandit aliquam etiam erat. Dictum varius duis at consectetur lorem. Sit amet volutpat consequat mauris nunc congue. Volutpat sed cras ornare arcu dui vivamus arcu felis. - -Scelerisque fermentum dui faucibus in ornare quam viverra. Interdum velit laoreet id donec ultrices tincidunt arcu. Netus et malesuada fames ac. Netus et malesuada fames ac turpis. Suscipit tellus mauris a diam maecenas sed enim ut sem. Id velit ut tortor pretium. Neque aliquam vestibulum morbi blandit cursus risus at. Cum sociis natoque penatibus et magnis dis parturient. Lobortis elementum nibh tellus molestie nunc non blandit. Ipsum dolor sit amet consectetur adipiscing elit duis tristique. Amet nisl purus in mollis. Amet massa vitae tortor condimentum lacinia quis vel eros donec. Proin sagittis nisl rhoncus mattis rhoncus urna neque viverra justo. - -Nullam ac tortor vitae purus faucibus. Dis parturient montes nascetur ridiculus mus mauris. 
Molestie at elementum eu facilisis sed odio morbi. Scelerisque felis imperdiet proin fermentum leo vel orci porta. Lectus proin nibh nisl condimentum id venenatis a. Eget nullam non nisi est sit amet facilisis. Hendrerit gravida rutrum quisque non tellus orci ac auctor. Ut faucibus pulvinar elementum integer enim. Rhoncus dolor purus non enim praesent elementum facilisis. Enim sed faucibus turpis in eu mi bibendum. Faucibus nisl tincidunt eget nullam. - -Cursus risus at ultrices mi tempus imperdiet nulla malesuada pellentesque. Pretium nibh ipsum consequat nisl vel pretium lectus quam. Semper viverra nam libero justo laoreet sit amet cursus sit. Augue eget arcu dictum varius duis at consectetur lorem donec. Et malesuada fames ac turpis. Erat nam at lectus urna duis convallis convallis. Dictum sit amet justo donec enim. Urna condimentum mattis pellentesque id nibh tortor id. Morbi tempus iaculis urna id. Lectus proin nibh nisl condimentum id venenatis a condimentum. Nibh sit amet commodo nulla facilisi nullam vehicula. Dui faucibus in ornare quam. Gravida arcu ac tortor dignissim convallis aenean. Consectetur adipiscing elit pellentesque habitant morbi tristique. Pulvinar elementum integer enim neque volutpat ac tincidunt vitae. Pharetra pharetra massa massa ultricies mi quis hendrerit. Dictum at tempor commodo ullamcorper a lacus vestibulum sed. Mattis pellentesque id nibh tortor id. Ultricies integer quis auctor elit sed vulputate. Pretium vulputate sapien nec sagittis aliquam malesuada. - -Auctor augue mauris augue neque gravida. Porttitor lacus luctus accumsan tortor posuere ac ut. Urna neque viverra justo nec ultrices dui. Sit amet est placerat in egestas. Urna nec tincidunt praesent semper feugiat nibh sed pulvinar. Tincidunt eget nullam non nisi est sit amet facilisis magna. Elementum tempus egestas sed sed risus pretium quam vulputate dignissim. Fermentum posuere urna nec tincidunt praesent semper feugiat nibh sed. 
Porttitor eget dolor morbi non arcu risus quis. Non quam lacus suspendisse faucibus interdum. Venenatis cras sed felis eget velit aliquet sagittis id. Arcu ac tortor dignissim convallis aenean et. Morbi tincidunt ornare massa eget egestas purus. Ac feugiat sed lectus vestibulum mattis ullamcorper velit sed ullamcorper. Vestibulum morbi blandit cursus risus at ultrices. Volutpat blandit aliquam etiam erat velit scelerisque. - -Et egestas quis ipsum suspendisse. Amet consectetur adipiscing elit duis. Purus ut faucibus pulvinar elementum integer enim neque. Cursus vitae congue mauris rhoncus aenean vel elit scelerisque mauris. Tincidunt eget nullam non nisi est. Aliquam purus sit amet luctus. Dui ut ornare lectus sit amet est placerat in. Fringilla ut morbi tincidunt augue interdum velit euismod in. Felis eget nunc lobortis mattis aliquam faucibus purus in. Suspendisse interdum consectetur libero id faucibus nisl. - -Scelerisque fermentum dui faucibus in ornare quam. Lectus proin nibh nisl condimentum id venenatis a condimentum vitae. Fames ac turpis egestas integer eget aliquet nibh praesent tristique. Arcu non sodales neque sodales ut etiam sit. Pharetra convallis posuere morbi leo urna. Nec dui nunc mattis enim ut tellus. Nunc sed augue lacus viverra vitae. Consequat id porta nibh venenatis cras sed felis. Dolor sit amet consectetur adipiscing. Tellus rutrum tellus pellentesque eu tincidunt tortor aliquam nulla. - -Metus aliquam eleifend mi in nulla posuere. Blandit massa enim nec dui nunc mattis enim. Aliquet nibh praesent tristique magna. In aliquam sem fringilla ut. Magna fermentum iaculis eu non. Eget aliquet nibh praesent tristique magna sit amet purus. Ultrices gravida dictum fusce ut placerat orci. Fermentum posuere urna nec tincidunt praesent. Enim tortor at auctor urna nunc. Ridiculus mus mauris vitae ultricies leo integer malesuada nunc vel. Sed id semper risus in hendrerit gravida rutrum. Vestibulum lectus mauris ultrices eros in cursus turpis. 
Et sollicitudin ac orci phasellus egestas tellus rutrum. Pellentesque elit ullamcorper dignissim cras tincidunt lobortis feugiat vivamus at. Metus vulputate eu scelerisque felis imperdiet proin fermentum leo. Porta non pulvinar neque laoreet suspendisse. Suscipit adipiscing bibendum est ultricies integer quis auctor elit sed. Euismod in pellentesque massa placerat duis ultricies lacus sed. Pellentesque adipiscing commodo elit at imperdiet dui accumsan sit amet. - -Pellentesque eu tincidunt tortor aliquam nulla facilisi. Commodo nulla facilisi nullam vehicula ipsum a arcu. Commodo quis imperdiet massa tincidunt nunc pulvinar sapien et. Faucibus purus in massa tempor. Purus semper eget duis at tellus at urna condimentum. Vivamus at augue eget arcu dictum. Lacus vel facilisis volutpat est velit egestas dui id. Malesuada fames ac turpis egestas maecenas pharetra. Nunc faucibus a pellentesque sit amet porttitor eget dolor. Ultricies tristique nulla aliquet enim. Vel risus commodo viverra maecenas accumsan lacus vel facilisis volutpat. Dignissim diam quis enim lobortis scelerisque. Donec ultrices tincidunt arcu non sodales neque sodales ut etiam. - -Vitae proin sagittis nisl rhoncus mattis rhoncus urna neque. Fermentum leo vel orci porta non. At elementum eu facilisis sed. Quis enim lobortis scelerisque fermentum. Fermentum odio eu feugiat pretium nibh ipsum consequat. Habitant morbi tristique senectus et netus et. Enim praesent elementum facilisis leo vel fringilla est ullamcorper. Egestas quis ipsum suspendisse ultrices gravida dictum. Nam libero justo laoreet sit amet cursus sit amet. Viverra tellus in hac habitasse platea dictumst vestibulum. Varius vel pharetra vel turpis nunc eget. Nullam non nisi est sit amet facilisis magna. Ullamcorper eget nulla facilisi etiam dignissim diam. Ante metus dictum at tempor commodo ullamcorper a lacus. - -Etiam non quam lacus suspendisse. Ut venenatis tellus in metus vulputate eu scelerisque felis. 
Pulvinar sapien et ligula ullamcorper malesuada proin libero. Consequat interdum varius sit amet mattis. Nunc eget lorem dolor sed viverra ipsum nunc aliquet. Potenti nullam ac tortor vitae purus faucibus ornare. Urna et pharetra pharetra massa massa ultricies mi quis hendrerit. Purus in mollis nunc sed id. Pharetra vel turpis nunc eget lorem dolor sed viverra. Et netus et malesuada fames ac turpis. Libero id faucibus nisl tincidunt eget nullam non nisi. Cursus sit amet dictum sit amet. Porttitor lacus luctus accumsan tortor. - -Volutpat diam ut venenatis tellus in metus vulputate eu scelerisque. Sed viverra tellus in hac habitasse. Aliquam sem et tortor consequat id. Pellentesque habitant morbi tristique senectus et netus et. Consectetur purus ut faucibus pulvinar elementum. Aliquam malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Malesuada bibendum arcu vitae elementum curabitur vitae nunc sed. Sollicitudin tempor id eu nisl nunc mi ipsum. Fringilla phasellus faucibus scelerisque eleifend donec pretium vulputate sapien nec. Quis eleifend quam adipiscing vitae proin sagittis nisl rhoncus. Bibendum neque egestas congue quisque egestas. A iaculis at erat pellentesque adipiscing commodo elit at imperdiet. Pulvinar etiam non quam lacus. Adipiscing commodo elit at imperdiet. Scelerisque eu ultrices vitae auctor. Sed cras ornare arcu dui vivamus arcu felis bibendum ut. Ornare lectus sit amet est. - -Consequat semper viverra nam libero justo laoreet sit. Imperdiet sed euismod nisi porta lorem mollis aliquam ut porttitor. Cras sed felis eget velit aliquet sagittis id consectetur. Dolor morbi non arcu risus quis. Adipiscing tristique risus nec feugiat in fermentum posuere urna. Dolor magna eget est lorem ipsum dolor. Mauris pharetra et ultrices neque ornare aenean euismod. Nulla facilisi etiam dignissim diam quis. Ultrices tincidunt arcu non sodales. Fames ac turpis egestas maecenas pharetra convallis posuere morbi leo. 
Interdum varius sit amet mattis vulputate. Tincidunt praesent semper feugiat nibh sed pulvinar. Quisque sagittis purus sit amet volutpat. - -Sed vulputate odio ut enim blandit. Vitae auctor eu augue ut lectus arcu bibendum. Consectetur adipiscing elit pellentesque habitant morbi tristique senectus et. Scelerisque eu ultrices vitae auctor eu augue. Etiam dignissim diam quis enim lobortis scelerisque fermentum dui faucibus. Tellus integer feugiat scelerisque varius. Vulputate enim nulla aliquet porttitor lacus luctus accumsan tortor. Amet nisl purus in mollis. Scelerisque viverra mauris in aliquam sem fringilla ut morbi tincidunt. Semper eget duis at tellus at. Erat velit scelerisque in dictum non consectetur a erat nam. Gravida rutrum quisque non tellus orci. Morbi blandit cursus risus at. Mauris sit amet massa vitae. Non odio euismod lacinia at quis risus sed vulputate. Fermentum posuere urna nec tincidunt praesent. Ut eu sem integer vitae justo eget magna fermentum iaculis. Ullamcorper velit sed ullamcorper morbi tincidunt ornare massa. Arcu cursus euismod quis viverra nibh. Arcu dui vivamus arcu felis bibendum. - -Eros in cursus turpis massa tincidunt dui ut. Urna condimentum mattis pellentesque id nibh tortor id aliquet lectus. Nibh venenatis cras sed felis. Ac felis donec et odio pellentesque diam. Ultricies lacus sed turpis tincidunt id aliquet risus. Diam volutpat commodo sed egestas. Dignissim sodales ut eu sem integer vitae. Pellentesque eu tincidunt tortor aliquam nulla facilisi. Et tortor consequat id porta nibh venenatis cras sed. 
\ No newline at end of file diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder.go deleted file mode 100644 index a57ca7b3a1..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Package blockrecorder provides utilits to record locations of CIDs to a -temporary metadata file, since writing a CAR happens BEFORE we actually hand off for sealing. -The metadata file is later used to populate the PieceStore -*/ -package blockrecorder - -import ( - "bufio" - "io" - - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" -) - -//go:generate cbor-gen-for PieceBlockMetadata - -// PieceBlockMetadata is a record of where a given CID lives in a piece, -// in terms of its offset and size -type PieceBlockMetadata struct { - CID cid.Cid - Offset uint64 - Size uint64 -} - -// RecordEachBlockTo returns a OnNewCarBlockFunc that records the exact -// location of a given block's data in a CAR file, and writes that data -// to the given writer -func RecordEachBlockTo(out io.Writer) car.OnNewCarBlockFunc { - return func(block car.Block) error { - pbMetadata := &PieceBlockMetadata{ - CID: block.BlockCID, - Offset: block.Offset + block.Size - uint64(len(block.Data)), - Size: uint64(len(block.Data)), - } - return pbMetadata.MarshalCBOR(out) - } -} - -// ReadBlockMetadata reads previously recorded block metadata -func ReadBlockMetadata(input io.Reader) ([]PieceBlockMetadata, error) { - var metadatas []PieceBlockMetadata - buf := bufio.NewReaderSize(input, 16) - for { - var nextMetadata PieceBlockMetadata - err := nextMetadata.UnmarshalCBOR(buf) - if err != nil { - if err != io.EOF { - return nil, err - } - return metadatas, nil - } - metadatas = append(metadatas, nextMetadata) - } -} diff --git 
a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder_cbor_gen.go deleted file mode 100644 index 41b9808fe8..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder_cbor_gen.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package blockrecorder - -import ( - "fmt" - "io" - - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *PieceBlockMetadata) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.CID (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.CID); err != nil { - return xerrors.Errorf("failed to write cid field t.CID: %w", err) - } - - // t.Offset (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Offset))); err != nil { - return err - } - - // t.Size (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Size))); err != nil { - return err - } - - return nil -} - -func (t *PieceBlockMetadata) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.CID (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.CID: %w", err) - } - - t.CID = c - - } - // t.Offset (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { 
- return fmt.Errorf("wrong type for uint64 field") - } - t.Offset = uint64(extra) - - } - // t.Size (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Size = uint64(extra) - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder_test.go deleted file mode 100644 index 6afe5b8b3b..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder/blockrecorder_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package blockrecorder_test - -import ( - "bytes" - "context" - "testing" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipld/go-car" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder" -) - -func TestBlockRecording(t *testing.T) { - testData := shared_testutil.NewTestIPLDTree() - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - node := ssb.ExploreFields(func(efsb builder.ExploreFieldsSpecBuilder) { - efsb.Insert("linkedMap", - ssb.ExploreRecursive(selector.RecursionLimitNone(), ssb.ExploreAll(ssb.ExploreRecursiveEdge()))) - }).Node() - - ctx := context.Background() - sc := car.NewSelectiveCar(ctx, testData, []car.Dag{ - car.Dag{ - Root: testData.RootNodeLnk.(cidlink.Link).Cid, - Selector: node, - }, - }) - - carBuf := new(bytes.Buffer) - blockLocationBuf := new(bytes.Buffer) - err := sc.Write(carBuf, 
blockrecorder.RecordEachBlockTo(blockLocationBuf)) - require.NoError(t, err) - - metadata, err := blockrecorder.ReadBlockMetadata(blockLocationBuf) - require.NoError(t, err) - - blks := []blocks.Block{ - testData.LeafAlphaBlock, - testData.MiddleMapBlock, - testData.RootBlock, - } - carBytes := carBuf.Bytes() - for _, blk := range blks { - cid := blk.Cid() - var found bool - var metadatum blockrecorder.PieceBlockMetadata - for _, testMetadatum := range metadata { - if testMetadatum.CID.Equals(cid) { - metadatum = testMetadatum - found = true - break - } - } - require.True(t, found) - testBuf := carBytes[metadatum.Offset : metadatum.Offset+metadatum.Size] - require.Equal(t, blk.RawData(), testBuf) - } - missingBlks := []blocks.Block{ - testData.LeafBetaBlock, - testData.MiddleListBlock, - } - for _, blk := range missingBlks { - cid := blk.Cid() - var found bool - for _, testMetadatum := range metadata { - if testMetadatum.CID.Equals(cid) { - found = true - break - } - } - require.False(t, found) - } -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client.go deleted file mode 100644 index b1048a6aad..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client.go +++ /dev/null @@ -1,552 +0,0 @@ -package storageimpl - -import ( - "context" - "fmt" - "time" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/hannahhoward/go-pubsub" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore 
"github.com/ipfs/go-ipfs-blockstore" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-ipld-prime" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/pieceio" - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -var log = logging.Logger("storagemarket_impl") - -const DefaultPollingInterval = 30 * time.Second - -var _ storagemarket.StorageClient = &Client{} - -// Client is the production implementation of the StorageClient interface -type Client struct { - net network.StorageMarketNetwork - - dataTransfer datatransfer.Manager - bs blockstore.Blockstore - pio pieceio.PieceIO - discovery *discovery.Local - - node storagemarket.StorageClientNode - pubSub *pubsub.PubSub - statemachines fsm.Group - pollingInterval time.Duration -} - -// StorageClientOption allows custom configuration of a storage client -type StorageClientOption func(c *Client) - -// DealPollingInterval sets the interval at which this client will query the Provider for deal state while -// waiting for deal acceptance -func DealPollingInterval(t time.Duration) StorageClientOption { - return func(c *Client) { - c.pollingInterval = t - } -} - -// NewClient creates a new storage client -func NewClient( - net network.StorageMarketNetwork, - bs blockstore.Blockstore, - dataTransfer datatransfer.Manager, - discovery *discovery.Local, - ds datastore.Batching, - scn 
storagemarket.StorageClientNode, - options ...StorageClientOption, -) (*Client, error) { - carIO := cario.NewCarIO() - pio := pieceio.NewPieceIO(carIO, bs) - - c := &Client{ - net: net, - dataTransfer: dataTransfer, - bs: bs, - pio: pio, - discovery: discovery, - node: scn, - pubSub: pubsub.New(clientDispatcher), - pollingInterval: DefaultPollingInterval, - } - - statemachines, err := newClientStateMachine( - ds, - &clientDealEnvironment{c}, - c.dispatch, - ) - if err != nil { - return nil, err - } - c.statemachines = statemachines - - c.Configure(options...) - - // register a data transfer event handler -- this will send events to the state machines based on DT events - dataTransfer.SubscribeToEvents(dtutils.ClientDataTransferSubscriber(statemachines)) - - return c, nil -} - -// Start initializes deal processing on a StorageClient and restarts -// in progress deals -func (c *Client) Start(ctx context.Context) error { - go func() { - err := c.restartDeals() - if err != nil { - log.Errorf("Failed to restart deals: %s", err.Error()) - } - }() - return nil -} - -// Stop ends deal processing on a StorageClient -func (c *Client) Stop() error { - return c.statemachines.Stop(context.TODO()) -} - -// ListProviders queries chain state and returns active storage providers -func (c *Client) ListProviders(ctx context.Context) (<-chan storagemarket.StorageProviderInfo, error) { - tok, _, err := c.node.GetChainHead(ctx) - if err != nil { - return nil, err - } - - providers, err := c.node.ListStorageProviders(ctx, tok) - if err != nil { - return nil, err - } - - out := make(chan storagemarket.StorageProviderInfo) - - go func() { - defer close(out) - for _, p := range providers { - select { - case out <- *p: - case <-ctx.Done(): - return - } - } - }() - - return out, nil -} - -// ListDeals lists on-chain deals associated with this storage client -func (c *Client) ListDeals(ctx context.Context, addr address.Address) ([]storagemarket.StorageDeal, error) { - tok, _, err := 
c.node.GetChainHead(ctx) - if err != nil { - return nil, err - } - - return c.node.ListClientDeals(ctx, addr, tok) -} - -// ListLocalDeals lists deals initiated by this storage client -func (c *Client) ListLocalDeals(ctx context.Context) ([]storagemarket.ClientDeal, error) { - var out []storagemarket.ClientDeal - if err := c.statemachines.List(&out); err != nil { - return nil, err - } - return out, nil -} - -// GetLocalDeal lists deals that are in progress or rejected -func (c *Client) GetLocalDeal(ctx context.Context, cid cid.Cid) (storagemarket.ClientDeal, error) { - var out storagemarket.ClientDeal - if err := c.statemachines.Get(cid).Get(&out); err != nil { - return storagemarket.ClientDeal{}, err - } - return out, nil -} - -// GetAsk queries a provider for its current storage ask -// -// The client creates a new `StorageAskStream` for the chosen peer ID, -// and calls WriteAskRequest on it, which constructs a message and writes it to the Ask stream. -// When it receives a response, it verifies the signature and returns the validated -// StorageAsk if successful -func (c *Client) GetAsk(ctx context.Context, info storagemarket.StorageProviderInfo) (*storagemarket.SignedStorageAsk, error) { - if len(info.Addrs) > 0 { - c.net.AddAddrs(info.PeerID, info.Addrs) - } - s, err := c.net.NewAskStream(ctx, info.PeerID) - if err != nil { - return nil, xerrors.Errorf("failed to open stream to miner: %w", err) - } - - request := network.AskRequest{Miner: info.Address} - if err := s.WriteAskRequest(request); err != nil { - return nil, xerrors.Errorf("failed to send ask request: %w", err) - } - - out, err := s.ReadAskResponse() - if err != nil { - return nil, xerrors.Errorf("failed to read ask response: %w", err) - } - - if out.Ask == nil { - return nil, xerrors.Errorf("got no ask back") - } - - if out.Ask.Ask.Miner != info.Address { - return nil, xerrors.Errorf("got back ask for wrong miner") - } - - tok, _, err := c.node.GetChainHead(ctx) - if err != nil { - return nil, err 
- } - - isValid, err := c.node.ValidateAskSignature(ctx, out.Ask, tok) - if err != nil { - return nil, err - } - - if !isValid { - return nil, xerrors.Errorf("ask was not properly signed") - } - - return out.Ask, nil -} - -// GetProviderDealState queries a provider for the current state of a client's deal -func (c *Client) GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*storagemarket.ProviderDealState, error) { - var deal storagemarket.ClientDeal - err := c.statemachines.Get(proposalCid).Get(&deal) - if err != nil { - return nil, xerrors.Errorf("could not get client deal state: %w", err) - } - - s, err := c.net.NewDealStatusStream(ctx, deal.Miner) - if err != nil { - return nil, xerrors.Errorf("failed to open stream to miner: %w", err) - } - - buf, err := cborutil.Dump(&deal.ProposalCid) - if err != nil { - return nil, xerrors.Errorf("failed serialize deal status request: %w", err) - } - - addr, err := c.node.GetDefaultWalletAddress(ctx) - if err != nil { - return nil, xerrors.Errorf("failed to get client address: %w", err) - } - - signature, err := c.node.SignBytes(ctx, addr, buf) - if err != nil { - return nil, xerrors.Errorf("failed to sign deal status request: %w", err) - } - - if err := s.WriteDealStatusRequest(network.DealStatusRequest{Proposal: proposalCid, Signature: *signature}); err != nil { - return nil, xerrors.Errorf("failed to send deal status request: %w", err) - } - - resp, err := s.ReadDealStatusResponse() - if err != nil { - return nil, xerrors.Errorf("failed to read deal status response: %w", err) - } - - valid, err := c.verifyStatusResponseSignature(ctx, deal.MinerWorker, resp) - if err != nil { - return nil, err - } - - if !valid { - return nil, xerrors.Errorf("invalid deal status response signature") - } - - return &resp.DealState, nil -} - -/* -ProposeStorageDeal initiates the retrieval deal flow, which involves multiple requests and responses. 
- -This function is called after using ListProviders and QueryAs are used to identify an appropriate provider -to store data. The parameters passed to ProposeStorageDeal should matched those returned by the miner from -QueryAsk to ensure the greatest likelihood the provider will accept the deal. - -When called, the client takes the following actions: - -1. Calculates the PieceCID for this deal from the given PayloadCID. (by writing the payload to a CAR file then calculating -a merkle root for the resulting data) - -2. Constructs a `DealProposal` (spec-actors type) with deal terms - -3. Signs the `DealProposal` to make a ClientDealProposal - -4. Gets the CID for the ClientDealProposal - -5. Construct a ClientDeal to track the state of this deal. - -6. Tells its statemachine to begin tracking the deal state by the CID of the ClientDealProposal - -7. Triggers a `ClientEventOpen` event on its statemachine. - -8. Records the Provider as a possible peer for retrieving this data in the future - -From then on, the statemachine controls the deal flow in the client. Other components may listen for events in this flow by calling -`SubscribeToEvents` on the Client. The Client also provides access to the node and network and other functionality through -its implementation of the Client FSM's ClientDealEnvironment. 
- -Documentation of the client state machine can be found at https://godoc.org/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates -*/ -func (c *Client) ProposeStorageDeal(ctx context.Context, addr address.Address, info *storagemarket.StorageProviderInfo, data *storagemarket.DataRef, startEpoch abi.ChainEpoch, endEpoch abi.ChainEpoch, price abi.TokenAmount, collateral abi.TokenAmount, rt abi.RegisteredSealProof, fastRetrieval bool, verifiedDeal bool) (*storagemarket.ProposeStorageDealResult, error) { - commP, pieceSize, err := clientutils.CommP(ctx, c.pio, rt, data) - if err != nil { - return nil, xerrors.Errorf("computing commP failed: %w", err) - } - - if uint64(pieceSize.Padded()) > info.SectorSize { - return nil, fmt.Errorf("cannot propose a deal whose piece size (%d) is greater than sector size (%d)", pieceSize.Padded(), info.SectorSize) - } - - dealProposal := market.DealProposal{ - PieceCID: commP, - PieceSize: pieceSize.Padded(), - Client: addr, - Provider: info.Address, - StartEpoch: startEpoch, - EndEpoch: endEpoch, - StoragePricePerEpoch: price, - ProviderCollateral: abi.NewTokenAmount(int64(pieceSize)), // TODO: real calc - ClientCollateral: big.Zero(), - VerifiedDeal: verifiedDeal, - } - - clientDealProposal, err := c.node.SignProposal(ctx, addr, dealProposal) - if err != nil { - return nil, xerrors.Errorf("signing deal proposal failed: %w", err) - } - - proposalNd, err := cborutil.AsIpld(clientDealProposal) - if err != nil { - return nil, xerrors.Errorf("getting proposal node failed: %w", err) - } - - deal := &storagemarket.ClientDeal{ - ProposalCid: proposalNd.Cid(), - ClientDealProposal: *clientDealProposal, - State: storagemarket.StorageDealUnknown, - Miner: info.PeerID, - MinerWorker: info.Worker, - DataRef: data, - FastRetrieval: fastRetrieval, - } - - err = c.statemachines.Begin(proposalNd.Cid(), deal) - if err != nil { - return nil, xerrors.Errorf("setting up deal tracking: %w", err) - } - - err = 
c.statemachines.Send(deal.ProposalCid, storagemarket.ClientEventOpen) - if err != nil { - return nil, xerrors.Errorf("initializing state machine: %w", err) - } - - return &storagemarket.ProposeStorageDealResult{ - ProposalCid: deal.ProposalCid, - }, c.discovery.AddPeer(data.Root, retrievalmarket.RetrievalPeer{ - Address: dealProposal.Provider, - ID: deal.Miner, - }) -} - -// GetPaymentEscrow returns the current funds available for deal payment -func (c *Client) GetPaymentEscrow(ctx context.Context, addr address.Address) (storagemarket.Balance, error) { - tok, _, err := c.node.GetChainHead(ctx) - if err != nil { - return storagemarket.Balance{}, err - } - - return c.node.GetBalance(ctx, addr, tok) -} - -// AddPaymentEscrow adds funds for storage deals -func (c *Client) AddPaymentEscrow(ctx context.Context, addr address.Address, amount abi.TokenAmount) error { - done := make(chan error, 1) - - mcid, err := c.node.AddFunds(ctx, addr, amount) - if err != nil { - return err - } - - err = c.node.WaitForMessage(ctx, mcid, func(code exitcode.ExitCode, bytes []byte, err error) error { - if err != nil { - done <- xerrors.Errorf("AddFunds errored: %w", err) - } else if code != exitcode.Ok { - done <- xerrors.Errorf("AddFunds error, exit code: %s", code.String()) - } else { - done <- nil - } - return nil - }) - - if err != nil { - return err - } - - return <-done -} - -// SubscribeToEvents allows another component to listen for events on the StorageClient -// in order to track deals as they progress through the deal flow -func (c *Client) SubscribeToEvents(subscriber storagemarket.ClientSubscriber) shared.Unsubscribe { - return shared.Unsubscribe(c.pubSub.Subscribe(subscriber)) -} - -// PollingInterval is a getter for the polling interval option -func (c *Client) PollingInterval() time.Duration { - return c.pollingInterval -} - -// Configure applies the given list of StorageClientOptions after a StorageClient -// is initialized -func (c *Client) Configure(options 
...StorageClientOption) { - for _, option := range options { - option(c) - } -} - -func (c *Client) restartDeals() error { - var deals []storagemarket.ClientDeal - err := c.statemachines.List(&deals) - if err != nil { - return err - } - - for _, deal := range deals { - err = c.statemachines.Send(deal.ProposalCid, storagemarket.ClientEventRestart) - if err != nil { - return err - } - } - return nil -} - -func (c *Client) dispatch(eventName fsm.EventName, deal fsm.StateType) { - evt, ok := eventName.(storagemarket.ClientEvent) - if !ok { - log.Errorf("dropped bad event %s", eventName) - } - realDeal, ok := deal.(storagemarket.ClientDeal) - if !ok { - log.Errorf("not a ClientDeal %v", deal) - } - pubSubEvt := internalClientEvent{evt, realDeal} - - if err := c.pubSub.Publish(pubSubEvt); err != nil { - log.Errorf("failed to publish event %d", evt) - } -} - -func (c *Client) verifyStatusResponseSignature(ctx context.Context, miner address.Address, response network.DealStatusResponse) (bool, error) { - tok, _, err := c.node.GetChainHead(ctx) - if err != nil { - return false, xerrors.Errorf("getting chain head: %w", err) - } - - buf, err := cborutil.Dump(&response.DealState) - if err != nil { - return false, xerrors.Errorf("serializing: %w", err) - } - - valid, err := c.node.VerifySignature(ctx, response.Signature, miner, buf, tok) - if err != nil { - return false, xerrors.Errorf("validating signature: %w", err) - } - - return valid, nil -} - -func newClientStateMachine(ds datastore.Datastore, env fsm.Environment, notifier fsm.Notifier) (fsm.Group, error) { - return fsm.New(ds, fsm.Parameters{ - Environment: env, - StateType: storagemarket.ClientDeal{}, - StateKeyField: "State", - Events: clientstates.ClientEvents, - StateEntryFuncs: clientstates.ClientStateEntryFuncs, - FinalityStates: clientstates.ClientFinalityStates, - Notifier: notifier, - }) -} - -type internalClientEvent struct { - evt storagemarket.ClientEvent - deal storagemarket.ClientDeal -} - -func 
clientDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error { - ie, ok := evt.(internalClientEvent) - if !ok { - return xerrors.New("wrong type of event") - } - cb, ok := fn.(storagemarket.ClientSubscriber) - if !ok { - return xerrors.New("wrong type of event") - } - cb(ie.evt, ie.deal) - return nil -} - -// ------- -// clientDealEnvironment -// ------- - -type clientDealEnvironment struct { - c *Client -} - -func (c *clientDealEnvironment) NewDealStream(ctx context.Context, p peer.ID) (network.StorageDealStream, error) { - return c.c.net.NewDealStream(ctx, p) -} - -func (c *clientDealEnvironment) Node() storagemarket.StorageClientNode { - return c.c.node -} - -func (c *clientDealEnvironment) StartDataTransfer(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { - _, err := c.c.dataTransfer.OpenPushDataChannel(ctx, to, voucher, baseCid, selector) - return err -} - -func (c *clientDealEnvironment) GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*storagemarket.ProviderDealState, error) { - return c.c.GetProviderDealState(ctx, proposalCid) -} - -func (c *clientDealEnvironment) PollingInterval() time.Duration { - return c.c.pollingInterval -} - -// ClientFSMParameterSpec is a valid set of parameters for a client deal FSM - used in doc generation -var ClientFSMParameterSpec = fsm.Parameters{ - Environment: &clientDealEnvironment{}, - StateType: storagemarket.ClientDeal{}, - StateKeyField: "State", - Events: clientstates.ClientEvents, - StateEntryFuncs: clientstates.ClientStateEntryFuncs, - FinalityStates: clientstates.ClientFinalityStates, -} - -var _ clientstates.ClientDealEnvironment = &clientDealEnvironment{} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client_test.go deleted file mode 100644 index 0d4aac9e46..0000000000 --- 
a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/client_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package storageimpl_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - - storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" -) - -func TestClient_Configure(t *testing.T) { - c := &storageimpl.Client{} - assert.Equal(t, time.Duration(0), c.PollingInterval()) - - c.Configure(storageimpl.DealPollingInterval(123 * time.Second)) - - assert.Equal(t, 123*time.Second, c.PollingInterval()) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_fsm.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_fsm.go deleted file mode 100644 index b44407d5cc..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_fsm.go +++ /dev/null @@ -1,157 +0,0 @@ -package clientstates - -import ( - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/storagemarket" -) - -// ClientEvents are the events that can happen in a storage client -var ClientEvents = fsm.Events{ - fsm.Event(storagemarket.ClientEventOpen). - From(storagemarket.StorageDealUnknown).To(storagemarket.StorageDealEnsureClientFunds), - fsm.Event(storagemarket.ClientEventFundingInitiated). - From(storagemarket.StorageDealEnsureClientFunds).To(storagemarket.StorageDealClientFunding). - Action(func(deal *storagemarket.ClientDeal, mcid cid.Cid) error { - deal.AddFundsCid = &mcid - return nil - }), - fsm.Event(storagemarket.ClientEventEnsureFundsFailed). - FromMany(storagemarket.StorageDealClientFunding, storagemarket.StorageDealEnsureClientFunds).To(storagemarket.StorageDealFailing). 
- Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("adding market funds failed: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventFundsEnsured). - FromMany(storagemarket.StorageDealEnsureClientFunds, storagemarket.StorageDealClientFunding).To(storagemarket.StorageDealFundsEnsured), - fsm.Event(storagemarket.ClientEventWriteProposalFailed). - From(storagemarket.StorageDealFundsEnsured).To(storagemarket.StorageDealError). - Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("sending proposal to storage provider failed: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventReadResponseFailed). - From(storagemarket.StorageDealFundsEnsured).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("error reading Response message: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventResponseVerificationFailed). - From(storagemarket.StorageDealFundsEnsured).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.ClientDeal) error { - deal.Message = "unable to verify signature on deal response" - return nil - }), - fsm.Event(storagemarket.ClientEventInitiateDataTransfer). - From(storagemarket.StorageDealFundsEnsured).To(storagemarket.StorageDealStartDataTransfer), - fsm.Event(storagemarket.ClientEventUnexpectedDealState). - From(storagemarket.StorageDealFundsEnsured).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.ClientDeal, status storagemarket.StorageDealStatus, providerMessage string) error { - deal.Message = xerrors.Errorf("unexpected deal status while waiting for data request: %d (%s). Provider message: %s", status, storagemarket.DealStates[status], providerMessage).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventDataTransferFailed). 
- FromMany(storagemarket.StorageDealStartDataTransfer, storagemarket.StorageDealTransferring).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("failed to initiate data transfer: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventDataTransferInitiated). - From(storagemarket.StorageDealStartDataTransfer).To(storagemarket.StorageDealTransferring), - fsm.Event(storagemarket.ClientEventDataTransferComplete). - FromMany(storagemarket.StorageDealTransferring, storagemarket.StorageDealStartDataTransfer).To(storagemarket.StorageDealCheckForAcceptance), - fsm.Event(storagemarket.ClientEventWaitForDealState). - From(storagemarket.StorageDealCheckForAcceptance).ToNoChange(). - Action(func(deal *storagemarket.ClientDeal, pollError bool) error { - deal.PollRetryCount += 1 - if pollError { - deal.PollErrorCount += 1 - } - return nil - }), - fsm.Event(storagemarket.ClientEventResponseDealDidNotMatch). - From(storagemarket.StorageDealCheckForAcceptance).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.ClientDeal, responseCid cid.Cid, proposalCid cid.Cid) error { - deal.Message = xerrors.Errorf("miner responded to a wrong proposal: %s != %s", responseCid, proposalCid).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventDealRejected). - From(storagemarket.StorageDealCheckForAcceptance).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.ClientDeal, state storagemarket.StorageDealStatus, reason string) error { - deal.Message = xerrors.Errorf("deal failed: (State=%d) %s", state, reason).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventDealAccepted). - From(storagemarket.StorageDealCheckForAcceptance).To(storagemarket.StorageDealProposalAccepted). 
- Action(func(deal *storagemarket.ClientDeal, publishMessage *cid.Cid) error { - deal.PublishMessage = publishMessage - return nil - }), - fsm.Event(storagemarket.ClientEventStreamCloseError). - FromAny().To(storagemarket.StorageDealError). - Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("error attempting to close stream: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventDealPublishFailed). - From(storagemarket.StorageDealProposalAccepted).To(storagemarket.StorageDealError). - Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("error validating deal published: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventDealPublished). - From(storagemarket.StorageDealProposalAccepted).To(storagemarket.StorageDealSealing). - Action(func(deal *storagemarket.ClientDeal, dealID abi.DealID) error { - deal.DealID = dealID - return nil - }), - fsm.Event(storagemarket.ClientEventDealActivationFailed). - From(storagemarket.StorageDealSealing).To(storagemarket.StorageDealError). - Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("error in deal activation: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventDealActivated). - From(storagemarket.StorageDealSealing).To(storagemarket.StorageDealActive), - fsm.Event(storagemarket.ClientEventDealSlashed). - From(storagemarket.StorageDealActive).To(storagemarket.StorageDealSlashed). - Action(func(deal *storagemarket.ClientDeal, slashEpoch abi.ChainEpoch) error { - deal.SlashEpoch = slashEpoch - return nil - }), - fsm.Event(storagemarket.ClientEventDealExpired). - From(storagemarket.StorageDealActive).To(storagemarket.StorageDealExpired), - fsm.Event(storagemarket.ClientEventDealCompletionFailed). - From(storagemarket.StorageDealActive).To(storagemarket.StorageDealError). 
- Action(func(deal *storagemarket.ClientDeal, err error) error { - deal.Message = xerrors.Errorf("error waiting for deal completion: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ClientEventFailed). - From(storagemarket.StorageDealFailing).To(storagemarket.StorageDealError), - fsm.Event(storagemarket.ClientEventRestart).FromAny().ToNoChange(), -} - -// ClientStateEntryFuncs are the handlers for different states in a storage client -var ClientStateEntryFuncs = fsm.StateEntryFuncs{ - storagemarket.StorageDealEnsureClientFunds: EnsureClientFunds, - storagemarket.StorageDealClientFunding: WaitForFunding, - storagemarket.StorageDealFundsEnsured: ProposeDeal, - storagemarket.StorageDealStartDataTransfer: InitiateDataTransfer, - storagemarket.StorageDealCheckForAcceptance: CheckForDealAcceptance, - storagemarket.StorageDealProposalAccepted: ValidateDealPublished, - storagemarket.StorageDealSealing: VerifyDealActivated, - storagemarket.StorageDealActive: WaitForDealCompletion, - storagemarket.StorageDealFailing: FailDeal, -} - -// ClientFinalityStates are the states that terminate deal processing for a deal. -// When a client restarts, it restarts only deals that are not in a finality state. 
-var ClientFinalityStates = []fsm.StateKey{ - storagemarket.StorageDealSlashed, - storagemarket.StorageDealExpired, - storagemarket.StorageDealError, -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_states.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_states.go deleted file mode 100644 index 044a06bbe1..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_states.go +++ /dev/null @@ -1,261 +0,0 @@ -package clientstates - -import ( - "context" - "time" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-ipld-prime" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -var log = logging.Logger("storagemarket_impl") - -// ClientDealEnvironment is an abstraction for interacting with -// dependencies from the storage client environment -type ClientDealEnvironment interface { - Node() storagemarket.StorageClientNode - NewDealStream(ctx context.Context, p peer.ID) (network.StorageDealStream, error) - StartDataTransfer(ctx context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error - GetProviderDealState(ctx context.Context, proposalCid cid.Cid) (*storagemarket.ProviderDealState, error) - PollingInterval() time.Duration -} - -// 
ClientStateEntryFunc is the type for all state entry functions on a storage client -type ClientStateEntryFunc func(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error - -// EnsureClientFunds attempts to ensure the client has enough funds for the deal being proposed -func EnsureClientFunds(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - node := environment.Node() - - tok, _, err := node.GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(storagemarket.ClientEventEnsureFundsFailed, xerrors.Errorf("acquiring chain head: %w", err)) - } - - mcid, err := node.EnsureFunds(ctx.Context(), deal.Proposal.Client, deal.Proposal.Client, deal.Proposal.ClientBalanceRequirement(), tok) - - if err != nil { - return ctx.Trigger(storagemarket.ClientEventEnsureFundsFailed, err) - } - - // if no message was sent, and there was no error, funds were already available - if mcid == cid.Undef { - return ctx.Trigger(storagemarket.ClientEventFundsEnsured) - } - - return ctx.Trigger(storagemarket.ClientEventFundingInitiated, mcid) -} - -// WaitForFunding waits for an AddFunds message to appear on the chain -func WaitForFunding(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - node := environment.Node() - - return node.WaitForMessage(ctx.Context(), *deal.AddFundsCid, func(code exitcode.ExitCode, bytes []byte, err error) error { - if err != nil { - return ctx.Trigger(storagemarket.ClientEventEnsureFundsFailed, xerrors.Errorf("AddFunds err: %w", err)) - } - if code != exitcode.Ok { - return ctx.Trigger(storagemarket.ClientEventEnsureFundsFailed, xerrors.Errorf("AddFunds exit code: %s", code.String())) - } - return ctx.Trigger(storagemarket.ClientEventFundsEnsured) - - }) -} - -// ProposeDeal sends the deal proposal to the provider -func ProposeDeal(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - proposal := 
network.Proposal{ - DealProposal: &deal.ClientDealProposal, - Piece: deal.DataRef, - FastRetrieval: deal.FastRetrieval, - } - - s, err := environment.NewDealStream(ctx.Context(), deal.Miner) - if err != nil { - return ctx.Trigger(storagemarket.ClientEventWriteProposalFailed, err) - } - - if err := s.WriteDealProposal(proposal); err != nil { - return ctx.Trigger(storagemarket.ClientEventWriteProposalFailed, err) - } - - resp, err := s.ReadDealResponse() - if err != nil { - return ctx.Trigger(storagemarket.ClientEventReadResponseFailed, err) - } - - err = s.Close() - if err != nil { - return ctx.Trigger(storagemarket.ClientEventStreamCloseError, err) - } - - tok, _, err := environment.Node().GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(storagemarket.ClientEventResponseVerificationFailed) - } - - if err := clientutils.VerifyResponse(ctx.Context(), resp, deal.MinerWorker, tok, environment.Node().VerifySignature); err != nil { - return ctx.Trigger(storagemarket.ClientEventResponseVerificationFailed) - } - - if resp.Response.State != storagemarket.StorageDealWaitingForData { - return ctx.Trigger(storagemarket.ClientEventUnexpectedDealState, resp.Response.State, resp.Response.Message) - } - - return ctx.Trigger(storagemarket.ClientEventInitiateDataTransfer) -} - -// InitiateDataTransfer initiates data transfer to the provider -func InitiateDataTransfer(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - if deal.DataRef.TransferType == storagemarket.TTManual { - log.Infof("manual data transfer for deal %s", deal.ProposalCid) - return ctx.Trigger(storagemarket.ClientEventDataTransferComplete) - } - - log.Infof("sending data for a deal %s", deal.ProposalCid) - - // initiate a push data transfer. 
This will complete asynchronously and the - // completion of the data transfer will trigger a change in deal state - err := environment.StartDataTransfer(ctx.Context(), - deal.Miner, - &requestvalidation.StorageDataTransferVoucher{Proposal: deal.ProposalCid}, - deal.DataRef.Root, - shared.AllSelector(), - ) - - if err != nil { - return ctx.Trigger(storagemarket.ClientEventDataTransferFailed, xerrors.Errorf("failed to open push data channel: %w", err)) - } - - return ctx.Trigger(storagemarket.ClientEventDataTransferInitiated) -} - -// CheckForDealAcceptance is run until the deal is sealed and published by the provider, or errors -func CheckForDealAcceptance(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - dealState, err := environment.GetProviderDealState(ctx.Context(), deal.ProposalCid) - if err != nil { - log.Warnf("error when querying provider deal state: %w", err) // TODO: at what point do we fail the deal? - return waitAgain(ctx, environment, true) - } - - if isFailed(dealState.State) { - return ctx.Trigger(storagemarket.ClientEventDealRejected, dealState.State, dealState.Message) - } - - if isAccepted(dealState.State) { - if *dealState.ProposalCid != deal.ProposalCid { - return ctx.Trigger(storagemarket.ClientEventResponseDealDidNotMatch, *dealState.ProposalCid, deal.ProposalCid) - } - - return ctx.Trigger(storagemarket.ClientEventDealAccepted, dealState.PublishCid) - } - - return waitAgain(ctx, environment, false) -} - -func waitAgain(ctx fsm.Context, environment ClientDealEnvironment, pollError bool) error { - t := time.NewTimer(environment.PollingInterval()) - - go func() { - select { - case <-t.C: - _ = ctx.Trigger(storagemarket.ClientEventWaitForDealState, pollError) - case <-ctx.Context().Done(): - t.Stop() - return - } - }() - - return nil -} - -// ValidateDealPublished confirms with the chain that a deal was published -func ValidateDealPublished(ctx fsm.Context, environment ClientDealEnvironment, deal 
storagemarket.ClientDeal) error { - - dealID, err := environment.Node().ValidatePublishedDeal(ctx.Context(), deal) - if err != nil { - return ctx.Trigger(storagemarket.ClientEventDealPublishFailed, err) - } - - return ctx.Trigger(storagemarket.ClientEventDealPublished, dealID) -} - -// VerifyDealActivated confirms that a deal was successfully committed to a sector and is active -func VerifyDealActivated(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - cb := func(err error) { - if err != nil { - _ = ctx.Trigger(storagemarket.ClientEventDealActivationFailed, err) - } else { - _ = ctx.Trigger(storagemarket.ClientEventDealActivated) - } - } - - if err := environment.Node().OnDealSectorCommitted(ctx.Context(), deal.Proposal.Provider, deal.DealID, cb); err != nil { - return ctx.Trigger(storagemarket.ClientEventDealActivationFailed, err) - } - - return nil -} - -// WaitForDealCompletion waits for the deal to be slashed or to expire -func WaitForDealCompletion(ctx fsm.Context, environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - node := environment.Node() - - // Called when the deal expires - expiredCb := func(err error) { - if err != nil { - _ = ctx.Trigger(storagemarket.ClientEventDealCompletionFailed, xerrors.Errorf("deal expiration err: %w", err)) - } else { - _ = ctx.Trigger(storagemarket.ClientEventDealExpired) - } - } - - // Called when the deal is slashed - slashedCb := func(slashEpoch abi.ChainEpoch, err error) { - if err != nil { - _ = ctx.Trigger(storagemarket.ClientEventDealCompletionFailed, xerrors.Errorf("deal slashing err: %w", err)) - } else { - _ = ctx.Trigger(storagemarket.ClientEventDealSlashed, slashEpoch) - } - } - - if err := node.OnDealExpiredOrSlashed(ctx.Context(), deal.DealID, expiredCb, slashedCb); err != nil { - return ctx.Trigger(storagemarket.ClientEventDealCompletionFailed, err) - } - - return nil -} - -// FailDeal cleans up a failing deal -func FailDeal(ctx fsm.Context, 
environment ClientDealEnvironment, deal storagemarket.ClientDeal) error { - // TODO: store in some sort of audit log - log.Errorf("deal %s failed: %s", deal.ProposalCid, deal.Message) - - return ctx.Trigger(storagemarket.ClientEventFailed) -} - -func isAccepted(status storagemarket.StorageDealStatus) bool { - return status == storagemarket.StorageDealStaged || - status == storagemarket.StorageDealSealing || - status == storagemarket.StorageDealActive || - status == storagemarket.StorageDealExpired || - status == storagemarket.StorageDealSlashed -} - -func isFailed(status storagemarket.StorageDealStatus) bool { - return status == storagemarket.StorageDealFailing || - status == storagemarket.StorageDealError -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_states_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_states_test.go deleted file mode 100644 index 52e54e1455..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/client_states_test.go +++ /dev/null @@ -1,638 +0,0 @@ -package clientstates_test - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statemachine/fsm" - fsmtest "github.com/filecoin-project/go-statemachine/fsm/testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "golang.org/x/xerrors" - - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" - 
"github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates" - smnet "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" -) - -var clientDealProposal = tut.MakeTestClientDealProposal() - -func TestEnsureFunds(t *testing.T) { - t.Run("immediately succeeds", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealEnsureClientFunds, clientstates.EnsureClientFunds, testCase{ - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFundsEnsured, deal.State) - }, - }) - }) - t.Run("succeeds by sending an AddFunds message", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealEnsureClientFunds, clientstates.EnsureClientFunds, testCase{ - nodeParams: nodeParams{AddFundsCid: tut.GenerateCids(1)[0]}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealClientFunding, deal.State) - }, - }) - }) - t.Run("EnsureClientFunds fails", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealEnsureClientFunds, clientstates.EnsureClientFunds, testCase{ - nodeParams: nodeParams{ - EnsureFundsError: errors.New("Something went wrong"), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - assert.Equal(t, "adding market funds failed: Something went wrong", deal.Message) - }, - }) - }) -} - -func TestWaitForFunding(t *testing.T) { - t.Run("succeeds", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealClientFunding, clientstates.WaitForFunding, testCase{ - nodeParams: nodeParams{WaitForMessageExitCode: exitcode.Ok}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFundsEnsured, deal.State) - }, - }) - }) - t.Run("EnsureClientFunds fails", func(t 
*testing.T) { - runAndInspect(t, storagemarket.StorageDealClientFunding, clientstates.WaitForFunding, testCase{ - nodeParams: nodeParams{WaitForMessageExitCode: exitcode.ErrInsufficientFunds}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - assert.Equal(t, "adding market funds failed: AddFunds exit code: 19", deal.Message) - }, - }) - }) -} - -func TestProposeDeal(t *testing.T) { - t.Run("succeeds and closes stream", func(t *testing.T) { - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{ - ResponseReader: testResponseReader(t, responseParams{ - state: storagemarket.StorageDealWaitingForData, - proposal: clientDealProposal, - }), - }) - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ - envParams: envParams{dealStream: ds}, - nodeParams: nodeParams{WaitForMessageExitCode: exitcode.Ok}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealStartDataTransfer, deal.State) - assert.Equal(t, 1, env.dealStream.CloseCount) - }, - }) - }) - t.Run("sends a fast retrieval flag", func(t *testing.T) { - var sentProposal *smnet.Proposal - - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{ - ResponseReader: testResponseReader(t, responseParams{ - state: storagemarket.StorageDealWaitingForData, - proposal: clientDealProposal, - }), - ProposalWriter: func(proposal smnet.Proposal) error { - sentProposal = &proposal - return nil - }, - }) - - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ - envParams: envParams{dealStream: ds}, - stateParams: dealStateParams{fastRetrieval: true}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealStartDataTransfer, deal.State) - assert.Equal(t, true, sentProposal.FastRetrieval) - }, - }) - }) 
- - t.Run("write proposal fails fails", func(t *testing.T) { - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{ - ProposalWriter: tut.FailStorageProposalWriter, - }) - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ - envParams: envParams{dealStream: ds}, - nodeParams: nodeParams{WaitForMessageExitCode: exitcode.Ok}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "sending proposal to storage provider failed: write proposal failed", deal.Message) - }, - }) - }) - t.Run("read response fails", func(t *testing.T) { - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{ - ResponseReader: tut.FailStorageResponseReader, - }) - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ - envParams: envParams{ - dealStream: ds, - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - assert.Equal(t, "error reading Response message: read response failed", deal.Message) - }, - }) - }) - t.Run("closing the stream fails", func(t *testing.T) { - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{}) - ds.CloseError = xerrors.Errorf("failed to close stream") - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ - envParams: envParams{dealStream: ds}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "error attempting to close stream: failed to close stream", deal.Message) - }, - }) - }) - t.Run("getting chain head fails", func(t *testing.T) { - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{}) - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ 
- envParams: envParams{ - dealStream: ds, - }, - nodeParams: nodeParams{ - GetChainHeadError: xerrors.Errorf("failed getting chain head"), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - assert.Equal(t, "unable to verify signature on deal response", deal.Message) - }, - }) - }) - t.Run("verify signature fails", func(t *testing.T) { - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{}) - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ - envParams: envParams{ - dealStream: ds, - }, - nodeParams: nodeParams{ - VerifySignatureFails: true, - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - assert.Equal(t, "unable to verify signature on deal response", deal.Message) - }, - }) - }) - t.Run("response contains unexpected state", func(t *testing.T) { - ds := tut.NewTestStorageDealStream(tut.TestStorageDealStreamParams{ - ResponseReader: testResponseReader(t, responseParams{ - proposal: clientDealProposal, - state: storagemarket.StorageDealProposalNotFound, - message: "couldn't find deal in store", - }), - }) - runAndInspect(t, storagemarket.StorageDealFundsEnsured, clientstates.ProposeDeal, testCase{ - envParams: envParams{ - dealStream: ds, - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - assert.Equal(t, "unexpected deal status while waiting for data request: 1 (StorageDealProposalNotFound). 
Provider message: couldn't find deal in store", deal.Message) - }, - }) - }) -} - -func TestInitiateDataTransfer(t *testing.T) { - t.Run("succeeds and starts the data transfer", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealStartDataTransfer, clientstates.InitiateDataTransfer, testCase{ - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - assert.Len(t, env.startDataTransferCalls, 1) - assert.Equal(t, env.startDataTransferCalls[0].to, deal.Miner) - assert.Equal(t, env.startDataTransferCalls[0].baseCid, deal.DataRef.Root) - tut.AssertDealState(t, storagemarket.StorageDealTransferring, deal.State) - }, - }) - }) - t.Run("starts polling for acceptance with manual transfers", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealStartDataTransfer, clientstates.InitiateDataTransfer, testCase{ - envParams: envParams{ - manualTransfer: true, - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, deal.State) - assert.Len(t, env.startDataTransferCalls, 0) - }, - }) - }) - t.Run("fails if it can't initiate data transfer", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealStartDataTransfer, clientstates.InitiateDataTransfer, testCase{ - envParams: envParams{ - startDataTransferError: xerrors.Errorf("failed to start data transfer"), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - }, - }) - }) -} - -func TestCheckForDealAcceptance(t *testing.T) { - testCids := tut.GenerateCids(4) - proposalCid := tut.GenerateCid(t, clientDealProposal) - - makeProviderDealState := func(status storagemarket.StorageDealStatus) *storagemarket.ProviderDealState { - return &storagemarket.ProviderDealState{ - State: status, - Message: "", - Proposal: &clientDealProposal.Proposal, - ProposalCid: &proposalCid, - AddFundsCid: &testCids[1], - 
PublishCid: &testCids[2], - DealID: 123, - } - } - - t.Run("succeeds when provider indicates a successful deal", func(t *testing.T) { - successStates := []storagemarket.StorageDealStatus{ - storagemarket.StorageDealActive, - storagemarket.StorageDealSealing, - storagemarket.StorageDealStaged, - storagemarket.StorageDealSlashed, - storagemarket.StorageDealExpired, - } - - for _, s := range successStates { - runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ - envParams: envParams{ - providerDealState: makeProviderDealState(s), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealProposalAccepted, deal.State) - }, - }) - } - }) - - t.Run("fails when provider indicates a failed deal", func(t *testing.T) { - failureStates := []storagemarket.StorageDealStatus{ - storagemarket.StorageDealFailing, - storagemarket.StorageDealError, - } - - for _, s := range failureStates { - runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ - envParams: envParams{ - providerDealState: makeProviderDealState(s), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - }, - }) - } - }) - - t.Run("continues polling if there is an error querying provider deal state", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ - envParams: envParams{ - getDealStatusErr: xerrors.Errorf("network error"), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, deal.State) - assert.Equal(t, uint64(1), deal.PollRetryCount) - assert.Equal(t, uint64(1), deal.PollErrorCount) - }, - }) - }) - - t.Run("continues polling with an indeterminate deal state", func(t 
*testing.T) { - runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ - envParams: envParams{ - providerDealState: makeProviderDealState(storagemarket.StorageDealVerifyData), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, deal.State) - }, - }) - }) - - t.Run("fails if the wrong proposal comes back", func(t *testing.T) { - pds := makeProviderDealState(storagemarket.StorageDealActive) - pds.ProposalCid = &tut.GenerateCids(1)[0] - - runAndInspect(t, storagemarket.StorageDealCheckForAcceptance, clientstates.CheckForDealAcceptance, testCase{ - envParams: envParams{providerDealState: pds}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - assert.Regexp(t, "miner responded to a wrong proposal", deal.Message) - }, - }) - }) -} - -func TestValidateDealPublished(t *testing.T) { - t.Run("succeeds", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealProposalAccepted, clientstates.ValidateDealPublished, testCase{ - nodeParams: nodeParams{ValidatePublishedDealID: abi.DealID(5)}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealSealing, deal.State) - assert.Equal(t, abi.DealID(5), deal.DealID) - }, - }) - }) - t.Run("fails", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealProposalAccepted, clientstates.ValidateDealPublished, testCase{ - nodeParams: nodeParams{ - ValidatePublishedDealID: abi.DealID(5), - ValidatePublishedError: errors.New("Something went wrong"), - }, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "error validating deal published: Something went wrong", deal.Message) - }, - }) - }) -} - -func 
TestVerifyDealActivated(t *testing.T) { - t.Run("succeeds", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealSealing, clientstates.VerifyDealActivated, testCase{ - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealActive, deal.State) - }, - }) - }) - t.Run("fails synchronously", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealSealing, clientstates.VerifyDealActivated, testCase{ - nodeParams: nodeParams{DealCommittedSyncError: errors.New("Something went wrong")}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "error in deal activation: Something went wrong", deal.Message) - }, - }) - }) - t.Run("fails asynchronously", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealSealing, clientstates.VerifyDealActivated, testCase{ - nodeParams: nodeParams{DealCommittedAsyncError: errors.New("Something went wrong later")}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "error in deal activation: Something went wrong later", deal.Message) - }, - }) - }) -} - -func TestWaitForDealCompletion(t *testing.T) { - t.Run("slashing succeeds", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ - nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(5)}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealSlashed, deal.State) - assert.Equal(t, abi.ChainEpoch(5), deal.SlashEpoch) - }, - }) - }) - t.Run("expiration succeeds", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ - // OnDealSlashedEpoch of zero signals to test node to call 
onDealExpired() - nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(0)}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealExpired, deal.State) - }, - }) - }) - t.Run("slashing fails", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ - nodeParams: nodeParams{OnDealSlashedError: errors.New("an err")}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "error waiting for deal completion: deal slashing err: an err", deal.Message) - }, - }) - }) - t.Run("expiration fails", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ - nodeParams: nodeParams{OnDealExpiredError: errors.New("an err")}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "error waiting for deal completion: deal expiration err: an err", deal.Message) - }, - }) - }) - t.Run("fails synchronously", func(t *testing.T) { - runAndInspect(t, storagemarket.StorageDealActive, clientstates.WaitForDealCompletion, testCase{ - nodeParams: nodeParams{WaitForDealCompletionError: errors.New("an err")}, - inspector: func(deal storagemarket.ClientDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - assert.Equal(t, "error waiting for deal completion: an err", deal.Message) - }, - }) - }) -} - -type envParams struct { - dealStream *tut.TestStorageDealStream - startDataTransferError error - manualTransfer bool - providerDealState *storagemarket.ProviderDealState - getDealStatusErr error - pollingInterval time.Duration -} - -type dealStateParams struct { - addFundsCid *cid.Cid - fastRetrieval bool -} - -type executor func(t *testing.T, - 
nodeParams nodeParams, - envParams envParams, - dealInspector func(deal storagemarket.ClientDeal, env *fakeEnvironment)) - -func makeExecutor(ctx context.Context, - eventProcessor fsm.EventProcessor, - initialState storagemarket.StorageDealStatus, - stateEntryFunc clientstates.ClientStateEntryFunc, - dealParams dealStateParams, - clientDealProposal *market.ClientDealProposal) executor { - return func(t *testing.T, - nodeParams nodeParams, - envParams envParams, - dealInspector func(deal storagemarket.ClientDeal, env *fakeEnvironment)) { - node := makeNode(nodeParams) - dealState, err := tut.MakeTestClientDeal(initialState, clientDealProposal, envParams.manualTransfer) - assert.NoError(t, err) - dealState.AddFundsCid = &tut.GenerateCids(1)[0] - dealState.FastRetrieval = dealParams.fastRetrieval - - if dealParams.addFundsCid != nil { - dealState.AddFundsCid = dealParams.addFundsCid - } - - environment := &fakeEnvironment{ - node: node, - dealStream: envParams.dealStream, - startDataTransferError: envParams.startDataTransferError, - providerDealState: envParams.providerDealState, - getDealStatusErr: envParams.getDealStatusErr, - pollingInterval: envParams.pollingInterval, - } - - if environment.pollingInterval == 0 { - environment.pollingInterval = 0 - } - - fsmCtx := fsmtest.NewTestContext(ctx, eventProcessor) - err = stateEntryFunc(fsmCtx, environment, *dealState) - assert.NoError(t, err) - time.Sleep(10 * time.Millisecond) - fsmCtx.ReplayEvents(t, dealState) - dealInspector(*dealState, environment) - } -} - -type nodeParams struct { - AddFundsCid cid.Cid - EnsureFundsError error - VerifySignatureFails bool - GetBalanceError error - GetChainHeadError error - WaitForMessageBlocks bool - WaitForMessageError error - WaitForMessageExitCode exitcode.ExitCode - WaitForMessageRetBytes []byte - ClientAddr address.Address - ValidationError error - ValidatePublishedDealID abi.DealID - ValidatePublishedError error - DealCommittedSyncError error - DealCommittedAsyncError error 
- WaitForDealCompletionError error - OnDealExpiredError error - OnDealSlashedError error - OnDealSlashedEpoch abi.ChainEpoch -} - -func makeNode(params nodeParams) storagemarket.StorageClientNode { - var out testnodes.FakeClientNode - out.SMState = testnodes.NewStorageMarketState() - out.AddFundsCid = params.AddFundsCid - out.EnsureFundsError = params.EnsureFundsError - out.VerifySignatureFails = params.VerifySignatureFails - out.GetBalanceError = params.GetBalanceError - out.GetChainHeadError = params.GetChainHeadError - out.WaitForMessageBlocks = params.WaitForMessageBlocks - out.WaitForMessageError = params.WaitForMessageError - out.WaitForMessageExitCode = params.WaitForMessageExitCode - out.WaitForMessageRetBytes = params.WaitForMessageRetBytes - out.ClientAddr = params.ClientAddr - out.ValidationError = params.ValidationError - out.ValidatePublishedDealID = params.ValidatePublishedDealID - out.ValidatePublishedError = params.ValidatePublishedError - out.DealCommittedSyncError = params.DealCommittedSyncError - out.DealCommittedAsyncError = params.DealCommittedAsyncError - out.WaitForDealCompletionError = params.WaitForDealCompletionError - out.OnDealExpiredError = params.OnDealExpiredError - out.OnDealSlashedError = params.OnDealSlashedError - out.OnDealSlashedEpoch = params.OnDealSlashedEpoch - return &out -} - -type fakeEnvironment struct { - node storagemarket.StorageClientNode - dealStream *tut.TestStorageDealStream - startDataTransferError error - startDataTransferCalls []dataTransferParams - providerDealState *storagemarket.ProviderDealState - getDealStatusErr error - pollingInterval time.Duration -} - -type dataTransferParams struct { - to peer.ID - voucher datatransfer.Voucher - baseCid cid.Cid - selector ipld.Node -} - -func (fe *fakeEnvironment) StartDataTransfer(_ context.Context, to peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { - fe.startDataTransferCalls = append(fe.startDataTransferCalls, dataTransferParams{ 
- to: to, - voucher: voucher, - baseCid: baseCid, - selector: selector, - }) - return fe.startDataTransferError -} - -func (fe *fakeEnvironment) Node() storagemarket.StorageClientNode { - return fe.node -} - -func (fe *fakeEnvironment) WriteDealProposal(_ peer.ID, _ cid.Cid, proposal smnet.Proposal) error { - return fe.dealStream.WriteDealProposal(proposal) -} - -func (fe *fakeEnvironment) NewDealStream(_ context.Context, _ peer.ID) (smnet.StorageDealStream, error) { - return fe.dealStream, nil -} - -func (fe *fakeEnvironment) GetProviderDealState(_ context.Context, _ cid.Cid) (*storagemarket.ProviderDealState, error) { - if fe.getDealStatusErr != nil { - return nil, fe.getDealStatusErr - } - return fe.providerDealState, nil -} - -func (fe *fakeEnvironment) PollingInterval() time.Duration { - return fe.pollingInterval -} - -var _ clientstates.ClientDealEnvironment = &fakeEnvironment{} - -type responseParams struct { - proposal *market.ClientDealProposal - state storagemarket.StorageDealStatus - message string - publishMessage *cid.Cid - proposalCid cid.Cid -} - -func testResponseReader(t *testing.T, params responseParams) tut.StorageDealResponseReader { - response := smnet.Response{ - State: params.state, - Proposal: params.proposalCid, - Message: params.message, - PublishMessage: params.publishMessage, - } - - if response.Proposal == cid.Undef { - proposalNd, err := cborutil.AsIpld(params.proposal) - assert.NoError(t, err) - response.Proposal = proposalNd.Cid() - } - - return tut.StubbedStorageResponseReader(smnet.SignedResponse{ - Response: response, - Signature: tut.MakeTestSignature(), - }) -} - -type testCase struct { - envParams envParams - nodeParams nodeParams - stateParams dealStateParams - inspector func(deal storagemarket.ClientDeal, env *fakeEnvironment) -} - -func runAndInspect(t *testing.T, initialState storagemarket.StorageDealStatus, stateFunc clientstates.ClientStateEntryFunc, tc testCase) { - ctx := context.Background() - eventProcessor, err := 
fsm.NewEventProcessor(storagemarket.ClientDeal{}, "State", clientstates.ClientEvents) - assert.NoError(t, err) - executor := makeExecutor(ctx, eventProcessor, initialState, stateFunc, tc.stateParams, clientDealProposal) - executor(t, tc.nodeParams, tc.envParams, tc.inspector) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/doc.go deleted file mode 100644 index becf2462a3..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientstates/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -/* -Package clientstates contains state machine logic relating to the `StorageMarket`. - -client_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. - -client_states.go contains state handler functions. - -The following diagram illustrates the operation of the client state machine. This diagram is auto-generated from current code and should remain up to date over time: - -https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageclient.mmd.svg - -*/ -package clientstates diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils.go deleted file mode 100644 index f12879118c..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils.go +++ /dev/null @@ -1,58 +0,0 @@ -// Package clientutils provides utility functions for the storage client & client FSM -package clientutils - -import ( - "context" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - 
"golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/pieceio" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -// CommP calculates the commP for a given dataref -func CommP(ctx context.Context, pieceIO pieceio.PieceIO, rt abi.RegisteredSealProof, data *storagemarket.DataRef) (cid.Cid, abi.UnpaddedPieceSize, error) { - if data.PieceCid != nil { - return *data.PieceCid, data.PieceSize, nil - } - - if data.TransferType == storagemarket.TTManual { - return cid.Undef, 0, xerrors.New("Piece CID and size must be set for manual transfer") - } - - commp, paddedSize, err := pieceIO.GeneratePieceCommitment(rt, data.Root, shared.AllSelector()) - if err != nil { - return cid.Undef, 0, xerrors.Errorf("generating CommP: %w", err) - } - - return commp, paddedSize, nil -} - -// VerifyFunc is a function that can validate a signature for a given address and bytes -type VerifyFunc func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) - -// VerifyResponse verifies the signature on the given signed response matches -// the given miner address, using the given signature verification function -func VerifyResponse(ctx context.Context, resp network.SignedResponse, minerAddr address.Address, tok shared.TipSetToken, verifier VerifyFunc) error { - b, err := cborutil.Dump(&resp.Response) - if err != nil { - return err - } - verified, err := verifier(ctx, *resp.Signature, minerAddr, b, tok) - if err != nil { - return err - } - - if !verified { - return xerrors.New("could not verify signature") - } - - return nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils_test.go deleted file mode 100644 index c5557e96f6..0000000000 --- 
a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils/clientutils_test.go +++ /dev/null @@ -1,128 +0,0 @@ -package clientutils_test - -import ( - "context" - "errors" - "fmt" - "io" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/clientutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -func TestCommP(t *testing.T) { - ctx := context.Background() - proofType := abi.RegisteredSealProof_StackedDrg2KiBV1 - t.Run("when PieceCID is present on data ref", func(t *testing.T) { - pieceCid := &shared_testutil.GenerateCids(1)[0] - pieceSize := abi.UnpaddedPieceSize(rand.Uint64()) - data := &storagemarket.DataRef{ - TransferType: storagemarket.TTManual, - PieceCid: pieceCid, - PieceSize: pieceSize, - } - respcid, ressize, err := clientutils.CommP(ctx, nil, proofType, data) - require.NoError(t, err) - require.Equal(t, respcid, *pieceCid) - require.Equal(t, ressize, pieceSize) - }) - - t.Run("when PieceCID is not present on data ref", func(t *testing.T) { - root := shared_testutil.GenerateCids(1)[0] - data := &storagemarket.DataRef{ - TransferType: storagemarket.TTGraphsync, - Root: root, - } - allSelector := shared.AllSelector() - t.Run("when pieceIO succeeds", func(t *testing.T) { - pieceCid := shared_testutil.GenerateCids(1)[0] - pieceSize := abi.UnpaddedPieceSize(rand.Uint64()) - pieceIO := &testPieceIO{t, proofType, root, allSelector, pieceCid, pieceSize, nil} - respcid, ressize, err := clientutils.CommP(ctx, pieceIO, proofType, data) - 
require.NoError(t, err) - require.Equal(t, respcid, pieceCid) - require.Equal(t, ressize, pieceSize) - }) - - t.Run("when pieceIO fails", func(t *testing.T) { - expectedMsg := "something went wrong" - pieceIO := &testPieceIO{t, proofType, root, allSelector, cid.Undef, 0, errors.New(expectedMsg)} - respcid, ressize, err := clientutils.CommP(ctx, pieceIO, proofType, data) - require.EqualError(t, err, fmt.Sprintf("generating CommP: %s", expectedMsg)) - require.Equal(t, respcid, cid.Undef) - require.Equal(t, ressize, abi.UnpaddedPieceSize(0)) - }) - }) -} - -func TestVerifyResponse(t *testing.T) { - tests := map[string]struct { - sresponse network.SignedResponse - verifier clientutils.VerifyFunc - shouldErr bool - }{ - "successful verification": { - sresponse: shared_testutil.MakeTestStorageNetworkSignedResponse(), - verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { - return true, nil - }, - shouldErr: false, - }, - "bad response": { - sresponse: network.SignedResponse{ - Response: network.Response{}, - Signature: shared_testutil.MakeTestSignature(), - }, - verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { - return true, nil - }, - shouldErr: true, - }, - "verification fails": { - sresponse: shared_testutil.MakeTestStorageNetworkSignedResponse(), - verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { - return false, nil - }, - shouldErr: true, - }, - } - for name, data := range tests { - t.Run(name, func(t *testing.T) { - err := clientutils.VerifyResponse(context.Background(), data.sresponse, address.TestAddress, shared.TipSetToken{}, data.verifier) - require.Equal(t, err != nil, data.shouldErr) - }) - } -} - -type testPieceIO struct { - t *testing.T - expectedRt abi.RegisteredSealProof - expectedPayloadCid cid.Cid - expectedSelector ipld.Node - pieceCID cid.Cid - pieceSize 
abi.UnpaddedPieceSize - err error -} - -func (t *testPieceIO) GeneratePieceCommitment(rt abi.RegisteredSealProof, payloadCid cid.Cid, selector ipld.Node) (cid.Cid, abi.UnpaddedPieceSize, error) { - require.Equal(t.t, rt, t.expectedRt) - require.Equal(t.t, payloadCid, t.expectedPayloadCid) - require.Equal(t.t, selector, t.expectedSelector) - return t.pieceCID, t.pieceSize, t.err -} - -func (t *testPieceIO) ReadPiece(r io.Reader) (cid.Cid, error) { - panic("not implemented") -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager/connmanager.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager/connmanager.go deleted file mode 100644 index 1bd9b67484..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager/connmanager.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package connmanager tracks open connections maping storage proposal CID -> StorageDealStream -package connmanager - -import ( - "sync" - - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -// ConnManager is a simple threadsafe map of proposal CID -> network deal stream -type ConnManager struct { - connsLk sync.RWMutex - conns map[cid.Cid]network.StorageDealStream -} - -// NewConnManager returns a new conn manager -func NewConnManager() *ConnManager { - return &ConnManager{ - conns: map[cid.Cid]network.StorageDealStream{}, - } -} - -// DealStream returns the deal stream for the given proposal, or an error if not present -func (c *ConnManager) DealStream(proposalCid cid.Cid) (network.StorageDealStream, error) { - c.connsLk.RLock() - s, ok := c.conns[proposalCid] - c.connsLk.RUnlock() - if ok { - return s, nil - } - return nil, xerrors.New("no connection to provider") -} - -// AddStream adds the given stream to the conn manager, and errors if one already -// exists for the given proposal CID -func (c *ConnManager) 
AddStream(proposalCid cid.Cid, s network.StorageDealStream) error { - c.connsLk.Lock() - defer c.connsLk.Unlock() - _, ok := c.conns[proposalCid] - if ok { - return xerrors.Errorf("already have connected for proposal %s", proposalCid) - } - c.conns[proposalCid] = s - return nil -} - -// Disconnect removes the given connection from the conn manager and closes -// the stream. It errors if an error occurs closing the stream -func (c *ConnManager) Disconnect(proposalCid cid.Cid) error { - c.connsLk.Lock() - defer c.connsLk.Unlock() - s, ok := c.conns[proposalCid] - if !ok { - return nil - } - - err := s.Close() - delete(c.conns, proposalCid) - return err -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager/connmanager_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager/connmanager_test.go deleted file mode 100644 index 36a6a98597..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager/connmanager_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package connmanager_test - -import ( - "sync" - "testing" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -func TestConnManager(t *testing.T) { - conns := connmanager.NewConnManager() - cids := shared_testutil.GenerateCids(10) - streams := make([]network.StorageDealStream, 0, 10) - var wait sync.WaitGroup - - for i := 0; i < 10; i++ { - streams = append(streams, shared_testutil.NewTestStorageDealStream( - shared_testutil.TestStorageDealStreamParams{})) - } - t.Run("no conns present initially", func(t *testing.T) { - for _, c := range cids { - stream, err := conns.DealStream(c) - require.Nil(t, stream) - require.Error(t, err) - } - }) - - t.Run("adding conns, can retrieve", func(t 
*testing.T) { - for i, c := range cids { - wait.Add(1) - stream := streams[i] - go func(c cid.Cid, stream network.StorageDealStream) { - defer wait.Done() - err := conns.AddStream(c, stream) - require.NoError(t, err) - }(c, stream) - } - wait.Wait() - for i, c := range cids { - wait.Add(1) - stream := streams[i] - go func(c cid.Cid, stream network.StorageDealStream) { - defer wait.Done() - received, err := conns.DealStream(c) - require.Equal(t, stream, received) - require.NoError(t, err) - }(c, stream) - } - wait.Wait() - }) - - t.Run("adding conns twice fails", func(t *testing.T) { - for i, c := range cids { - wait.Add(1) - stream := streams[i] - go func(c cid.Cid, stream network.StorageDealStream) { - defer wait.Done() - err := conns.AddStream(c, stream) - require.Error(t, err) - }(c, stream) - } - wait.Wait() - }) - - t.Run("disconnection removes", func(t *testing.T) { - for _, c := range cids { - wait.Add(1) - go func(c cid.Cid) { - defer wait.Done() - err := conns.Disconnect(c) - require.NoError(t, err) - }(c) - } - wait.Wait() - for _, c := range cids { - wait.Add(1) - go func(c cid.Cid) { - defer wait.Done() - received, err := conns.DealStream(c) - require.Nil(t, received) - require.Error(t, err) - }(c) - } - wait.Wait() - }) - - t.Run("disconnecting twice causes no error", func(t *testing.T) { - for _, c := range cids { - wait.Add(1) - go func(c cid.Cid) { - defer wait.Done() - err := conns.Disconnect(c) - require.NoError(t, err) - }(c) - } - wait.Wait() - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/doc.go deleted file mode 100644 index 3f1e24151a..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/* -Package storageimpl provides the primary implementation of storage market top level interfaces - -This package provides a production implementation of `StorageClient` 
and `StorageProvider`. -*/ -package storageimpl diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils/dtutils.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils/dtutils.go deleted file mode 100644 index 29bd86da1c..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils/dtutils.go +++ /dev/null @@ -1,90 +0,0 @@ -// Package dtutils provides event listeners for the client and provider to -// listen for events on the data transfer module and dispatch FSM events based on them -package dtutils - -import ( - "errors" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statemachine/fsm" - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" -) - -var log = logging.Logger("storagemarket_impl") - -var ( - // ErrDataTransferFailed means a data transfer for a deal failed - ErrDataTransferFailed = errors.New("deal data transfer failed") -) - -// EventReceiver is any thing that can receive FSM events -type EventReceiver interface { - Send(id interface{}, name fsm.EventName, args ...interface{}) (err error) -} - -// ProviderDataTransferSubscriber is the function called when an event occurs in a data -// transfer received by a provider -- it reads the voucher to verify this event occurred -// in a storage market deal, then, based on the data transfer event that occurred, it generates -// and update message for the deal -- either moving to staged for a completion -// event or moving to error if a data transfer error occurs -func ProviderDataTransferSubscriber(deals EventReceiver) datatransfer.Subscriber { - return func(event datatransfer.Event, channelState datatransfer.ChannelState) { - voucher, ok := channelState.Voucher().(*requestvalidation.StorageDataTransferVoucher) - // if this event is for a 
transfer not related to storage, ignore - if !ok { - return - } - - // data transfer events for progress do not affect deal state - switch event.Code { - case datatransfer.Open: - err := deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferInitiated) - if err != nil { - log.Errorf("processing dt event: %w", err) - } - case datatransfer.Complete: - err := deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferCompleted) - if err != nil { - log.Errorf("processing dt event: %w", err) - } - case datatransfer.Error: - err := deals.Send(voucher.Proposal, storagemarket.ProviderEventDataTransferFailed, ErrDataTransferFailed) - if err != nil { - log.Errorf("processing dt event: %w", err) - } - default: - } - } -} - -// ClientDataTransferSubscriber is the function called when an event occurs in a data -// transfer initiated on the client -- it reads the voucher to verify this even occurred -// in a storage market deal, then, based on the data transfer event that occurred, it dispatches -// an event to the appropriate state machine -func ClientDataTransferSubscriber(deals EventReceiver) datatransfer.Subscriber { - return func(event datatransfer.Event, channelState datatransfer.ChannelState) { - voucher, ok := channelState.Voucher().(*requestvalidation.StorageDataTransferVoucher) - // if this event is for a transfer not related to storage, ignore - if !ok { - return - } - - // data transfer events for progress do not affect deal state - switch event.Code { - case datatransfer.Complete: - err := deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferComplete) - if err != nil { - log.Errorf("processing dt event: %w", err) - } - case datatransfer.Error: - err := deals.Send(voucher.Proposal, storagemarket.ClientEventDataTransferFailed, ErrDataTransferFailed) - if err != nil { - log.Errorf("processing dt event: %w", err) - } - default: - } - } -} diff --git 
a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils/dtutils_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils/dtutils_test.go deleted file mode 100644 index 40059d38ac..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils/dtutils_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package dtutils_test - -import ( - "testing" - - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" -) - -func TestProviderDataTransferSubscriber(t *testing.T) { - expectedProposalCID := shared_testutil.GenerateCids(1)[0] - tests := map[string]struct { - code datatransfer.EventCode - called bool - voucher datatransfer.Voucher - expectedID interface{} - expectedEvent fsm.EventName - expectedArgs []interface{} - }{ - "not a storage voucher": { - called: false, - voucher: nil, - }, - "open event": { - code: datatransfer.Open, - called: true, - voucher: &requestvalidation.StorageDataTransferVoucher{ - Proposal: expectedProposalCID, - }, - expectedID: expectedProposalCID, - expectedEvent: storagemarket.ProviderEventDataTransferInitiated, - }, - "completion event": { - code: datatransfer.Complete, - called: true, - voucher: &requestvalidation.StorageDataTransferVoucher{ - Proposal: expectedProposalCID, - }, - expectedID: expectedProposalCID, - expectedEvent: storagemarket.ProviderEventDataTransferCompleted, - }, - "error event": { - code: datatransfer.Error, - called: true, - voucher: &requestvalidation.StorageDataTransferVoucher{ - Proposal: 
expectedProposalCID, - }, - expectedID: expectedProposalCID, - expectedEvent: storagemarket.ProviderEventDataTransferFailed, - expectedArgs: []interface{}{dtutils.ErrDataTransferFailed}, - }, - "other event": { - code: datatransfer.Progress, - called: false, - voucher: &requestvalidation.StorageDataTransferVoucher{ - Proposal: expectedProposalCID, - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - fdg := &fakeDealGroup{} - subscriber := dtutils.ProviderDataTransferSubscriber(fdg) - subscriber(datatransfer.Event{Code: data.code}, datatransfer.ChannelState{ - Channel: datatransfer.NewChannel(datatransfer.TransferID(0), cid.Undef, nil, data.voucher, peer.ID(""), peer.ID(""), 0), - }) - if data.called { - require.True(t, fdg.called) - require.Equal(t, fdg.lastID, data.expectedID) - require.Equal(t, fdg.lastEvent, data.expectedEvent) - require.Equal(t, fdg.lastArgs, data.expectedArgs) - } else { - require.False(t, fdg.called) - } - }) - } -} - -func TestClientDataTransferSubscriber(t *testing.T) { - expectedProposalCID := shared_testutil.GenerateCids(1)[0] - tests := map[string]struct { - code datatransfer.EventCode - called bool - voucher datatransfer.Voucher - expectedID interface{} - expectedEvent fsm.EventName - expectedArgs []interface{} - }{ - "not a storage voucher": { - called: false, - voucher: nil, - }, - "completion event": { - code: datatransfer.Complete, - called: true, - voucher: &requestvalidation.StorageDataTransferVoucher{ - Proposal: expectedProposalCID, - }, - expectedID: expectedProposalCID, - expectedEvent: storagemarket.ClientEventDataTransferComplete, - }, - "error event": { - code: datatransfer.Error, - called: true, - voucher: &requestvalidation.StorageDataTransferVoucher{ - Proposal: expectedProposalCID, - }, - expectedID: expectedProposalCID, - expectedEvent: storagemarket.ClientEventDataTransferFailed, - expectedArgs: []interface{}{dtutils.ErrDataTransferFailed}, - }, - "other event": { - code: 
datatransfer.Progress, - called: false, - voucher: &requestvalidation.StorageDataTransferVoucher{ - Proposal: expectedProposalCID, - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - fdg := &fakeDealGroup{} - subscriber := dtutils.ClientDataTransferSubscriber(fdg) - subscriber(datatransfer.Event{Code: data.code}, datatransfer.ChannelState{ - Channel: datatransfer.NewChannel(datatransfer.TransferID(0), cid.Undef, nil, data.voucher, peer.ID(""), peer.ID(""), 0), - }) - if data.called { - require.True(t, fdg.called) - require.Equal(t, fdg.lastID, data.expectedID) - require.Equal(t, fdg.lastEvent, data.expectedEvent) - require.Equal(t, fdg.lastArgs, data.expectedArgs) - } else { - require.False(t, fdg.called) - } - }) - } -} - -type fakeDealGroup struct { - returnedErr error - called bool - lastID interface{} - lastEvent fsm.EventName - lastArgs []interface{} -} - -func (fdg *fakeDealGroup) Send(id interface{}, name fsm.EventName, args ...interface{}) (err error) { - fdg.lastID = id - fdg.lastEvent = name - fdg.lastArgs = args - fdg.called = true - return fdg.returnedErr -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider.go deleted file mode 100644 index c836b6e953..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider.go +++ /dev/null @@ -1,672 +0,0 @@ -package storageimpl - -import ( - "context" - "io" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/hannahhoward/go-pubsub" - "github.com/ipfs/go-cid" - 
"github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/ipld/go-ipld-prime" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/pieceio" - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/connmanager" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/dtutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -// DefaultDealAcceptanceBuffer is the minimum number of epochs ahead of the current epoch -// a deal's StartEpoch must be for the deal to be accepted. 
-// The StartEpoch must be more than simply greater than the current epoch because we -// need time to transfer data, publish the deal on chain, and seal the sector with the data -var DefaultDealAcceptanceBuffer = abi.ChainEpoch(100) -var _ storagemarket.StorageProvider = &Provider{} -var _ network.StorageReceiver = &Provider{} - -// StoredAsk is an interface which provides access to a StorageAsk -type StoredAsk interface { - GetAsk() *storagemarket.SignedStorageAsk - SetAsk(price abi.TokenAmount, duration abi.ChainEpoch, options ...storagemarket.StorageAskOption) error -} - -// Provider is the production implementation of the StorageProvider interface -type Provider struct { - net network.StorageMarketNetwork - - proofType abi.RegisteredSealProof - - spn storagemarket.StorageProviderNode - fs filestore.FileStore - pio pieceio.PieceIOWithStore - pieceStore piecestore.PieceStore - conns *connmanager.ConnManager - storedAsk StoredAsk - actor address.Address - dataTransfer datatransfer.Manager - universalRetrievalEnabled bool - customDealDeciderFunc DealDeciderFunc - dealAcceptanceBuffer abi.ChainEpoch - pubSub *pubsub.PubSub - - deals fsm.Group -} - -// StorageProviderOption allows custom configuration of a storage provider -type StorageProviderOption func(p *Provider) - -// EnableUniversalRetrieval causes a storage provider to track all CIDs in a piece, -// so that any CID, not just the root payload CID, can be retrieved -func EnableUniversalRetrieval() StorageProviderOption { - return func(p *Provider) { - p.universalRetrievalEnabled = true - } -} - -// DealAcceptanceBuffer allows a provider to set a buffer (in epochs) to account for the time -// required for data transfer, deal verification, publishing, sealing, and committing. 
-func DealAcceptanceBuffer(buffer abi.ChainEpoch) StorageProviderOption { - return func(p *Provider) { - p.dealAcceptanceBuffer = buffer - } -} - -// DealDeciderFunc is a function which evaluates an incoming deal to decide if -// it its accepted -// It returns: -// - boolean = true if deal accepted, false if rejected -// - string = reason deal was not excepted, if rejected -// - error = if an error occurred trying to decide -type DealDeciderFunc func(context.Context, storagemarket.MinerDeal) (bool, string, error) - -// CustomDealDecisionLogic allows a provider to call custom decision logic when validating incoming -// deal proposals -func CustomDealDecisionLogic(decider DealDeciderFunc) StorageProviderOption { - return func(p *Provider) { - p.customDealDeciderFunc = decider - } -} - -// NewProvider returns a new storage provider -func NewProvider(net network.StorageMarketNetwork, - ds datastore.Batching, - bs blockstore.Blockstore, - fs filestore.FileStore, - pieceStore piecestore.PieceStore, - dataTransfer datatransfer.Manager, - spn storagemarket.StorageProviderNode, - minerAddress address.Address, - rt abi.RegisteredSealProof, - storedAsk StoredAsk, - options ...StorageProviderOption, -) (storagemarket.StorageProvider, error) { - carIO := cario.NewCarIO() - pio := pieceio.NewPieceIOWithStore(carIO, fs, bs) - - h := &Provider{ - net: net, - proofType: rt, - spn: spn, - fs: fs, - pio: pio, - pieceStore: pieceStore, - conns: connmanager.NewConnManager(), - storedAsk: storedAsk, - actor: minerAddress, - dataTransfer: dataTransfer, - dealAcceptanceBuffer: DefaultDealAcceptanceBuffer, - pubSub: pubsub.New(providerDispatcher), - } - - deals, err := newProviderStateMachine( - ds, - &providerDealEnvironment{h}, - h.dispatch, - ) - if err != nil { - return nil, err - } - - h.deals = deals - - h.Configure(options...) 
- - // register a data transfer event handler -- this will send events to the state machines based on DT events - dataTransfer.SubscribeToEvents(dtutils.ProviderDataTransferSubscriber(deals)) - - return h, nil -} - -// Start initializes deal processing on a StorageProvider and restarts in progress deals. -// It also registers the provider with a StorageMarketNetwork so it can receive incoming -// messages on the storage market's libp2p protocols -func (p *Provider) Start(ctx context.Context) error { - err := p.net.SetDelegate(p) - if err != nil { - return err - } - go func() { - err := p.restartDeals() - if err != nil { - log.Errorf("Failed to restart deals: %s", err.Error()) - } - }() - return nil -} - -/* -HandleDealStream is called by the network implementation whenever a new message is received on the deal protocol - -It initiates the provider side of the deal flow. - -When a provider receives a DealProposal of the deal protocol, it takes the following steps: - -1. Calculates the CID for the received ClientDealProposal. - -2. Constructs a MinerDeal to track the state of this deal. - -3. Tells its statemachine to begin tracking this deal state by CID of the received ClientDealProposal - -4. Tracks the received deal stream by the CID of the ClientDealProposal - -4. Triggers a `ProviderEventOpen` event on its statemachine. - -From then on, the statemachine controls the deal flow in the client. Other components may listen for events in this flow by calling -`SubscribeToEvents` on the Provider. The Provider handles loading the next block to send to the client. 
- -Documentation of the client state machine can be found at https://godoc.org/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates -*/ -func (p *Provider) HandleDealStream(s network.StorageDealStream) { - log.Info("Handling storage deal proposal!") - - err := p.receiveDeal(s) - if err != nil { - log.Errorf("%+v", err) - s.Close() - return - } -} - -func (p *Provider) receiveDeal(s network.StorageDealStream) error { - proposal, err := s.ReadDealProposal() - if err != nil { - return xerrors.Errorf("failed to read proposal message: %w", err) - } - - proposalNd, err := cborutil.AsIpld(proposal.DealProposal) - if err != nil { - return err - } - - deal := &storagemarket.MinerDeal{ - Client: s.RemotePeer(), - Miner: p.net.ID(), - ClientDealProposal: *proposal.DealProposal, - ProposalCid: proposalNd.Cid(), - State: storagemarket.StorageDealUnknown, - Ref: proposal.Piece, - FastRetrieval: proposal.FastRetrieval, - } - - err = p.deals.Begin(proposalNd.Cid(), deal) - if err != nil { - return err - } - err = p.conns.AddStream(proposalNd.Cid(), s) - if err != nil { - return err - } - return p.deals.Send(proposalNd.Cid(), storagemarket.ProviderEventOpen) -} - -// Stop terminates processing of deals on a StorageProvider -func (p *Provider) Stop() error { - err := p.deals.Stop(context.TODO()) - if err != nil { - return err - } - return p.net.StopHandlingRequests() -} - -// ImportDataForDeal manually imports data for an offline storage deal -// It will verify that the data in the passed io.Reader matches the expected piece -// cid for the given deal or it will error -func (p *Provider) ImportDataForDeal(ctx context.Context, propCid cid.Cid, data io.Reader) error { - // TODO: be able to check if we have enough disk space - var d storagemarket.MinerDeal - if err := p.deals.Get(propCid).Get(&d); err != nil { - return xerrors.Errorf("failed getting deal %s: %w", propCid, err) - } - - tempfi, err := p.fs.CreateTemp() - if err != nil { - return 
xerrors.Errorf("failed to create temp file for data import: %w", err) - } - cleanup := func() { - _ = tempfi.Close() - _ = p.fs.Delete(tempfi.Path()) - } - - n, err := io.Copy(tempfi, data) - if err != nil { - cleanup() - return xerrors.Errorf("importing deal data failed: %w", err) - } - - _ = n // TODO: verify n? - - pieceSize := uint64(tempfi.Size()) - - _, err = tempfi.Seek(0, io.SeekStart) - if err != nil { - cleanup() - return xerrors.Errorf("failed to seek through temp imported file: %w", err) - } - - pieceCid, _, err := pieceio.GeneratePieceCommitment(p.proofType, tempfi, pieceSize) - if err != nil { - cleanup() - return xerrors.Errorf("failed to generate commP") - } - - // Verify CommP matches - if !pieceCid.Equals(d.Proposal.PieceCID) { - cleanup() - return xerrors.Errorf("given data does not match expected commP (got: %x, expected %x)", pieceCid, d.Proposal.PieceCID) - } - - return p.deals.Send(propCid, storagemarket.ProviderEventVerifiedData, tempfi.Path(), filestore.Path("")) - -} - -// GetAsk returns the storage miner's ask, or nil if one does not exist. 
-func (p *Provider) GetAsk() *storagemarket.SignedStorageAsk { - return p.storedAsk.GetAsk() -} - -// ListDeals lists on-chain deals associated with this storage provider -func (p *Provider) ListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error) { - tok, _, err := p.spn.GetChainHead(ctx) - if err != nil { - return nil, err - } - - return p.spn.ListProviderDeals(ctx, p.actor, tok) -} - -// AddStorageCollateral adds storage collateral -func (p *Provider) AddStorageCollateral(ctx context.Context, amount abi.TokenAmount) error { - done := make(chan error, 1) - - mcid, err := p.spn.AddFunds(ctx, p.actor, amount) - if err != nil { - return err - } - - err = p.spn.WaitForMessage(ctx, mcid, func(code exitcode.ExitCode, bytes []byte, err error) error { - if err != nil { - done <- xerrors.Errorf("AddFunds errored: %w", err) - } else if code != exitcode.Ok { - done <- xerrors.Errorf("AddFunds error, exit code: %s", code.String()) - } else { - done <- nil - } - return nil - }) - - if err != nil { - return err - } - - return <-done -} - -// GetStorageCollateral returns the current collateral balance -func (p *Provider) GetStorageCollateral(ctx context.Context) (storagemarket.Balance, error) { - tok, _, err := p.spn.GetChainHead(ctx) - if err != nil { - return storagemarket.Balance{}, err - } - - return p.spn.GetBalance(ctx, p.actor, tok) -} - -// ListLocalDeals lists deals processed by this storage provider -func (p *Provider) ListLocalDeals() ([]storagemarket.MinerDeal, error) { - var out []storagemarket.MinerDeal - if err := p.deals.List(&out); err != nil { - return nil, err - } - return out, nil -} - -// SetAsk configures the storage miner's ask with the provided price, -// duration, and options. Any previously-existing ask is replaced. -func (p *Provider) SetAsk(price abi.TokenAmount, duration abi.ChainEpoch, options ...storagemarket.StorageAskOption) error { - return p.storedAsk.SetAsk(price, duration, options...) 
-} - -/* -HandleAskStream is called by the network implementation whenever a new message is received on the ask protocol - -A Provider handling a `AskRequest` does the following: - -1. Reads the current signed storage ask from storage - -2. Wraps the signed ask in an AskResponse and writes it on the StorageAskStream - -The connection is kept open only as long as the request-response exchange. -*/ -func (p *Provider) HandleAskStream(s network.StorageAskStream) { - defer s.Close() - ar, err := s.ReadAskRequest() - if err != nil { - log.Errorf("failed to read AskRequest from incoming stream: %s", err) - return - } - - var ask *storagemarket.SignedStorageAsk - if p.actor != ar.Miner { - log.Warnf("storage provider for address %s receive ask for miner with address %s", p.actor, ar.Miner) - } else { - ask = p.storedAsk.GetAsk() - } - - resp := network.AskResponse{ - Ask: ask, - } - - if err := s.WriteAskResponse(resp); err != nil { - log.Errorf("failed to write ask response: %s", err) - return - } -} - -/* -HandleDealStatusStream is called by the network implementation whenever a new message is received on the deal status protocol - -A Provider handling a `DealStatuRequest` does the following: - -1. Lots the deal state from the Provider FSM - -2. Verifies the signature on the DealStatusRequest matches the Client for this deal - -3. Constructs a ProviderDealState from the deal state - -4. Signs the ProviderDealState with its private key - -5. Writes a DealStatusResponse with the ProviderDealState and signature onto the DealStatusStream - -The connection is kept open only as long as the request-response exchange. 
-*/ -func (p *Provider) HandleDealStatusStream(s network.DealStatusStream) { - ctx := context.TODO() - defer s.Close() - request, err := s.ReadDealStatusRequest() - if err != nil { - log.Errorf("failed to read DealStatusRequest from incoming stream: %s", err) - return - } - - // fetch deal state - var md = storagemarket.MinerDeal{} - if err := p.deals.Get(request.Proposal).Get(&md); err != nil { - log.Errorf("proposal doesn't exist in state store: %s", err) - return - } - - // verify query signature - buf, err := cborutil.Dump(&request.Proposal) - if err != nil { - log.Errorf("failed to serialize status request: %s", err) - return - } - - tok, _, err := p.spn.GetChainHead(ctx) - if err != nil { - log.Errorf("failed to get chain head: %s", err) - return - } - - err = providerutils.VerifySignature(ctx, request.Signature, md.ClientDealProposal.Proposal.Client, buf, tok, p.spn.VerifySignature) - if err != nil { - log.Errorf("invalid deal status request signature: %s", err) - return - } - - dealState := storagemarket.ProviderDealState{ - State: md.State, - Message: md.Message, - Proposal: &md.Proposal, - ProposalCid: &md.ProposalCid, - AddFundsCid: md.AddFundsCid, - PublishCid: md.PublishCid, - DealID: md.DealID, - FastRetrieval: md.FastRetrieval, - } - - signature, err := p.sign(ctx, &dealState) - if err != nil { - log.Errorf("failed to sign deal status response: %s", err) - return - } - - response := network.DealStatusResponse{ - DealState: dealState, - Signature: *signature, - } - - if err := s.WriteDealStatusResponse(response); err != nil { - log.Errorf("failed to write deal status response: %s", err) - return - } -} - -// Configure applies the given list of StorageProviderOptions after a StorageProvider -// is initialized -func (p *Provider) Configure(options ...StorageProviderOption) { - for _, option := range options { - option(p) - } -} - -// DealAcceptanceBuffer returns the current deal acceptance buffer -func (p *Provider) DealAcceptanceBuffer() abi.ChainEpoch 
{ - return p.dealAcceptanceBuffer -} - -// UniversalRetrievalEnabled returns whether or not universal retrieval -// (retrieval by any CID, not just the root payload CID) is enabled -// for this provider -func (p *Provider) UniversalRetrievalEnabled() bool { - return p.universalRetrievalEnabled -} - -// SubscribeToEvents allows another component to listen for events on the StorageProvider -// in order to track deals as they progress through the deal flow -func (p *Provider) SubscribeToEvents(subscriber storagemarket.ProviderSubscriber) shared.Unsubscribe { - return shared.Unsubscribe(p.pubSub.Subscribe(subscriber)) -} - -// dispatch puts the fsm event into a form that pubSub can consume, -// then publishes the event -func (p *Provider) dispatch(eventName fsm.EventName, deal fsm.StateType) { - evt, ok := eventName.(storagemarket.ProviderEvent) - if !ok { - log.Errorf("dropped bad event %s", eventName) - } - realDeal, ok := deal.(storagemarket.MinerDeal) - if !ok { - log.Errorf("not a MinerDeal %v", deal) - } - pubSubEvt := internalProviderEvent{evt, realDeal} - - if err := p.pubSub.Publish(pubSubEvt); err != nil { - log.Errorf("failed to publish event %d", evt) - } -} - -func (p *Provider) restartDeals() error { - var deals []storagemarket.MinerDeal - err := p.deals.List(&deals) - if err != nil { - return err - } - - for _, deal := range deals { - if p.deals.IsTerminated(deal) { - continue - } - - err = p.deals.Send(deal.ProposalCid, storagemarket.ProviderEventRestart) - if err != nil { - return err - } - } - return nil -} - -func (p *Provider) sign(ctx context.Context, data interface{}) (*crypto.Signature, error) { - tok, _, err := p.spn.GetChainHead(ctx) - if err != nil { - return nil, xerrors.Errorf("couldn't get chain head: %w", err) - } - - return providerutils.SignMinerData(ctx, data, p.actor, tok, p.spn.GetMinerWorkerAddress, p.spn.SignBytes) -} - -func newProviderStateMachine(ds datastore.Datastore, env fsm.Environment, notifier fsm.Notifier) (fsm.Group, 
error) { - return fsm.New(ds, fsm.Parameters{ - Environment: env, - StateType: storagemarket.MinerDeal{}, - StateKeyField: "State", - Events: providerstates.ProviderEvents, - StateEntryFuncs: providerstates.ProviderStateEntryFuncs, - FinalityStates: providerstates.ProviderFinalityStates, - Notifier: notifier, - }) -} - -type internalProviderEvent struct { - evt storagemarket.ProviderEvent - deal storagemarket.MinerDeal -} - -func providerDispatcher(evt pubsub.Event, fn pubsub.SubscriberFn) error { - ie, ok := evt.(internalProviderEvent) - if !ok { - return xerrors.New("wrong type of event") - } - cb, ok := fn.(storagemarket.ProviderSubscriber) - if !ok { - return xerrors.New("wrong type of callback") - } - cb(ie.evt, ie.deal) - return nil -} - -// ------- -// providerDealEnvironment -// ------- - -type providerDealEnvironment struct { - p *Provider -} - -func (p *providerDealEnvironment) Address() address.Address { - return p.p.actor -} - -func (p *providerDealEnvironment) Node() storagemarket.StorageProviderNode { - return p.p.spn -} - -func (p *providerDealEnvironment) Ask() storagemarket.StorageAsk { - sask := p.p.storedAsk.GetAsk() - if sask == nil { - return storagemarket.StorageAskUndefined - } - return *sask.Ask -} - -func (p *providerDealEnvironment) GeneratePieceCommitmentToFile(payloadCid cid.Cid, selector ipld.Node) (cid.Cid, filestore.Path, filestore.Path, error) { - if p.p.universalRetrievalEnabled { - return providerutils.GeneratePieceCommitmentWithMetadata(p.p.fs, p.p.pio.GeneratePieceCommitmentToFile, p.p.proofType, payloadCid, selector) - } - pieceCid, piecePath, _, err := p.p.pio.GeneratePieceCommitmentToFile(p.p.proofType, payloadCid, selector) - return pieceCid, piecePath, filestore.Path(""), err -} - -func (p *providerDealEnvironment) FileStore() filestore.FileStore { - return p.p.fs -} - -func (p *providerDealEnvironment) PieceStore() piecestore.PieceStore { - return p.p.pieceStore -} - -func (p *providerDealEnvironment) SendSignedResponse(ctx 
context.Context, resp *network.Response) error { - s, err := p.p.conns.DealStream(resp.Proposal) - if err != nil { - return xerrors.Errorf("couldn't send response: %w", err) - } - - sig, err := p.p.sign(ctx, resp) - if err != nil { - return xerrors.Errorf("failed to sign response message: %w", err) - } - - signedResponse := network.SignedResponse{ - Response: *resp, - Signature: sig, - } - - err = s.WriteDealResponse(signedResponse) - if err != nil { - // Assume client disconnected - _ = p.p.conns.Disconnect(resp.Proposal) - } - return err -} - -func (p *providerDealEnvironment) Disconnect(proposalCid cid.Cid) error { - return p.p.conns.Disconnect(proposalCid) -} - -func (p *providerDealEnvironment) DealAcceptanceBuffer() abi.ChainEpoch { - return p.p.dealAcceptanceBuffer -} - -func (p *providerDealEnvironment) RunCustomDecisionLogic(ctx context.Context, deal storagemarket.MinerDeal) (bool, string, error) { - if p.p.customDealDeciderFunc == nil { - return true, "", nil - } - return p.p.customDealDeciderFunc(ctx, deal) -} - -var _ providerstates.ProviderDealEnvironment = &providerDealEnvironment{} - -// ProviderFSMParameterSpec is a valid set of parameters for a provider FSM - used in doc generation -var ProviderFSMParameterSpec = fsm.Parameters{ - Environment: &providerDealEnvironment{}, - StateType: storagemarket.MinerDeal{}, - StateKeyField: "State", - Events: providerstates.ProviderEvents, - StateEntryFuncs: providerstates.ProviderStateEntryFuncs, - FinalityStates: providerstates.ProviderFinalityStates, -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider_test.go deleted file mode 100644 index 4a5ca93011..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/provider_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package storageimpl_test - -import ( - "testing" - - 
"github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/assert" - - storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" -) - -func TestConfigure(t *testing.T) { - p := &storageimpl.Provider{} - - assert.False(t, p.UniversalRetrievalEnabled()) - assert.Equal(t, abi.ChainEpoch(0), p.DealAcceptanceBuffer()) - - p.Configure( - storageimpl.EnableUniversalRetrieval(), - storageimpl.DealAcceptanceBuffer(abi.ChainEpoch(123)), - ) - - assert.True(t, p.UniversalRetrievalEnabled()) - assert.Equal(t, abi.ChainEpoch(123), p.DealAcceptanceBuffer()) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/doc.go deleted file mode 100644 index 96ba495f17..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -/* -Package providerstates contains state machine logic relating to the `StorageProvider`. - -provider_fsm.go is where the state transitions are defined, and the default handlers for each new state are defined. - -provider_states.go contains state handler functions. - -The following diagram illustrates the operation of the provider state machine. 
This diagram is auto-generated from current code and should remain up to date over time: - -https://raw.githubusercontent.com/filecoin-project/go-fil-markets/master/docs/storageprovider.mmd.svg - -*/ -package providerstates diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_fsm.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_fsm.go deleted file mode 100644 index 4d0da22900..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_fsm.go +++ /dev/null @@ -1,174 +0,0 @@ -package providerstates - -import ( - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/storagemarket" -) - -// ProviderEvents are the events that can happen in a storage provider -var ProviderEvents = fsm.Events{ - fsm.Event(storagemarket.ProviderEventOpen).From(storagemarket.StorageDealUnknown).To(storagemarket.StorageDealValidating), - fsm.Event(storagemarket.ProviderEventNodeErrored).FromAny().To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("error calling node: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventDealRejected). - FromMany(storagemarket.StorageDealValidating, storagemarket.StorageDealVerifyData, storagemarket.StorageDealAcceptWait).To(storagemarket.StorageDealRejecting). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("deal rejected: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventRejectionSent). - From(storagemarket.StorageDealRejecting).To(storagemarket.StorageDealFailing), - fsm.Event(storagemarket.ProviderEventDealDeciding). 
- From(storagemarket.StorageDealValidating).To(storagemarket.StorageDealAcceptWait), - fsm.Event(storagemarket.ProviderEventDataRequested). - From(storagemarket.StorageDealAcceptWait).To(storagemarket.StorageDealWaitingForData), - fsm.Event(storagemarket.ProviderEventDataTransferFailed). - From(storagemarket.StorageDealTransferring).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("error transferring data: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventDataTransferInitiated). - From(storagemarket.StorageDealWaitingForData).To(storagemarket.StorageDealTransferring), - fsm.Event(storagemarket.ProviderEventDataTransferCompleted). - From(storagemarket.StorageDealTransferring).To(storagemarket.StorageDealVerifyData), - fsm.Event(storagemarket.ProviderEventDataVerificationFailed). - From(storagemarket.StorageDealVerifyData).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("deal data verification failed: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventVerifiedData). - FromMany(storagemarket.StorageDealVerifyData, storagemarket.StorageDealWaitingForData).To(storagemarket.StorageDealEnsureProviderFunds). - Action(func(deal *storagemarket.MinerDeal, path filestore.Path, metadataPath filestore.Path) error { - deal.PiecePath = path - deal.MetadataPath = metadataPath - return nil - }), - fsm.Event(storagemarket.ProviderEventFundingInitiated). - From(storagemarket.StorageDealEnsureProviderFunds).To(storagemarket.StorageDealProviderFunding). - Action(func(deal *storagemarket.MinerDeal, mcid cid.Cid) error { - deal.AddFundsCid = &mcid - return nil - }), - fsm.Event(storagemarket.ProviderEventFunded). 
- FromMany(storagemarket.StorageDealProviderFunding, storagemarket.StorageDealEnsureProviderFunds).To(storagemarket.StorageDealPublish), - fsm.Event(storagemarket.ProviderEventDealPublishInitiated). - From(storagemarket.StorageDealPublish).To(storagemarket.StorageDealPublishing). - Action(func(deal *storagemarket.MinerDeal, publishCid cid.Cid) error { - deal.PublishCid = &publishCid - return nil - }), - fsm.Event(storagemarket.ProviderEventDealPublishError). - From(storagemarket.StorageDealPublishing).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("PublishStorageDeal error: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventSendResponseFailed). - FromMany(storagemarket.StorageDealAcceptWait, storagemarket.StorageDealRejecting).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("sending response to deal: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventDealPublished). - From(storagemarket.StorageDealPublishing).To(storagemarket.StorageDealStaged). - Action(func(deal *storagemarket.MinerDeal, dealID abi.DealID) error { - deal.DealID = dealID - return nil - }), - fsm.Event(storagemarket.ProviderEventFileStoreErrored). - FromMany(storagemarket.StorageDealStaged, storagemarket.StorageDealSealing, storagemarket.StorageDealActive).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("accessing file store: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventDealHandoffFailed).From(storagemarket.StorageDealStaged).To(storagemarket.StorageDealFailing). 
- Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("handing off deal to node: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventDealHandedOff).From(storagemarket.StorageDealStaged).To(storagemarket.StorageDealSealing), - fsm.Event(storagemarket.ProviderEventDealActivationFailed). - From(storagemarket.StorageDealSealing).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("error activating deal: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventDealActivated). - From(storagemarket.StorageDealSealing).To(storagemarket.StorageDealRecordPiece), - - fsm.Event(storagemarket.ProviderEventPieceStoreErrored). - From(storagemarket.StorageDealRecordPiece).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("accessing piece store: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventUnableToLocatePiece). - From(storagemarket.StorageDealRecordPiece).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, dealID abi.DealID, err error) error { - deal.Message = xerrors.Errorf("locating piece for deal ID %d in sector: %w", deal.DealID, err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventReadMetadataErrored). - From(storagemarket.StorageDealRecordPiece).To(storagemarket.StorageDealFailing). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("error reading piece metadata: %w", err).Error() - return nil - }), - fsm.Event(storagemarket.ProviderEventPieceRecorded). - From(storagemarket.StorageDealRecordPiece).To(storagemarket.StorageDealActive), - - fsm.Event(storagemarket.ProviderEventDealSlashed). - From(storagemarket.StorageDealActive).To(storagemarket.StorageDealSlashed). 
- Action(func(deal *storagemarket.MinerDeal, slashEpoch abi.ChainEpoch) error { - deal.SlashEpoch = slashEpoch - return nil - }), - fsm.Event(storagemarket.ProviderEventDealExpired). - From(storagemarket.StorageDealActive).To(storagemarket.StorageDealExpired), - fsm.Event(storagemarket.ProviderEventDealCompletionFailed). - From(storagemarket.StorageDealActive).To(storagemarket.StorageDealError). - Action(func(deal *storagemarket.MinerDeal, err error) error { - deal.Message = xerrors.Errorf("error waiting for deal completion: %w", err).Error() - return nil - }), - - fsm.Event(storagemarket.ProviderEventFailed).From(storagemarket.StorageDealFailing).To(storagemarket.StorageDealError), - fsm.Event(storagemarket.ProviderEventRestart). - FromMany(storagemarket.StorageDealValidating, storagemarket.StorageDealAcceptWait, storagemarket.StorageDealRejecting).To(storagemarket.StorageDealError). - FromAny().ToNoChange(), -} - -// ProviderStateEntryFuncs are the handlers for different states in a storage client -var ProviderStateEntryFuncs = fsm.StateEntryFuncs{ - storagemarket.StorageDealValidating: ValidateDealProposal, - storagemarket.StorageDealAcceptWait: DecideOnProposal, - storagemarket.StorageDealVerifyData: VerifyData, - storagemarket.StorageDealEnsureProviderFunds: EnsureProviderFunds, - storagemarket.StorageDealProviderFunding: WaitForFunding, - storagemarket.StorageDealPublish: PublishDeal, - storagemarket.StorageDealPublishing: WaitForPublish, - storagemarket.StorageDealStaged: HandoffDeal, - storagemarket.StorageDealSealing: VerifyDealActivated, - storagemarket.StorageDealRejecting: RejectDeal, - storagemarket.StorageDealRecordPiece: RecordPieceInfo, - storagemarket.StorageDealActive: WaitForDealCompletion, - storagemarket.StorageDealFailing: FailDeal, -} - -// ProviderFinalityStates are the states that terminate deal processing for a deal. -// When a provider restarts, it restarts only deals that are not in a finality state. 
-var ProviderFinalityStates = []fsm.StateKey{ - storagemarket.StorageDealError, - storagemarket.StorageDealSlashed, - storagemarket.StorageDealExpired, -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states.go deleted file mode 100644 index 6a32d4b092..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states.go +++ /dev/null @@ -1,410 +0,0 @@ -package providerstates - -import ( - "bytes" - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-padreader" - "github.com/filecoin-project/go-statemachine/fsm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-ipld-prime" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -var log = logging.Logger("providerstates") - -// ProviderDealEnvironment are the dependencies needed for processing deals -// with a ProviderStateEntryFunc -type ProviderDealEnvironment interface { - Address() address.Address - Node() storagemarket.StorageProviderNode - Ask() storagemarket.StorageAsk - GeneratePieceCommitmentToFile(payloadCid cid.Cid, selector ipld.Node) (cid.Cid, filestore.Path, filestore.Path, error) - SendSignedResponse(ctx 
context.Context, response *network.Response) error - Disconnect(proposalCid cid.Cid) error - FileStore() filestore.FileStore - PieceStore() piecestore.PieceStore - DealAcceptanceBuffer() abi.ChainEpoch - RunCustomDecisionLogic(context.Context, storagemarket.MinerDeal) (bool, string, error) -} - -// ProviderStateEntryFunc is the signature for a StateEntryFunc in the provider FSM -type ProviderStateEntryFunc func(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error - -// ValidateDealProposal validates a proposed deal against the provider criteria -func ValidateDealProposal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - tok, height, err := environment.Node().GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error getting most recent state id: %w", err)) - } - - if err := providerutils.VerifyProposal(ctx.Context(), deal.ClientDealProposal, tok, environment.Node().VerifySignature); err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("verifying StorageDealProposal: %w", err)) - } - - proposal := deal.Proposal - - if proposal.Provider != environment.Address() { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("incorrect provider for deal")) - } - - if height > proposal.StartEpoch-environment.DealAcceptanceBuffer() { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("deal start epoch is too soon or deal already expired")) - } - - // TODO: check StorageCollateral - - minPrice := big.Div(big.Mul(environment.Ask().Price, abi.NewTokenAmount(int64(proposal.PieceSize))), abi.NewTokenAmount(1<<30)) - if proposal.StoragePricePerEpoch.LessThan(minPrice) { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, - xerrors.Errorf("storage price per epoch less than asking price: %s < %s", proposal.StoragePricePerEpoch, 
minPrice)) - } - - if proposal.PieceSize < environment.Ask().MinPieceSize { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, - xerrors.Errorf("piece size less than minimum required size: %d < %d", proposal.PieceSize, environment.Ask().MinPieceSize)) - } - - if proposal.PieceSize > environment.Ask().MaxPieceSize { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, - xerrors.Errorf("piece size more than maximum allowed size: %d > %d", proposal.PieceSize, environment.Ask().MaxPieceSize)) - } - - // check market funds - clientMarketBalance, err := environment.Node().GetBalance(ctx.Context(), proposal.Client, tok) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error getting client market balance failed: %w", err)) - } - - // This doesn't guarantee that the client won't withdraw / lock those funds - // but it's a decent first filter - if clientMarketBalance.Available.LessThan(proposal.TotalStorageFee()) { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.New("clientMarketBalance.Available too small")) - } - - // Verified deal checks - if proposal.VerifiedDeal { - dataCap, err := environment.Node().GetDataCap(ctx.Context(), proposal.Client, tok) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("node error fetching verified data cap: %w", err)) - } - - pieceSize := big.NewIntUnsigned(uint64(proposal.PieceSize)) - if dataCap.LessThan(pieceSize) { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("verified deal DataCap too small for proposed piece size")) - } - } - - return ctx.Trigger(storagemarket.ProviderEventDealDeciding) -} - -// DecideOnProposal allows custom decision logic to run before accepting a deal, such as allowing a manual -// operator to decide whether or not to accept the deal -func DecideOnProposal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - 
accept, reason, err := environment.RunCustomDecisionLogic(ctx.Context(), deal) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, xerrors.Errorf("custom deal decision logic failed: %w", err)) - } - - if !accept { - return ctx.Trigger(storagemarket.ProviderEventDealRejected, fmt.Errorf(reason)) - } - - // Send intent to accept - err = environment.SendSignedResponse(ctx.Context(), &network.Response{ - State: storagemarket.StorageDealWaitingForData, - Proposal: deal.ProposalCid, - }) - - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventSendResponseFailed, err) - } - - if err := environment.Disconnect(deal.ProposalCid); err != nil { - log.Warnf("closing client connection: %+v", err) - } - - return ctx.Trigger(storagemarket.ProviderEventDataRequested) -} - -// VerifyData verifies that data received for a deal matches the pieceCID -// in the proposal -func VerifyData(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - - pieceCid, piecePath, metadataPath, err := environment.GeneratePieceCommitmentToFile(deal.Ref.Root, shared.AllSelector()) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDataVerificationFailed, xerrors.Errorf("error generating CommP: %w", err)) - } - - // Verify CommP matches - if pieceCid != deal.Proposal.PieceCID { - return ctx.Trigger(storagemarket.ProviderEventDataVerificationFailed, xerrors.Errorf("proposal CommP doesn't match calculated CommP")) - } - - return ctx.Trigger(storagemarket.ProviderEventVerifiedData, piecePath, metadataPath) -} - -// EnsureProviderFunds adds funds, as needed to the StorageMarketActor, so the miner has adequate collateral for the deal -func EnsureProviderFunds(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - node := environment.Node() - - tok, _, err := node.GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventNodeErrored, 
xerrors.Errorf("acquiring chain head: %w", err)) - } - - waddr, err := node.GetMinerWorkerAddress(ctx.Context(), deal.Proposal.Provider, tok) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("looking up miner worker: %w", err)) - } - - mcid, err := node.EnsureFunds(ctx.Context(), deal.Proposal.Provider, waddr, deal.Proposal.ProviderCollateral, tok) - - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("ensuring funds: %w", err)) - } - - // if no message was sent, and there was no error, it was instantaneous - if mcid == cid.Undef { - return ctx.Trigger(storagemarket.ProviderEventFunded) - } - - return ctx.Trigger(storagemarket.ProviderEventFundingInitiated, mcid) -} - -// WaitForFunding waits for a message posted to add funds to the StorageMarketActor to appear on chain -func WaitForFunding(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - node := environment.Node() - - return node.WaitForMessage(ctx.Context(), *deal.AddFundsCid, func(code exitcode.ExitCode, bytes []byte, err error) error { - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("AddFunds errored: %w", err)) - } - if code != exitcode.Ok { - return ctx.Trigger(storagemarket.ProviderEventNodeErrored, xerrors.Errorf("AddFunds exit code: %s", code.String())) - } - return ctx.Trigger(storagemarket.ProviderEventFunded) - }) -} - -// PublishDeal sends a message to publish a deal on chain -func PublishDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - smDeal := storagemarket.MinerDeal{ - Client: deal.Client, - ClientDealProposal: deal.ClientDealProposal, - ProposalCid: deal.ProposalCid, - State: deal.State, - Ref: deal.Ref, - } - - mcid, err := environment.Node().PublishDeals(ctx.Context(), smDeal) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventNodeErrored, 
xerrors.Errorf("publishing deal: %w", err)) - } - - return ctx.Trigger(storagemarket.ProviderEventDealPublishInitiated, mcid) -} - -// WaitForPublish waits for the publish message on chain and sends the deal id back to the client -func WaitForPublish(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - return environment.Node().WaitForMessage(ctx.Context(), *deal.PublishCid, func(code exitcode.ExitCode, retBytes []byte, err error) error { - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealPublishError, xerrors.Errorf("PublishStorageDeals errored: %w", err)) - } - if code != exitcode.Ok { - return ctx.Trigger(storagemarket.ProviderEventDealPublishError, xerrors.Errorf("PublishStorageDeals exit code: %s", code.String())) - } - var retval market.PublishStorageDealsReturn - err = retval.UnmarshalCBOR(bytes.NewReader(retBytes)) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealPublishError, xerrors.Errorf("PublishStorageDeals error unmarshalling result: %w", err)) - } - - return ctx.Trigger(storagemarket.ProviderEventDealPublished, retval.IDs[0]) - }) -} - -// HandoffDeal hands off a published deal for sealing and commitment in a sector -func HandoffDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - file, err := environment.FileStore().Open(deal.PiecePath) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventFileStoreErrored, xerrors.Errorf("reading piece at path %s: %w", deal.PiecePath, err)) - } - paddedReader, paddedSize := padreader.New(file, uint64(file.Size())) - err = environment.Node().OnDealComplete( - ctx.Context(), - storagemarket.MinerDeal{ - Client: deal.Client, - ClientDealProposal: deal.ClientDealProposal, - ProposalCid: deal.ProposalCid, - State: deal.State, - Ref: deal.Ref, - DealID: deal.DealID, - FastRetrieval: deal.FastRetrieval, - }, - paddedSize, - paddedReader, - ) - - if err != nil { - return 
ctx.Trigger(storagemarket.ProviderEventDealHandoffFailed, err) - } - return ctx.Trigger(storagemarket.ProviderEventDealHandedOff) -} - -// VerifyDealActivated verifies that a deal has been committed to a sector and activated -func VerifyDealActivated(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - // TODO: consider waiting for seal to happen - cb := func(err error) { - if err != nil { - _ = ctx.Trigger(storagemarket.ProviderEventDealActivationFailed, err) - } else { - _ = ctx.Trigger(storagemarket.ProviderEventDealActivated) - } - } - - err := environment.Node().OnDealSectorCommitted(ctx.Context(), deal.Proposal.Provider, deal.DealID, cb) - - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealActivationFailed, err) - } - return nil -} - -// RecordPieceInfo records sector information about an activated deal so that the data -// can be retrieved later -func RecordPieceInfo(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - tok, _, err := environment.Node().GetChainHead(ctx.Context()) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventUnableToLocatePiece, deal.DealID, err) - } - - sectorID, offset, length, err := environment.Node().LocatePieceForDealWithinSector(ctx.Context(), deal.DealID, tok) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventUnableToLocatePiece, deal.DealID, err) - } - - var blockLocations map[cid.Cid]piecestore.BlockLocation - if deal.MetadataPath != filestore.Path("") { - blockLocations, err = providerutils.LoadBlockLocations(environment.FileStore(), deal.MetadataPath) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventReadMetadataErrored, err) - } - } else { - blockLocations = map[cid.Cid]piecestore.BlockLocation{ - deal.Ref.Root: {}, - } - } - - // TODO: Record actual block locations for all CIDs in piece by improving car writing - err = 
environment.PieceStore().AddPieceBlockLocations(deal.Proposal.PieceCID, blockLocations) - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventPieceStoreErrored, xerrors.Errorf("adding piece block locations: %w", err)) - } - - err = environment.PieceStore().AddDealForPiece(deal.Proposal.PieceCID, piecestore.DealInfo{ - DealID: deal.DealID, - SectorID: sectorID, - Offset: offset, - Length: length, - }) - - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventPieceStoreErrored, xerrors.Errorf("adding deal info for piece: %w", err)) - } - - err = environment.FileStore().Delete(deal.PiecePath) - if err != nil { - log.Warnf("deleting piece at path %s: %w", deal.PiecePath, err) - } - if deal.MetadataPath != filestore.Path("") { - err := environment.FileStore().Delete(deal.MetadataPath) - if err != nil { - log.Warnf("deleting piece at path %s: %w", deal.MetadataPath, err) - } - } - - return ctx.Trigger(storagemarket.ProviderEventPieceRecorded) -} - -// WaitForDealCompletion waits for the deal to be slashed or to expire -func WaitForDealCompletion(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - node := environment.Node() - - // Called when the deal expires - expiredCb := func(err error) { - if err != nil { - _ = ctx.Trigger(storagemarket.ProviderEventDealCompletionFailed, xerrors.Errorf("deal expiration err: %w", err)) - } else { - _ = ctx.Trigger(storagemarket.ProviderEventDealExpired) - } - } - - // Called when the deal is slashed - slashedCb := func(slashEpoch abi.ChainEpoch, err error) { - if err != nil { - _ = ctx.Trigger(storagemarket.ProviderEventDealCompletionFailed, xerrors.Errorf("deal slashing err: %w", err)) - } else { - _ = ctx.Trigger(storagemarket.ProviderEventDealSlashed, slashEpoch) - } - } - - if err := node.OnDealExpiredOrSlashed(ctx.Context(), deal.DealID, expiredCb, slashedCb); err != nil { - return ctx.Trigger(storagemarket.ProviderEventDealCompletionFailed, err) - } - - return nil -} 
- -// RejectDeal sends a failure response before terminating a deal -func RejectDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - err := environment.SendSignedResponse(ctx.Context(), &network.Response{ - State: storagemarket.StorageDealFailing, - Message: deal.Message, - Proposal: deal.ProposalCid, - }) - - if err != nil { - return ctx.Trigger(storagemarket.ProviderEventSendResponseFailed, err) - } - - if err := environment.Disconnect(deal.ProposalCid); err != nil { - log.Warnf("closing client connection: %+v", err) - } - - return ctx.Trigger(storagemarket.ProviderEventRejectionSent) -} - -// FailDeal cleans up before terminating a deal -func FailDeal(ctx fsm.Context, environment ProviderDealEnvironment, deal storagemarket.MinerDeal) error { - - log.Warnf("deal %s failed: %s", deal.ProposalCid, deal.Message) - - if deal.PiecePath != filestore.Path("") { - err := environment.FileStore().Delete(deal.PiecePath) - if err != nil { - log.Warnf("deleting piece at path %s: %w", deal.PiecePath, err) - } - } - if deal.MetadataPath != filestore.Path("") { - err := environment.FileStore().Delete(deal.MetadataPath) - if err != nil { - log.Warnf("deleting piece at path %s: %w", deal.MetadataPath, err) - } - } - return ctx.Trigger(storagemarket.ProviderEventFailed) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states_test.go deleted file mode 100644 index 38eca3489d..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates/provider_states_test.go +++ /dev/null @@ -1,1136 +0,0 @@ -package providerstates_test - -import ( - "bytes" - "context" - "errors" - "fmt" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-statemachine/fsm" - fsmtest 
"github.com/filecoin-project/go-statemachine/fsm/testutil" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - "github.com/ipld/go-ipld-prime" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/shared" - tut "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerstates" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" -) - -func TestValidateDealProposal(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runValidateDealProposal := makeExecutor(ctx, eventProcessor, providerstates.ValidateDealProposal, storagemarket.StorageDealValidating) - otherAddr, err := address.NewActorAddress([]byte("applesauce")) - require.NoError(t, err) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, 
storagemarket.StorageDealAcceptWait, deal.State) - }, - }, - "verify signature fails": { - nodeParams: nodeParams{ - VerifySignatureFails: true, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: verifying StorageDealProposal: could not verify signature", deal.Message) - }, - }, - "provider address does not match": { - environmentParams: environmentParams{ - Address: otherAddr, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: incorrect provider for deal", deal.Message) - }, - }, - "MostRecentStateID errors": { - nodeParams: nodeParams{ - MostRecentStateIDError: errors.New("couldn't get id"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: node error getting most recent state id: couldn't get id", deal.Message) - }, - }, - "CurrentHeight <= StartEpoch - DealAcceptanceBuffer() succeeds": { - environmentParams: environmentParams{DealAcceptanceBuffer: 10}, - dealParams: dealParams{StartEpoch: 200}, - nodeParams: nodeParams{Height: 190}, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealAcceptWait, deal.State) - }, - }, - "CurrentHeight > StartEpoch - DealAcceptanceBuffer() fails": { - environmentParams: environmentParams{DealAcceptanceBuffer: 10}, - dealParams: dealParams{StartEpoch: 200}, - nodeParams: nodeParams{Height: 191}, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: deal 
start epoch is too soon or deal already expired", deal.Message) - }, - }, - "PricePerEpoch too low": { - dealParams: dealParams{ - StoragePricePerEpoch: abi.NewTokenAmount(5000), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: storage price per epoch less than asking price: 5000 < 9765", deal.Message) - }, - }, - "PieceSize < MinPieceSize": { - dealParams: dealParams{ - PieceSize: abi.PaddedPieceSize(128), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: piece size less than minimum required size: 128 < 256", deal.Message) - }, - }, - "Get balance error": { - nodeParams: nodeParams{ - ClientMarketBalanceError: errors.New("could not get balance"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: node error getting client market balance failed: could not get balance", deal.Message) - }, - }, - "Not enough funds": { - nodeParams: nodeParams{ - ClientMarketBalance: abi.NewTokenAmount(150 * 10000), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: clientMarketBalance.Available too small", deal.Message) - }, - }, - "verified deal succeeds": { - dealParams: dealParams{ - VerifiedDeal: true, - }, - nodeParams: nodeParams{ - DataCap: big.NewIntUnsigned(uint64(defaultPieceSize)), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - require.True(t, deal.Proposal.VerifiedDeal) - tut.AssertDealState(t, 
storagemarket.StorageDealAcceptWait, deal.State) - }, - }, - "verified deal fails getting client data cap": { - dealParams: dealParams{ - VerifiedDeal: true, - }, - nodeParams: nodeParams{ - GetDataCapError: xerrors.Errorf("failure getting data cap"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - require.True(t, deal.Proposal.VerifiedDeal) - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: node error fetching verified data cap: failure getting data cap", deal.Message) - }, - }, - "verified deal fails with insufficient data cap": { - dealParams: dealParams{ - VerifiedDeal: true, - }, - nodeParams: nodeParams{ - DataCap: big.NewIntUnsigned(uint64(defaultPieceSize - 1)), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - require.True(t, deal.Proposal.VerifiedDeal) - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: verified deal DataCap too small for proposed piece size", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runValidateDealProposal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestDecideOnProposal(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runDecideOndeal := makeExecutor(ctx, eventProcessor, providerstates.DecideOnProposal, storagemarket.StorageDealAcceptWait) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - 
"succeeds": { - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealWaitingForData, deal.State) - }, - }, - "Custom Decision Rejects Deal": { - environmentParams: environmentParams{ - RejectDeal: true, - RejectReason: "I just don't like it", - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: I just don't like it", deal.Message) - }, - }, - "Custom Decision Errors": { - environmentParams: environmentParams{ - DecisionError: errors.New("I can't make up my mind"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRejecting, deal.State) - require.Equal(t, "deal rejected: custom deal decision logic failed: I can't make up my mind", deal.Message) - }, - }, - "SendSignedResponse errors": { - environmentParams: environmentParams{ - SendSignedResponseError: errors.New("could not send"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "sending response to deal: could not send", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runDecideOndeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestVerifyData(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - expPath := filestore.Path("applesauce.txt") - expMetaPath := filestore.Path("somemetadata.txt") - runVerifyData := makeExecutor(ctx, eventProcessor, providerstates.VerifyData, 
storagemarket.StorageDealVerifyData) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - environmentParams: environmentParams{ - Path: expPath, - MetadataPath: expMetaPath, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealEnsureProviderFunds, deal.State) - require.Equal(t, expPath, deal.PiecePath) - require.Equal(t, expMetaPath, deal.MetadataPath) - }, - }, - "generate piece CID fails": { - environmentParams: environmentParams{ - GenerateCommPError: errors.New("could not generate CommP"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "deal data verification failed: error generating CommP: could not generate CommP", deal.Message) - }, - }, - "piece CIDs do not match": { - environmentParams: environmentParams{ - PieceCid: tut.GenerateCids(1)[0], - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "deal data verification failed: proposal CommP doesn't match calculated CommP", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runVerifyData(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestWaitForFunding(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runWaitForFunding 
:= makeExecutor(ctx, eventProcessor, providerstates.WaitForFunding, storagemarket.StorageDealProviderFunding) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - nodeParams: nodeParams{ - WaitForMessageExitCode: exitcode.Ok, - WaitForMessageRetBytes: []byte{}, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealPublish, deal.State) - }, - }, - "AddFunds returns non-ok exit code": { - nodeParams: nodeParams{ - WaitForMessageExitCode: exitcode.ErrInsufficientFunds, - WaitForMessageRetBytes: []byte{}, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, fmt.Sprintf("error calling node: AddFunds exit code: %s", exitcode.ErrInsufficientFunds), deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runWaitForFunding(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestEnsureProviderFunds(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runEnsureProviderFunds := makeExecutor(ctx, eventProcessor, providerstates.EnsureProviderFunds, storagemarket.StorageDealEnsureProviderFunds) - cids := tut.GenerateCids(1) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector 
func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds immediately": { - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealPublish, deal.State) - }, - }, - "succeeds by sending an AddBalance message": { - dealParams: dealParams{ - ProviderCollateral: abi.NewTokenAmount(1), - }, - nodeParams: nodeParams{ - AddFundsCid: cids[0], - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealProviderFunding, deal.State) - require.Equal(t, &cids[0], deal.AddFundsCid) - }, - }, - "get miner worker fails": { - nodeParams: nodeParams{ - MinerWorkerError: errors.New("could not get worker"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "error calling node: looking up miner worker: could not get worker", deal.Message) - }, - }, - "ensureFunds errors": { - nodeParams: nodeParams{ - EnsureFundsError: errors.New("not enough funds"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "error calling node: ensuring funds: not enough funds", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runEnsureProviderFunds(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestPublishDeal(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runPublishDeal := makeExecutor(ctx, eventProcessor, providerstates.PublishDeal, 
storagemarket.StorageDealPublish) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealPublishing, deal.State) - }, - }, - "PublishDealsErrors errors": { - nodeParams: nodeParams{ - PublishDealsError: errors.New("could not post to chain"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "error calling node: publishing deal: could not post to chain", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runPublishDeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestWaitForPublish(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runWaitForPublish := makeExecutor(ctx, eventProcessor, providerstates.WaitForPublish, storagemarket.StorageDealPublishing) - expDealID, psdReturnBytes := generatePublishDealsReturn(t) - - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - nodeParams: nodeParams{ - WaitForMessageRetBytes: psdReturnBytes, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env 
*fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealStaged, deal.State) - require.Equal(t, expDealID, deal.DealID) - }, - }, - "PublishStorageDeal errors": { - nodeParams: nodeParams{ - WaitForMessageExitCode: exitcode.SysErrForbidden, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "PublishStorageDeal error: PublishStorageDeals exit code: SysErrForbidden(8)", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runWaitForPublish(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestHandoffDeal(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runHandoffDeal := makeExecutor(ctx, eventProcessor, providerstates.HandoffDeal, storagemarket.StorageDealStaged) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - dealParams: dealParams{ - PiecePath: defaultPath, - FastRetrieval: true, - }, - fileStoreParams: tut.TestFileStoreParams{ - Files: []filestore.File{defaultDataFile}, - ExpectedOpens: []filestore.Path{defaultPath}, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealSealing, deal.State) - require.Len(t, env.node.OnDealCompleteCalls, 1) - require.True(t, env.node.OnDealCompleteCalls[0].FastRetrieval) - }, - }, - "opening file errors": { - dealParams: dealParams{ - PiecePath: 
filestore.Path("missing.txt"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, fmt.Sprintf("accessing file store: reading piece at path missing.txt: %s", tut.TestErrNotFound.Error()), deal.Message) - }, - }, - "OnDealComplete errors": { - dealParams: dealParams{ - PiecePath: defaultPath, - }, - fileStoreParams: tut.TestFileStoreParams{ - Files: []filestore.File{defaultDataFile}, - ExpectedOpens: []filestore.Path{defaultPath}, - }, - nodeParams: nodeParams{ - OnDealCompleteError: errors.New("failed building sector"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "handing off deal to node: failed building sector", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runHandoffDeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestVerifyDealActivated(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runVerifyDealActivated := makeExecutor(ctx, eventProcessor, providerstates.VerifyDealActivated, storagemarket.StorageDealSealing) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealRecordPiece, deal.State) - }, - }, - "sync error": 
{ - nodeParams: nodeParams{ - DealCommittedSyncError: errors.New("couldn't check deal commitment"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "error activating deal: couldn't check deal commitment", deal.Message) - }, - }, - "async error": { - nodeParams: nodeParams{ - DealCommittedAsyncError: errors.New("deal did not appear on chain"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "error activating deal: deal did not appear on chain", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runVerifyDealActivated(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestRecordPieceInfo(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runRecordPieceInfo := makeExecutor(ctx, eventProcessor, providerstates.RecordPieceInfo, storagemarket.StorageDealRecordPiece) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - dealParams: dealParams{ - PiecePath: defaultPath, - }, - fileStoreParams: tut.TestFileStoreParams{ - Files: []filestore.File{defaultDataFile}, - ExpectedDeletions: []filestore.Path{defaultPath}, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealActive, deal.State) - 
}, - }, - "succeeds w metadata": { - dealParams: dealParams{ - PiecePath: defaultPath, - MetadataPath: defaultMetadataPath, - }, - fileStoreParams: tut.TestFileStoreParams{ - Files: []filestore.File{defaultDataFile, defaultMetadataFile}, - ExpectedOpens: []filestore.Path{defaultMetadataPath}, - ExpectedDeletions: []filestore.Path{defaultMetadataPath, defaultPath}, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealActive, deal.State) - }, - }, - "locate piece fails": { - dealParams: dealParams{ - DealID: abi.DealID(1234), - }, - nodeParams: nodeParams{ - LocatePieceForDealWithinSectorError: errors.New("could not find piece"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "locating piece for deal ID 1234 in sector: could not find piece", deal.Message) - }, - }, - "reading metadata fails": { - dealParams: dealParams{ - MetadataPath: filestore.Path("Missing.txt"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, fmt.Sprintf("error reading piece metadata: %s", tut.TestErrNotFound.Error()), deal.Message) - }, - }, - "add piece block locations errors": { - pieceStoreParams: tut.TestPieceStoreParams{ - AddPieceBlockLocationsError: errors.New("could not add block locations"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "accessing piece store: adding piece block locations: could not add block locations", deal.Message) - }, - }, - "add deal for piece errors": { - pieceStoreParams: tut.TestPieceStoreParams{ - AddDealForPieceError: errors.New("could not add deal info"), - }, - 
dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, "accessing piece store: adding deal info for piece: could not add deal info", deal.Message) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runRecordPieceInfo(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestWaitForDealCompletion(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runWaitForDealCompletion := makeExecutor(ctx, eventProcessor, providerstates.WaitForDealCompletion, storagemarket.StorageDealActive) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "slashing succeeds": { - nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(5)}, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealSlashed, deal.State) - require.Equal(t, abi.ChainEpoch(5), deal.SlashEpoch) - }, - }, - "expiration succeeds": { - // OnDealSlashedEpoch of zero signals to test node to call onDealExpired() - nodeParams: nodeParams{OnDealSlashedEpoch: abi.ChainEpoch(0)}, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealExpired, deal.State) - }, - }, - "slashing fails": { - nodeParams: nodeParams{OnDealSlashedError: errors.New("an err")}, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - 
tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - require.Equal(t, "error waiting for deal completion: deal slashing err: an err", deal.Message) - }, - }, - "expiration fails": { - nodeParams: nodeParams{OnDealExpiredError: errors.New("an err")}, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - require.Equal(t, "error waiting for deal completion: deal expiration err: an err", deal.Message) - }, - }, - "fails synchronously": { - nodeParams: nodeParams{WaitForDealCompletionError: errors.New("an err")}, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - require.Equal(t, "error waiting for deal completion: an err", deal.Message) - }, - }, - } - - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runWaitForDealCompletion(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestRejectDeal(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runRejectDeal := makeExecutor(ctx, eventProcessor, providerstates.RejectDeal, storagemarket.StorageDealRejecting) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, 1, env.disconnectCalls) - }, - }, - "fails if it cannot 
send a response": { - environmentParams: environmentParams{ - SendSignedResponseError: xerrors.New("error sending response"), - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealFailing, deal.State) - require.Equal(t, deal.Message, "sending response to deal: error sending response") - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runRejectDeal(t, data.nodeParams, data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -func TestFailDeal(t *testing.T) { - ctx := context.Background() - eventProcessor, err := fsm.NewEventProcessor(storagemarket.MinerDeal{}, "State", providerstates.ProviderEvents) - require.NoError(t, err) - runFailDeal := makeExecutor(ctx, eventProcessor, providerstates.FailDeal, storagemarket.StorageDealFailing) - tests := map[string]struct { - nodeParams nodeParams - dealParams dealParams - environmentParams environmentParams - fileStoreParams tut.TestFileStoreParams - pieceStoreParams tut.TestPieceStoreParams - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) - }{ - "succeeds": { - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - }, - }, - "succeeds, file deletions": { - dealParams: dealParams{ - PiecePath: defaultPath, - MetadataPath: defaultMetadataPath, - }, - fileStoreParams: tut.TestFileStoreParams{ - Files: []filestore.File{defaultDataFile, defaultMetadataFile}, - ExpectedDeletions: []filestore.Path{defaultPath, defaultMetadataPath}, - }, - dealInspector: func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment) { - tut.AssertDealState(t, storagemarket.StorageDealError, deal.State) - }, - }, - } - for test, data := range tests { - t.Run(test, func(t *testing.T) { - runFailDeal(t, data.nodeParams, 
data.environmentParams, data.dealParams, data.fileStoreParams, data.pieceStoreParams, data.dealInspector) - }) - } -} - -// all of these default parameters are setup to allow a deal to complete each handler with no errors -var defaultHeight = abi.ChainEpoch(50) -var defaultTipSetToken = []byte{1, 2, 3} -var defaultStoragePricePerEpoch = abi.NewTokenAmount(10000) -var defaultPieceSize = abi.PaddedPieceSize(1048576) -var defaultStartEpoch = abi.ChainEpoch(200) -var defaultEndEpoch = abi.ChainEpoch(400) -var defaultPieceCid = tut.GenerateCids(1)[0] -var defaultPath = filestore.Path("file.txt") -var defaultMetadataPath = filestore.Path("metadataPath.txt") -var defaultClientAddress = address.TestAddress -var defaultProviderAddress = address.TestAddress2 -var defaultMinerAddr, _ = address.NewActorAddress([]byte("miner")) -var defaultClientCollateral = abi.NewTokenAmount(0) -var defaultProviderCollateral = abi.NewTokenAmount(0) -var defaultDataRef = storagemarket.DataRef{ - Root: tut.GenerateCids(1)[0], - TransferType: storagemarket.TTGraphsync, -} -var defaultClientMarketBalance = abi.NewTokenAmount(200 * 10000) - -var defaultAsk = storagemarket.StorageAsk{ - Price: abi.NewTokenAmount(10000000), - MinPieceSize: abi.PaddedPieceSize(256), - MaxPieceSize: 1 << 20, -} - -var testData = tut.NewTestIPLDTree() -var dataBuf = new(bytes.Buffer) -var blockLocationBuf = new(bytes.Buffer) -var _ error = testData.DumpToCar(dataBuf, blockrecorder.RecordEachBlockTo(blockLocationBuf)) -var defaultDataFile = tut.NewTestFile(tut.TestFileParams{ - Buffer: dataBuf, - Path: defaultPath, - Size: 400, -}) -var defaultMetadataFile = tut.NewTestFile(tut.TestFileParams{ - Buffer: blockLocationBuf, - Path: defaultMetadataPath, - Size: 400, -}) - -func generatePublishDealsReturn(t *testing.T) (abi.DealID, []byte) { - dealId := abi.DealID(rand.Uint64()) - - psdReturn := market.PublishStorageDealsReturn{IDs: []abi.DealID{dealId}} - psdReturnBytes := bytes.NewBuffer([]byte{}) - err := 
psdReturn.MarshalCBOR(psdReturnBytes) - require.NoError(t, err) - - return dealId, psdReturnBytes.Bytes() -} - -type nodeParams struct { - MinerAddr address.Address - MinerWorkerError error - EnsureFundsError error - Height abi.ChainEpoch - TipSetToken shared.TipSetToken - ClientMarketBalance abi.TokenAmount - ClientMarketBalanceError error - AddFundsCid cid.Cid - VerifySignatureFails bool - MostRecentStateIDError error - PieceLength uint64 - PieceSectorID uint64 - PublishDealsError error - OnDealCompleteError error - LocatePieceForDealWithinSectorError error - DealCommittedSyncError error - DealCommittedAsyncError error - WaitForMessageBlocks bool - WaitForMessageError error - WaitForMessageExitCode exitcode.ExitCode - WaitForMessageRetBytes []byte - WaitForDealCompletionError error - OnDealExpiredError error - OnDealSlashedError error - OnDealSlashedEpoch abi.ChainEpoch - DataCap verifreg.DataCap - GetDataCapError error -} - -type dealParams struct { - PiecePath filestore.Path - MetadataPath filestore.Path - DealID abi.DealID - DataRef *storagemarket.DataRef - StoragePricePerEpoch abi.TokenAmount - ProviderCollateral abi.TokenAmount - PieceSize abi.PaddedPieceSize - StartEpoch abi.ChainEpoch - EndEpoch abi.ChainEpoch - FastRetrieval bool - VerifiedDeal bool -} - -type environmentParams struct { - Address address.Address - Ask storagemarket.StorageAsk - DataTransferError error - PieceCid cid.Cid - Path filestore.Path - MetadataPath filestore.Path - GenerateCommPError error - SendSignedResponseError error - DisconnectError error - DealAcceptanceBuffer int64 - TagsProposal bool - RejectDeal bool - RejectReason string - DecisionError error -} - -type executor func(t *testing.T, - node nodeParams, - params environmentParams, - dealParams dealParams, - fileStoreParams tut.TestFileStoreParams, - pieceStoreParams tut.TestPieceStoreParams, - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)) - -func makeExecutor(ctx context.Context, - 
eventProcessor fsm.EventProcessor, - stateEntryFunc providerstates.ProviderStateEntryFunc, - initialState storagemarket.StorageDealStatus) executor { - return func(t *testing.T, - nodeParams nodeParams, - params environmentParams, - dealParams dealParams, - fileStoreParams tut.TestFileStoreParams, - pieceStoreParams tut.TestPieceStoreParams, - dealInspector func(t *testing.T, deal storagemarket.MinerDeal, env *fakeEnvironment)) { - - smstate := testnodes.NewStorageMarketState() - if nodeParams.Height != abi.ChainEpoch(0) { - smstate.Epoch = nodeParams.Height - smstate.TipSetToken = nodeParams.TipSetToken - } else { - smstate.Epoch = defaultHeight - smstate.TipSetToken = defaultTipSetToken - } - if !nodeParams.ClientMarketBalance.Nil() { - smstate.AddFunds(defaultClientAddress, nodeParams.ClientMarketBalance) - } else { - smstate.AddFunds(defaultClientAddress, defaultClientMarketBalance) - } - - common := testnodes.FakeCommonNode{ - SMState: smstate, - GetChainHeadError: nodeParams.MostRecentStateIDError, - GetBalanceError: nodeParams.ClientMarketBalanceError, - VerifySignatureFails: nodeParams.VerifySignatureFails, - EnsureFundsError: nodeParams.EnsureFundsError, - DealCommittedSyncError: nodeParams.DealCommittedSyncError, - DealCommittedAsyncError: nodeParams.DealCommittedAsyncError, - AddFundsCid: nodeParams.AddFundsCid, - WaitForMessageBlocks: nodeParams.WaitForMessageBlocks, - WaitForMessageError: nodeParams.WaitForMessageError, - WaitForMessageExitCode: nodeParams.WaitForMessageExitCode, - WaitForMessageRetBytes: nodeParams.WaitForMessageRetBytes, - WaitForDealCompletionError: nodeParams.WaitForDealCompletionError, - OnDealExpiredError: nodeParams.OnDealExpiredError, - OnDealSlashedError: nodeParams.OnDealSlashedError, - OnDealSlashedEpoch: nodeParams.OnDealSlashedEpoch, - } - - node := &testnodes.FakeProviderNode{ - FakeCommonNode: common, - MinerAddr: nodeParams.MinerAddr, - MinerWorkerError: nodeParams.MinerWorkerError, - PieceLength: 
nodeParams.PieceLength, - PieceSectorID: nodeParams.PieceSectorID, - PublishDealsError: nodeParams.PublishDealsError, - OnDealCompleteError: nodeParams.OnDealCompleteError, - LocatePieceForDealWithinSectorError: nodeParams.LocatePieceForDealWithinSectorError, - DataCap: nodeParams.DataCap, - GetDataCapErr: nodeParams.GetDataCapError, - } - - if nodeParams.MinerAddr == address.Undef { - node.MinerAddr = defaultMinerAddr - } - - proposal := market.DealProposal{ - PieceCID: defaultPieceCid, - PieceSize: defaultPieceSize, - Client: defaultClientAddress, - Provider: defaultProviderAddress, - StartEpoch: defaultStartEpoch, - EndEpoch: defaultEndEpoch, - StoragePricePerEpoch: defaultStoragePricePerEpoch, - ProviderCollateral: defaultProviderCollateral, - ClientCollateral: defaultClientCollateral, - } - if !dealParams.StoragePricePerEpoch.Nil() { - proposal.StoragePricePerEpoch = dealParams.StoragePricePerEpoch - } - if !dealParams.ProviderCollateral.Nil() { - proposal.ProviderCollateral = dealParams.ProviderCollateral - } - if dealParams.StartEpoch != abi.ChainEpoch(0) { - proposal.StartEpoch = dealParams.StartEpoch - } - if dealParams.EndEpoch != abi.ChainEpoch(0) { - proposal.EndEpoch = dealParams.EndEpoch - } - if dealParams.PieceSize != abi.PaddedPieceSize(0) { - proposal.PieceSize = dealParams.PieceSize - } - proposal.VerifiedDeal = dealParams.VerifiedDeal - signedProposal := &market.ClientDealProposal{ - Proposal: proposal, - ClientSignature: *tut.MakeTestSignature(), - } - dataRef := &defaultDataRef - if dealParams.DataRef != nil { - dataRef = dealParams.DataRef - } - dealState, err := tut.MakeTestMinerDeal(initialState, - signedProposal, dataRef) - require.NoError(t, err) - dealState.AddFundsCid = &tut.GenerateCids(1)[0] - dealState.PublishCid = &tut.GenerateCids(1)[0] - if dealParams.PiecePath != filestore.Path("") { - dealState.PiecePath = dealParams.PiecePath - } - if dealParams.MetadataPath != filestore.Path("") { - dealState.MetadataPath = 
dealParams.MetadataPath - } - if dealParams.DealID != abi.DealID(0) { - dealState.DealID = dealParams.DealID - } - dealState.FastRetrieval = dealParams.FastRetrieval - fs := tut.NewTestFileStore(fileStoreParams) - pieceStore := tut.NewTestPieceStoreWithParams(pieceStoreParams) - expectedTags := make(map[string]struct{}) - if params.TagsProposal { - expectedTags[dealState.ProposalCid.String()] = struct{}{} - } - environment := &fakeEnvironment{ - expectedTags: expectedTags, - receivedTags: make(map[string]struct{}), - address: params.Address, - node: node, - ask: params.Ask, - dataTransferError: params.DataTransferError, - pieceCid: params.PieceCid, - path: params.Path, - metadataPath: params.MetadataPath, - generateCommPError: params.GenerateCommPError, - sendSignedResponseError: params.SendSignedResponseError, - disconnectError: params.DisconnectError, - rejectDeal: params.RejectDeal, - rejectReason: params.RejectReason, - decisionError: params.DecisionError, - dealAcceptanceBuffer: abi.ChainEpoch(params.DealAcceptanceBuffer), - fs: fs, - pieceStore: pieceStore, - } - if environment.pieceCid == cid.Undef { - environment.pieceCid = defaultPieceCid - } - if environment.path == filestore.Path("") { - environment.path = defaultPath - } - if environment.metadataPath == filestore.Path("") { - environment.metadataPath = defaultMetadataPath - } - if environment.address == address.Undef { - environment.address = defaultProviderAddress - } - if environment.ask == storagemarket.StorageAskUndefined { - environment.ask = defaultAsk - } - - fsmCtx := fsmtest.NewTestContext(ctx, eventProcessor) - err = stateEntryFunc(fsmCtx, environment, *dealState) - require.NoError(t, err) - fsmCtx.ReplayEvents(t, dealState) - dealInspector(t, *dealState, environment) - - fs.VerifyExpectations(t) - pieceStore.VerifyExpectations(t) - environment.VerifyExpectations(t) - } -} - -type fakeEnvironment struct { - address address.Address - node *testnodes.FakeProviderNode - ask 
storagemarket.StorageAsk - dataTransferError error - pieceCid cid.Cid - path filestore.Path - metadataPath filestore.Path - generateCommPError error - sendSignedResponseError error - disconnectCalls int - disconnectError error - rejectDeal bool - rejectReason string - decisionError error - fs filestore.FileStore - pieceStore piecestore.PieceStore - dealAcceptanceBuffer abi.ChainEpoch - expectedTags map[string]struct{} - receivedTags map[string]struct{} -} - -func (fe *fakeEnvironment) Address() address.Address { - return fe.address -} - -func (fe *fakeEnvironment) Node() storagemarket.StorageProviderNode { - return fe.node -} - -func (fe *fakeEnvironment) Ask() storagemarket.StorageAsk { - return fe.ask -} - -func (fe *fakeEnvironment) GeneratePieceCommitmentToFile(payloadCid cid.Cid, selector ipld.Node) (cid.Cid, filestore.Path, filestore.Path, error) { - return fe.pieceCid, fe.path, fe.metadataPath, fe.generateCommPError -} - -func (fe *fakeEnvironment) SendSignedResponse(ctx context.Context, response *network.Response) error { - return fe.sendSignedResponseError -} - -func (fe *fakeEnvironment) VerifyExpectations(t *testing.T) { - require.Equal(t, fe.expectedTags, fe.receivedTags) -} - -func (fe *fakeEnvironment) Disconnect(proposalCid cid.Cid) error { - fe.disconnectCalls += 1 - return fe.disconnectError -} - -func (fe *fakeEnvironment) FileStore() filestore.FileStore { - return fe.fs -} - -func (fe *fakeEnvironment) PieceStore() piecestore.PieceStore { - return fe.pieceStore -} - -func (fe *fakeEnvironment) DealAcceptanceBuffer() abi.ChainEpoch { - return fe.dealAcceptanceBuffer -} - -func (fe *fakeEnvironment) RunCustomDecisionLogic(context.Context, storagemarket.MinerDeal) (bool, string, error) { - return !fe.rejectDeal, fe.rejectReason, fe.decisionError -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils.go 
b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils.go deleted file mode 100644 index ba691a06e3..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package providerutils provides utility functions for the storage provider & provider FSM -package providerutils - -import ( - "context" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - "github.com/ipld/go-ipld-prime" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder" -) - -// VerifyFunc is a function that can validate a signature for a given address and bytes -type VerifyFunc func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) - -// VerifyProposal verifies the signature on the given signed proposal matches -// the client addres for the proposal, using the given signature verification function -func VerifyProposal(ctx context.Context, sdp market.ClientDealProposal, tok shared.TipSetToken, verifier VerifyFunc) error { - b, err := cborutil.Dump(&sdp.Proposal) - if err != nil { - return err - } - - return VerifySignature(ctx, sdp.ClientSignature, sdp.Proposal.Client, b, tok, verifier) -} - -// VerifySignature verifies the signature over the given bytes -func VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, buf []byte, tok shared.TipSetToken, verifier VerifyFunc) error { - verified, err := 
verifier(ctx, signature, signer, buf, tok) - if err != nil { - return xerrors.Errorf("verifying: %w", err) - } - - if !verified { - return xerrors.New("could not verify signature") - } - - return nil -} - -// WorkerLookupFunc is a function that can lookup a miner worker address from a storage miner actor -type WorkerLookupFunc func(context.Context, address.Address, shared.TipSetToken) (address.Address, error) - -// SignFunc is a function that can sign a set of bytes with a given address -type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error) - -// SignMinerData signs the given data structure with a signature for the given address -func SignMinerData(ctx context.Context, data interface{}, address address.Address, tok shared.TipSetToken, workerLookup WorkerLookupFunc, sign SignFunc) (*crypto.Signature, error) { - msg, err := cborutil.Dump(data) - if err != nil { - return nil, xerrors.Errorf("serializing: %w", err) - } - - worker, err := workerLookup(ctx, address, tok) - if err != nil { - return nil, err - } - - sig, err := sign(ctx, worker, msg) - if err != nil { - return nil, xerrors.Errorf("failed to sign: %w", err) - } - return sig, nil -} - -// CommPGenerator is a commP generating function that writes to a file -type CommPGenerator func(abi.RegisteredSealProof, cid.Cid, ipld.Node, ...car.OnNewCarBlockFunc) (cid.Cid, filestore.Path, abi.UnpaddedPieceSize, error) - -// GeneratePieceCommitmentWithMetadata generates a piece commitment along with block metadata -func GeneratePieceCommitmentWithMetadata( - fileStore filestore.FileStore, - commPGenerator CommPGenerator, - proofType abi.RegisteredSealProof, - payloadCid cid.Cid, - selector ipld.Node) (cid.Cid, filestore.Path, filestore.Path, error) { - metadataFile, err := fileStore.CreateTemp() - if err != nil { - return cid.Cid{}, "", "", err - } - blockRecorder := blockrecorder.RecordEachBlockTo(metadataFile) - pieceCid, path, _, err := commPGenerator(proofType, payloadCid, selector, 
blockRecorder) - _ = metadataFile.Close() - if err != nil { - _ = fileStore.Delete(metadataFile.Path()) - return cid.Cid{}, "", "", err - } - return pieceCid, path, metadataFile.Path(), err -} - -// LoadBlockLocations loads a metadata file then converts it to a map of cid -> blockLocation -func LoadBlockLocations(fs filestore.FileStore, metadataPath filestore.Path) (map[cid.Cid]piecestore.BlockLocation, error) { - metadataFile, err := fs.Open(metadataPath) - if err != nil { - return nil, err - } - metadata, err := blockrecorder.ReadBlockMetadata(metadataFile) - _ = metadataFile.Close() - if err != nil { - return nil, err - } - blockLocations := make(map[cid.Cid]piecestore.BlockLocation, len(metadata)) - for _, metadatum := range metadata { - blockLocations[metadatum.CID] = piecestore.BlockLocation{RelOffset: metadatum.Offset, BlockSize: metadatum.Size} - } - return blockLocations, nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils_test.go deleted file mode 100644 index 946f7cc8ff..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils/providerutils_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package providerutils_test - -import ( - "bytes" - "context" - "errors" - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - "github.com/ipld/go-car" - "github.com/ipld/go-ipld-prime" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - "github.com/ipld/go-ipld-prime/traversal/selector/builder" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/filestore" - 
"github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/blockrecorder" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -func TestVerifyProposal(t *testing.T) { - tests := map[string]struct { - proposal market.ClientDealProposal - verifier providerutils.VerifyFunc - shouldErr bool - }{ - "successful verification": { - proposal: *shared_testutil.MakeTestClientDealProposal(), - verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { - return true, nil - }, - shouldErr: false, - }, - "bad proposal": { - proposal: market.ClientDealProposal{ - Proposal: market.DealProposal{}, - ClientSignature: *shared_testutil.MakeTestSignature(), - }, - verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { - return true, nil - }, - shouldErr: true, - }, - "verification fails": { - proposal: *shared_testutil.MakeTestClientDealProposal(), - verifier: func(context.Context, crypto.Signature, address.Address, []byte, shared.TipSetToken) (bool, error) { - return false, nil - }, - shouldErr: true, - }, - } - for name, data := range tests { - t.Run(name, func(t *testing.T) { - err := providerutils.VerifyProposal(context.Background(), data.proposal, shared.TipSetToken{}, data.verifier) - require.Equal(t, err != nil, data.shouldErr) - }) - } -} - -func TestSignMinerData(t *testing.T) { - ctx := context.Background() - successLookup := func(context.Context, address.Address, shared.TipSetToken) (address.Address, error) { - return address.TestAddress2, nil - } - successSign := func(context.Context, address.Address, []byte) (*crypto.Signature, error) { - return shared_testutil.MakeTestSignature(), nil - } - tests := map[string]struct { - data interface{} - 
workerLookup providerutils.WorkerLookupFunc - signBytes providerutils.SignFunc - shouldErr bool - }{ - "succeeds": { - data: shared_testutil.MakeTestStorageAsk(), - workerLookup: successLookup, - signBytes: successSign, - shouldErr: false, - }, - "cbor dump errors": { - data: &network.Response{}, - workerLookup: successLookup, - signBytes: successSign, - shouldErr: true, - }, - "worker lookup errors": { - data: shared_testutil.MakeTestStorageAsk(), - workerLookup: func(context.Context, address.Address, shared.TipSetToken) (address.Address, error) { - return address.Undef, errors.New("Something went wrong") - }, - signBytes: successSign, - shouldErr: true, - }, - "signing errors": { - data: shared_testutil.MakeTestStorageAsk(), - workerLookup: successLookup, - signBytes: func(context.Context, address.Address, []byte) (*crypto.Signature, error) { - return nil, errors.New("something went wrong") - }, - shouldErr: true, - }, - } - for name, data := range tests { - t.Run(name, func(t *testing.T) { - _, err := providerutils.SignMinerData(ctx, data.data, address.TestAddress, shared.TipSetToken{}, data.workerLookup, data.signBytes) - require.Equal(t, err != nil, data.shouldErr) - }) - } -} - -func TestCommPGenerationWithMetadata(t *testing.T) { - tempFilePath := filestore.Path("applesauce.jpg") - tempFile := shared_testutil.NewTestFile(shared_testutil.TestFileParams{Path: tempFilePath}) - payloadCid := shared_testutil.GenerateCids(1)[0] - ssb := builder.NewSelectorSpecBuilder(basicnode.Style.Any) - selector := ssb.ExploreAll(ssb.Matcher()).Node() - proofType := abi.RegisteredSealProof_StackedDrg2KiBV1 - pieceCid := shared_testutil.GenerateCids(1)[0] - piecePath := filestore.Path("apiece.jpg") - pieceSize := abi.UnpaddedPieceSize(rand.Uint64()) - testCases := map[string]struct { - fileStoreParams shared_testutil.TestFileStoreParams - commPErr error - expectedPieceCid cid.Cid - expectedPiecePath filestore.Path - expectedMetadataPath filestore.Path - shouldErr bool - }{ - 
"success": { - fileStoreParams: shared_testutil.TestFileStoreParams{ - AvailableTempFiles: []filestore.File{tempFile}, - }, - expectedPieceCid: pieceCid, - expectedPiecePath: piecePath, - expectedMetadataPath: tempFilePath, - shouldErr: false, - }, - "tempfile creations fails": { - fileStoreParams: shared_testutil.TestFileStoreParams{}, - shouldErr: true, - }, - "commP generation fails": { - fileStoreParams: shared_testutil.TestFileStoreParams{ - AvailableTempFiles: []filestore.File{tempFile}, - ExpectedDeletions: []filestore.Path{tempFile.Path()}, - }, - commPErr: errors.New("Could not generate commP"), - shouldErr: true, - }, - } - - for testName, testCase := range testCases { - t.Run(testName, func(t *testing.T) { - fcp := &fakeCommPGenerator{pieceCid, piecePath, pieceSize, testCase.commPErr} - fs := shared_testutil.NewTestFileStore(testCase.fileStoreParams) - resultPieceCid, resultPiecePath, resultMetadataPath, resultErr := providerutils.GeneratePieceCommitmentWithMetadata( - fs, fcp.GenerateCommPToFile, proofType, payloadCid, selector) - require.Equal(t, resultPieceCid, testCase.expectedPieceCid) - require.Equal(t, resultPiecePath, testCase.expectedPiecePath) - require.Equal(t, resultMetadataPath, testCase.expectedMetadataPath) - if testCase.shouldErr { - require.Error(t, resultErr) - } else { - require.NoError(t, resultErr) - } - fs.VerifyExpectations(t) - }) - } -} - -type fakeCommPGenerator struct { - pieceCid cid.Cid - path filestore.Path - size abi.UnpaddedPieceSize - err error -} - -func (fcp *fakeCommPGenerator) GenerateCommPToFile(abi.RegisteredSealProof, cid.Cid, ipld.Node, ...car.OnNewCarBlockFunc) (cid.Cid, filestore.Path, abi.UnpaddedPieceSize, error) { - return fcp.pieceCid, fcp.path, fcp.size, fcp.err -} - -func TestLoadBlockLocations(t *testing.T) { - testData := shared_testutil.NewTestIPLDTree() - - carBuf := new(bytes.Buffer) - blockLocationBuf := new(bytes.Buffer) - err := testData.DumpToCar(carBuf, 
blockrecorder.RecordEachBlockTo(blockLocationBuf)) - require.NoError(t, err) - validPath := filestore.Path("valid.data") - validFile := shared_testutil.NewTestFile(shared_testutil.TestFileParams{ - Buffer: blockLocationBuf, - Path: validPath, - }) - missingPath := filestore.Path("missing.data") - invalidPath := filestore.Path("invalid.data") - invalidData := make([]byte, 512) - _, _ = rand.Read(invalidData) - invalidFile := shared_testutil.NewTestFile(shared_testutil.TestFileParams{ - Buffer: bytes.NewBuffer(invalidData), - Path: invalidPath, - }) - fs := shared_testutil.NewTestFileStore(shared_testutil.TestFileStoreParams{ - Files: []filestore.File{validFile, invalidFile}, - ExpectedOpens: []filestore.Path{validPath, invalidPath}, - }) - testCases := map[string]struct { - path filestore.Path - shouldErr bool - expectedCids []cid.Cid - }{ - "valid data": { - path: validPath, - shouldErr: false, - expectedCids: []cid.Cid{ - testData.LeafAlphaBlock.Cid(), - testData.LeafBetaBlock.Cid(), - testData.MiddleListBlock.Cid(), - testData.MiddleMapBlock.Cid(), - testData.RootBlock.Cid(), - }, - }, - "missing data": { - path: missingPath, - shouldErr: true, - }, - "invalid data": { - path: invalidPath, - shouldErr: true, - }, - } - for testCase, data := range testCases { - t.Run(testCase, func(t *testing.T) { - results, err := providerutils.LoadBlockLocations(fs, data.path) - if data.shouldErr { - require.Error(t, err) - require.Nil(t, results) - } else { - require.NoError(t, err) - for _, c := range data.expectedCids { - _, ok := results[c] - require.True(t, ok) - } - } - }) - } - fs.VerifyExpectations(t) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/common.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/common.go index 08fc484e37..e265c0376b 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/common.go +++ 
b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/common.go @@ -1,13 +1,13 @@ package requestvalidation import ( - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statestore" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" "github.com/libp2p/go-libp2p-core/peer" "golang.org/x/xerrors" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/storagemarket" ) @@ -19,7 +19,7 @@ import ( // - referenced deal matches the given base CID // - referenced deal is in an acceptable state func ValidatePush( - deals *statestore.StateStore, + deals PushDeals, sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, @@ -30,7 +30,7 @@ func ValidatePush( } var deal storagemarket.MinerDeal - err := deals.Get(dealVoucher.Proposal).Get(&deal) + deal, err := deals.Get(dealVoucher.Proposal) if err != nil { return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) } @@ -57,7 +57,7 @@ func ValidatePush( // - referenced deal matches the given base CID // - referenced deal is in an acceptable state func ValidatePull( - deals *statestore.StateStore, + deals PullDeals, receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, @@ -66,9 +66,7 @@ func ValidatePull( if !ok { return xerrors.Errorf("voucher type %s: %w", voucher.Type(), ErrWrongVoucherType) } - - var deal storagemarket.ClientDeal - err := deals.Get(dealVoucher.Proposal).Get(&deal) + deal, err := deals.Get(dealVoucher.Proposal) if err != nil { return xerrors.Errorf("Proposal CID %s: %w", dealVoucher.Proposal.String(), ErrNoDeal) } diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/request_validation_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/request_validation_test.go deleted file mode 100644 index cb9551f97a..0000000000 --- 
a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/request_validation_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package requestvalidation_test - -import ( - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - datatransfer "github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - dss "github.com/ipfs/go-datastore/sync" - blocksutil "github.com/ipfs/go-ipfs-blocksutil" - "github.com/libp2p/go-libp2p-core/peer" - xerrors "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - rv "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" -) - -var blockGenerator = blocksutil.NewBlockGenerator() - -type wrongDTType struct { -} - -func (wrongDTType) Type() datatransfer.TypeIdentifier { - return "WrongDTTYPE" -} - -func uniqueStorageDealProposal() (market.ClientDealProposal, error) { - clientAddr, err := address.NewIDAddress(uint64(rand.Int())) - if err != nil { - return market.ClientDealProposal{}, err - } - providerAddr, err := address.NewIDAddress(uint64(rand.Int())) - if err != nil { - return market.ClientDealProposal{}, err - } - return market.ClientDealProposal{ - Proposal: market.DealProposal{ - PieceCID: blockGenerator.Next().Cid(), - Client: clientAddr, - Provider: providerAddr, - }, - ClientSignature: crypto.Signature{ - Data: []byte("foo bar cat dog"), - Type: crypto.SigTypeBLS, - }, - }, nil -} - -func newClientDeal(minerID peer.ID, state storagemarket.StorageDealStatus) (storagemarket.ClientDeal, error) { - newProposal, err := uniqueStorageDealProposal() - if err != nil { - return storagemarket.ClientDeal{}, err - } - proposalNd, err := 
cborutil.AsIpld(&newProposal) - if err != nil { - return storagemarket.ClientDeal{}, err - } - minerAddr, err := address.NewIDAddress(uint64(rand.Int())) - if err != nil { - return storagemarket.ClientDeal{}, err - } - - return storagemarket.ClientDeal{ - ClientDealProposal: newProposal, - ProposalCid: proposalNd.Cid(), - DataRef: &storagemarket.DataRef{ - Root: blockGenerator.Next().Cid(), - }, - Miner: minerID, - MinerWorker: minerAddr, - State: state, - }, nil -} - -func newMinerDeal(clientID peer.ID, state storagemarket.StorageDealStatus) (storagemarket.MinerDeal, error) { - newProposal, err := uniqueStorageDealProposal() - if err != nil { - return storagemarket.MinerDeal{}, err - } - proposalNd, err := cborutil.AsIpld(&newProposal) - if err != nil { - return storagemarket.MinerDeal{}, err - } - ref := blockGenerator.Next().Cid() - - return storagemarket.MinerDeal{ - ClientDealProposal: newProposal, - ProposalCid: proposalNd.Cid(), - Client: clientID, - State: state, - Ref: &storagemarket.DataRef{Root: ref}, - }, nil -} - -func TestUnifiedRequestValidator(t *testing.T) { - ds := dss.MutexWrap(datastore.NewMapDatastore()) - state := statestore.New(namespace.Wrap(ds, datastore.NewKey("/deals/client"))) - minerID := peer.ID("fakepeerid") - clientID := peer.ID("fakepeerid2") - block := blockGenerator.Next() - - t.Run("which only accepts pulls", func(t *testing.T) { - urv := rv.NewUnifiedRequestValidator(nil, state) - - t.Run("ValidatePush fails", func(t *testing.T) { - if !xerrors.Is(urv.ValidatePush(minerID, wrongDTType{}, block.Cid(), nil), rv.ErrNoPushAccepted) { - t.Fatal("Push should fail for the client request validator for storage deals") - } - }) - - AssertValidatesPulls(t, urv, minerID, state) - }) - - t.Run("which only accepts pushes", func(t *testing.T) { - urv := rv.NewUnifiedRequestValidator(state, nil) - - t.Run("ValidatePull fails", func(t *testing.T) { - if !xerrors.Is(urv.ValidatePull(clientID, wrongDTType{}, block.Cid(), nil), 
rv.ErrNoPullAccepted) { - t.Fatal("Pull should fail for the provider request validator for storage deals") - } - }) - - AssertPushValidator(t, urv, clientID, state) - }) - - t.Run("which accepts pushes and pulls", func(t *testing.T) { - urv := rv.NewUnifiedRequestValidator(state, state) - - AssertValidatesPulls(t, urv, minerID, state) - AssertPushValidator(t, urv, clientID, state) - }) -} - -func AssertPushValidator(t *testing.T, validator datatransfer.RequestValidator, sender peer.ID, state *statestore.StateStore) { - t.Run("ValidatePush fails deal not found", func(t *testing.T) { - proposal, err := uniqueStorageDealProposal() - if err != nil { - t.Fatal("error creating proposal") - } - proposalNd, err := cborutil.AsIpld(&proposal) - if err != nil { - t.Fatal("error serializing proposal") - } - if !xerrors.Is(validator.ValidatePush(sender, &rv.StorageDataTransferVoucher{proposalNd.Cid()}, proposal.Proposal.PieceCID, nil), rv.ErrNoDeal) { - t.Fatal("Push should fail if there is no deal stored") - } - }) - t.Run("ValidatePush fails wrong miner", func(t *testing.T) { - otherClient := peer.ID("otherclient") - minerDeal, err := newMinerDeal(otherClient, storagemarket.StorageDealProposalAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - ref := minerDeal.Ref - if !xerrors.Is(validator.ValidatePush(sender, &rv.StorageDataTransferVoucher{minerDeal.ProposalCid}, ref.Root, nil), rv.ErrWrongPeer) { - t.Fatal("Push should fail if miner address is incorrect") - } - }) - t.Run("ValidatePush fails wrong piece ref", func(t *testing.T) { - minerDeal, err := newMinerDeal(sender, storagemarket.StorageDealProposalAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - if !xerrors.Is(validator.ValidatePush(sender, 
&rv.StorageDataTransferVoucher{minerDeal.ProposalCid}, blockGenerator.Next().Cid(), nil), rv.ErrWrongPiece) { - t.Fatal("Push should fail if piece ref is incorrect") - } - }) - t.Run("ValidatePush fails wrong deal state", func(t *testing.T) { - minerDeal, err := newMinerDeal(sender, storagemarket.StorageDealActive) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - ref := minerDeal.Ref - if !xerrors.Is(validator.ValidatePush(sender, &rv.StorageDataTransferVoucher{minerDeal.ProposalCid}, ref.Root, nil), rv.ErrInacceptableDealState) { - t.Fatal("Push should fail if deal is in a state that cannot be data transferred") - } - }) - t.Run("ValidatePush succeeds", func(t *testing.T) { - minerDeal, err := newMinerDeal(sender, storagemarket.StorageDealValidating) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(minerDeal.ProposalCid, &minerDeal); err != nil { - t.Fatal("deal tracking failed") - } - ref := minerDeal.Ref - if validator.ValidatePush(sender, &rv.StorageDataTransferVoucher{minerDeal.ProposalCid}, ref.Root, nil) != nil { - t.Fatal("Push should should succeed when all parameters are correct") - } - }) -} - -func AssertValidatesPulls(t *testing.T, validator datatransfer.RequestValidator, receiver peer.ID, state *statestore.StateStore) { - t.Run("ValidatePull fails deal not found", func(t *testing.T) { - proposal, err := uniqueStorageDealProposal() - if err != nil { - t.Fatal("error creating proposal") - } - proposalNd, err := cborutil.AsIpld(&proposal) - if err != nil { - t.Fatal("error serializing proposal") - } - if !xerrors.Is(validator.ValidatePull(receiver, &rv.StorageDataTransferVoucher{proposalNd.Cid()}, proposal.Proposal.PieceCID, nil), rv.ErrNoDeal) { - t.Fatal("Pull should fail if there is no deal stored") - } - }) - t.Run("ValidatePull fails wrong client", func(t *testing.T) { - otherMiner := 
peer.ID("otherminer") - clientDeal, err := newClientDeal(otherMiner, storagemarket.StorageDealProposalAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal tracking failed") - } - payloadCid := clientDeal.DataRef.Root - if !xerrors.Is(validator.ValidatePull(receiver, &rv.StorageDataTransferVoucher{clientDeal.ProposalCid}, payloadCid, nil), rv.ErrWrongPeer) { - t.Fatal("Pull should fail if miner address is incorrect") - } - }) - t.Run("ValidatePull fails wrong piece ref", func(t *testing.T) { - clientDeal, err := newClientDeal(receiver, storagemarket.StorageDealProposalAccepted) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal tracking failed") - } - if !xerrors.Is(validator.ValidatePull(receiver, &rv.StorageDataTransferVoucher{clientDeal.ProposalCid}, blockGenerator.Next().Cid(), nil), rv.ErrWrongPiece) { - t.Fatal("Pull should fail if piece ref is incorrect") - } - }) - t.Run("ValidatePull fails wrong deal state", func(t *testing.T) { - clientDeal, err := newClientDeal(receiver, storagemarket.StorageDealActive) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal tracking failed") - } - payloadCid := clientDeal.DataRef.Root - if !xerrors.Is(validator.ValidatePull(receiver, &rv.StorageDataTransferVoucher{clientDeal.ProposalCid}, payloadCid, nil), rv.ErrInacceptableDealState) { - t.Fatal("Pull should fail if deal is in a state that cannot be data transferred") - } - }) - t.Run("ValidatePull succeeds", func(t *testing.T) { - clientDeal, err := newClientDeal(receiver, storagemarket.StorageDealValidating) - if err != nil { - t.Fatal("error creating client deal") - } - if err := state.Begin(clientDeal.ProposalCid, &clientDeal); err != nil { - t.Fatal("deal 
tracking failed") - } - payloadCid := clientDeal.DataRef.Root - if validator.ValidatePull(receiver, &rv.StorageDataTransferVoucher{clientDeal.ProposalCid}, payloadCid, nil) != nil { - t.Fatal("Pull should should succeed when all parameters are correct") - } - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types.go index dccfd2e880..ae076df393 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types.go @@ -3,9 +3,10 @@ package requestvalidation import ( "errors" - datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/ipfs/go-cid" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/storagemarket" ) @@ -37,7 +38,9 @@ var ( ErrInacceptableDealState = errors.New("deal is not a in a state where deals are accepted") // DataTransferStates are the states in which it would make sense to actually start a data transfer - DataTransferStates = []storagemarket.StorageDealStatus{storagemarket.StorageDealValidating, storagemarket.StorageDealWaitingForData, storagemarket.StorageDealUnknown} + // We accept deals even in the StorageDealTransferring state too as we could also also receive a data transfer restart request + DataTransferStates = []storagemarket.StorageDealStatus{storagemarket.StorageDealValidating, storagemarket.StorageDealWaitingForData, storagemarket.StorageDealUnknown, + storagemarket.StorageDealTransferring, storagemarket.StorageDealProviderTransferRestart} ) // StorageDataTransferVoucher is the voucher type for data transfers diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types_cbor_gen.go 
b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types_cbor_gen.go index ae25e9ac76..d952f221d8 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types_cbor_gen.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/types_cbor_gen.go @@ -12,18 +12,22 @@ import ( var _ = xerrors.Errorf +var lengthBufStorageDataTransferVoucher = []byte{129} + func (t *StorageDataTransferVoucher) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{129}); err != nil { + if _, err := w.Write(lengthBufStorageDataTransferVoucher); err != nil { return err } + scratch := make([]byte, 9) + // t.Proposal (cid.Cid) (struct) - if err := cbg.WriteCid(w, t.Proposal); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.Proposal); err != nil { return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) } @@ -31,9 +35,12 @@ func (t *StorageDataTransferVoucher) MarshalCBOR(w io.Writer) error { } func (t *StorageDataTransferVoucher) UnmarshalCBOR(r io.Reader) error { + *t = StorageDataTransferVoucher{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/unified_request_validator.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/unified_request_validator.go index 9f4107769e..065df6cb2b 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/unified_request_validator.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation/unified_request_validator.go @@ -1,24 +1,36 @@ package requestvalidation import ( - datatransfer 
"github.com/filecoin-project/go-data-transfer" - "github.com/filecoin-project/go-statestore" "github.com/ipfs/go-cid" "github.com/ipld/go-ipld-prime" "github.com/libp2p/go-libp2p-core/peer" + + datatransfer "github.com/filecoin-project/go-data-transfer" + + "github.com/filecoin-project/go-fil-markets/storagemarket" ) +// PushDeals gets deal states for Push validations +type PushDeals interface { + Get(cid.Cid) (storagemarket.MinerDeal, error) +} + +// PullDeals gets deal states for Pull validations +type PullDeals interface { + Get(cid.Cid) (storagemarket.ClientDeal, error) +} + // UnifiedRequestValidator is a data transfer request validator that validates // StorageDataTransferVoucher from the given state store // It can be made to only accept push requests (Provider) or pull requests (Client) // by passing nil for the statestore value for pushes or pulls type UnifiedRequestValidator struct { - pushDeals *statestore.StateStore - pullDeals *statestore.StateStore + pushDeals PushDeals + pullDeals PullDeals } // NewUnifiedRequestValidator returns a new instance of UnifiedRequestValidator -func NewUnifiedRequestValidator(pushDeals *statestore.StateStore, pullDeals *statestore.StateStore) *UnifiedRequestValidator { +func NewUnifiedRequestValidator(pushDeals PushDeals, pullDeals PullDeals) *UnifiedRequestValidator { return &UnifiedRequestValidator{ pushDeals: pushDeals, pullDeals: pullDeals, @@ -26,35 +38,35 @@ func NewUnifiedRequestValidator(pushDeals *statestore.StateStore, pullDeals *sta } // SetPushDeals sets the store to look up push deals with -func (v *UnifiedRequestValidator) SetPushDeals(pushDeals *statestore.StateStore) { +func (v *UnifiedRequestValidator) SetPushDeals(pushDeals PushDeals) { v.pushDeals = pushDeals } // SetPullDeals sets the store to look up pull deals with -func (v *UnifiedRequestValidator) SetPullDeals(pullDeals *statestore.StateStore) { +func (v *UnifiedRequestValidator) SetPullDeals(pullDeals PullDeals) { v.pullDeals = pullDeals } // 
ValidatePush implements the ValidatePush method of a data transfer request validator. // If no pushStore exists, it rejects the request // Otherwise, it calls the ValidatePush function to validate the deal -func (v *UnifiedRequestValidator) ValidatePush(sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { +func (v *UnifiedRequestValidator) ValidatePush(sender peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { if v.pushDeals == nil { - return ErrNoPushAccepted + return nil, ErrNoPushAccepted } - return ValidatePush(v.pushDeals, sender, voucher, baseCid, selector) + return nil, ValidatePush(v.pushDeals, sender, voucher, baseCid, selector) } // ValidatePull implements the ValidatePull method of a data transfer request validator. // If no pullStore exists, it rejects the request // Otherwise, it calls the ValidatePull function to validate the deal -func (v *UnifiedRequestValidator) ValidatePull(receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) error { +func (v *UnifiedRequestValidator) ValidatePull(receiver peer.ID, voucher datatransfer.Voucher, baseCid cid.Cid, selector ipld.Node) (datatransfer.VoucherResult, error) { if v.pullDeals == nil { - return ErrNoPullAccepted + return nil, ErrNoPullAccepted } - return ValidatePull(v.pullDeals, receiver, voucher, baseCid, selector) + return nil, ValidatePull(v.pullDeals, receiver, voucher, baseCid, selector) } var _ datatransfer.RequestValidator = &UnifiedRequestValidator{} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask/storedask.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask/storedask.go deleted file mode 100644 index 2e70b4ff04..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask/storedask.go +++ /dev/null @@ -1,172 +0,0 @@ -package storedask - -import ( - "bytes" - 
"context" - "sync" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-datastore" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/providerutils" -) - -var log = logging.Logger("storedask") - -// DefaultPrice is the default price set for StorageAsk in Fil / GiB / Epoch -var DefaultPrice = abi.NewTokenAmount(500000000) - -// DefaultDuration is the default number of epochs a storage ask is in effect for -const DefaultDuration abi.ChainEpoch = 1000000 - -// DefaultMinPieceSize is the minimum accepted piece size for data -const DefaultMinPieceSize abi.PaddedPieceSize = 256 - -// DefaultMaxPieceSize is the default maximum accepted size for pieces for deals -// TODO: It would be nice to default this to the miner's sector size -const DefaultMaxPieceSize abi.PaddedPieceSize = 1 << 20 - -// StoredAsk implements a persisted SignedStorageAsk that lasts through restarts -// It also maintains a cache of the current SignedStorageAsk in memory -type StoredAsk struct { - askLk sync.RWMutex - ask *storagemarket.SignedStorageAsk - ds datastore.Batching - dsKey datastore.Key - spn storagemarket.StorageProviderNode - actor address.Address -} - -// NewStoredAsk returns a new instance of StoredAsk -// It will initialize a new SignedStorageAsk on disk if one is not set -// Otherwise it loads the current SignedStorageAsk from disk -func NewStoredAsk(ds datastore.Batching, dsKey datastore.Key, spn storagemarket.StorageProviderNode, actor address.Address) (*StoredAsk, error) { - - s := &StoredAsk{ - ds: ds, - spn: spn, - actor: actor, - } - - if err := s.tryLoadAsk(); err != nil { - return nil, err - } - - if s.ask == nil { - // TODO: we should be fine with this state, and just say it means 'not actively accepting deals' - // 
for now... lets just set a price - if err := s.SetAsk(DefaultPrice, DefaultDuration); err != nil { - return nil, xerrors.Errorf("failed setting a default price: %w", err) - } - } - return s, nil -} - -// SetAsk writes a new ask to disk with the provided price, -// duration, and options. Any previously-existing ask is replaced. -// It also increments the sequence number on the ask -func (s *StoredAsk) SetAsk(price abi.TokenAmount, duration abi.ChainEpoch, options ...storagemarket.StorageAskOption) error { - s.askLk.Lock() - defer s.askLk.Unlock() - var seqno uint64 - if s.ask != nil { - seqno = s.ask.Ask.SeqNo + 1 - } - - ctx := context.TODO() - - _, height, err := s.spn.GetChainHead(ctx) - if err != nil { - return err - } - ask := &storagemarket.StorageAsk{ - Price: price, - Timestamp: height, - Expiry: height + duration, - Miner: s.actor, - SeqNo: seqno, - MinPieceSize: DefaultMinPieceSize, - MaxPieceSize: DefaultMaxPieceSize, - } - - for _, option := range options { - option(ask) - } - - tok, _, err := s.spn.GetChainHead(ctx) - if err != nil { - return err - } - - sig, err := providerutils.SignMinerData(ctx, ask, s.actor, tok, s.spn.GetMinerWorkerAddress, s.spn.SignBytes) - if err != nil { - return err - } - - return s.saveAsk(&storagemarket.SignedStorageAsk{ - Ask: ask, - Signature: sig, - }) - -} - -// GetAsk returns the current signed storage ask, or nil if one does not exist. 
-func (s *StoredAsk) GetAsk() *storagemarket.SignedStorageAsk { - s.askLk.RLock() - defer s.askLk.RUnlock() - if s.ask == nil { - return nil - } - ask := *s.ask - return &ask -} - -func (s *StoredAsk) tryLoadAsk() error { - s.askLk.Lock() - defer s.askLk.Unlock() - - err := s.loadAsk() - if err != nil { - if xerrors.Is(err, datastore.ErrNotFound) { - log.Warn("no previous ask found, miner will not accept deals until a price is set") - return nil - } - return err - } - - return nil -} - -func (s *StoredAsk) loadAsk() error { - askb, err := s.ds.Get(s.dsKey) - if err != nil { - return xerrors.Errorf("failed to load most recent ask from disk: %w", err) - } - - var ssa storagemarket.SignedStorageAsk - if err := cborutil.ReadCborRPC(bytes.NewReader(askb), &ssa); err != nil { - return err - } - - s.ask = &ssa - return nil -} - -func (s *StoredAsk) saveAsk(a *storagemarket.SignedStorageAsk) error { - b, err := cborutil.Dump(a) - if err != nil { - return err - } - - if err := s.ds.Put(s.dsKey, b); err != nil { - return err - } - - s.ask = a - return nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask/storedask_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask/storedask_test.go deleted file mode 100644 index 8136034cea..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask/storedask_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package storedask_test - -import ( - "errors" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-datastore" - dss "github.com/ipfs/go-datastore/sync" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" -) - -func TestStoredAsk(t *testing.T) { - ds := 
dss.MutexWrap(datastore.NewMapDatastore()) - spn := &testnodes.FakeProviderNode{ - FakeCommonNode: testnodes.FakeCommonNode{ - SMState: testnodes.NewStorageMarketState(), - }, - } - actor := address.TestAddress2 - storedAsk, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spn, actor) - require.NoError(t, err) - - testPrice := abi.NewTokenAmount(1000000000) - testDuration := abi.ChainEpoch(200) - t.Run("auto initializing", func(t *testing.T) { - ask := storedAsk.GetAsk() - require.NotNil(t, ask) - }) - t.Run("setting ask price", func(t *testing.T) { - minPieceSize := abi.PaddedPieceSize(1024) - err := storedAsk.SetAsk(testPrice, testDuration, storagemarket.MinPieceSize(minPieceSize)) - require.NoError(t, err) - ask := storedAsk.GetAsk() - require.Equal(t, ask.Ask.Price, testPrice) - require.Equal(t, ask.Ask.Expiry-ask.Ask.Timestamp, testDuration) - require.Equal(t, ask.Ask.MinPieceSize, minPieceSize) - }) - t.Run("reloading stored ask from disk", func(t *testing.T) { - storedAsk2, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spn, actor) - require.NoError(t, err) - ask := storedAsk2.GetAsk() - require.Equal(t, ask.Ask.Price, testPrice) - require.Equal(t, ask.Ask.Expiry-ask.Ask.Timestamp, testDuration) - }) - t.Run("node errors", func(t *testing.T) { - spnStateIDErr := &testnodes.FakeProviderNode{ - FakeCommonNode: testnodes.FakeCommonNode{ - GetChainHeadError: errors.New("something went wrong"), - SMState: testnodes.NewStorageMarketState(), - }, - } - // should load cause ask is is still in data store - storedAskError, err := storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spnStateIDErr, actor) - require.NoError(t, err) - err = storedAskError.SetAsk(testPrice, testDuration) - require.Error(t, err) - - spnMinerWorkerErr := &testnodes.FakeProviderNode{ - FakeCommonNode: testnodes.FakeCommonNode{ - SMState: testnodes.NewStorageMarketState(), - }, - MinerWorkerError: errors.New("something went wrong"), - } - // should 
load cause ask is is still in data store - storedAskError, err = storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spnMinerWorkerErr, actor) - require.NoError(t, err) - err = storedAskError.SetAsk(testPrice, testDuration) - require.Error(t, err) - - spnSignBytesErr := &testnodes.FakeProviderNode{ - FakeCommonNode: testnodes.FakeCommonNode{ - SMState: testnodes.NewStorageMarketState(), - SignBytesError: errors.New("something went wrong"), - }, - } - // should load cause ask is is still in data store - storedAskError, err = storedask.NewStoredAsk(ds, datastore.NewKey("latest-ask"), spnSignBytesErr, actor) - require.NoError(t, err) - err = storedAskError.SetAsk(testPrice, testDuration) - require.Error(t, err) - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/integration_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/integration_test.go deleted file mode 100644 index 57c8f3c94f..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/integration_test.go +++ /dev/null @@ -1,461 +0,0 @@ -package storagemarket_test - -import ( - "bytes" - "context" - "encoding/json" - "io/ioutil" - "math/rand" - "path/filepath" - "sync" - "testing" - - "github.com/filecoin-project/go-address" - graphsync "github.com/filecoin-project/go-data-transfer/impl/graphsync" - "github.com/filecoin-project/go-statestore" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/go-fil-markets/pieceio" - "github.com/filecoin-project/go-fil-markets/pieceio/cario" - 
"github.com/filecoin-project/go-fil-markets/piecestore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket/discovery" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" - storageimpl "github.com/filecoin-project/go-fil-markets/storagemarket/impl" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" - "github.com/filecoin-project/go-fil-markets/storagemarket/impl/storedask" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" - "github.com/filecoin-project/go-fil-markets/storagemarket/testnodes" -) - -func TestMakeDeal(t *testing.T) { - ctx := context.Background() - h := newHarness(t, ctx) - require.NoError(t, h.Provider.Start(ctx)) - require.NoError(t, h.Client.Start(ctx)) - - // set up a subscriber - providerDealChan := make(chan storagemarket.MinerDeal) - var checkedUnmarshalling bool - subscriber := func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - if !checkedUnmarshalling { - // test that deal created can marshall and unmarshalled - jsonBytes, err := json.Marshal(deal) - require.NoError(t, err) - var unmDeal storagemarket.MinerDeal - err = json.Unmarshal(jsonBytes, &unmDeal) - require.NoError(t, err) - require.Equal(t, deal, unmDeal) - checkedUnmarshalling = true - } - providerDealChan <- deal - } - _ = h.Provider.SubscribeToEvents(subscriber) - - clientDealChan := make(chan storagemarket.ClientDeal) - clientSubscriber := func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - clientDealChan <- deal - } - _ = h.Client.SubscribeToEvents(clientSubscriber) - - // set ask price where we'll accept any price - err := h.Provider.SetAsk(big.NewInt(0), 50_000) - assert.NoError(t, err) - - result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, true, false) - proposalCid := 
result.ProposalCid - - dealStatesToStrings := func(states []storagemarket.StorageDealStatus) []string { - var out []string - for _, state := range states { - out = append(out, storagemarket.DealStates[state]) - } - return out - } - - var providerSeenDeal storagemarket.MinerDeal - var clientSeenDeal storagemarket.ClientDeal - var providerstates, clientstates []storagemarket.StorageDealStatus - for providerSeenDeal.State != storagemarket.StorageDealExpired || - clientSeenDeal.State != storagemarket.StorageDealExpired { - select { - case clientSeenDeal = <-clientDealChan: - if len(clientstates) == 0 || clientSeenDeal.State != clientstates[len(clientstates)-1] { - clientstates = append(clientstates, clientSeenDeal.State) - } - case providerSeenDeal = <-providerDealChan: - if len(providerstates) == 0 || providerSeenDeal.State != providerstates[len(providerstates)-1] { - providerstates = append(providerstates, providerSeenDeal.State) - } - } - } - - expProviderStates := []storagemarket.StorageDealStatus{ - storagemarket.StorageDealValidating, - storagemarket.StorageDealAcceptWait, - storagemarket.StorageDealWaitingForData, - storagemarket.StorageDealTransferring, - storagemarket.StorageDealVerifyData, - storagemarket.StorageDealEnsureProviderFunds, - storagemarket.StorageDealPublish, - storagemarket.StorageDealPublishing, - storagemarket.StorageDealStaged, - storagemarket.StorageDealSealing, - storagemarket.StorageDealRecordPiece, - storagemarket.StorageDealActive, - storagemarket.StorageDealExpired, - } - - expClientStates := []storagemarket.StorageDealStatus{ - storagemarket.StorageDealEnsureClientFunds, - //storagemarket.StorageDealClientFunding, // skipped because funds available - storagemarket.StorageDealFundsEnsured, - storagemarket.StorageDealStartDataTransfer, - storagemarket.StorageDealTransferring, - storagemarket.StorageDealCheckForAcceptance, - storagemarket.StorageDealProposalAccepted, - storagemarket.StorageDealSealing, - storagemarket.StorageDealActive, - 
storagemarket.StorageDealExpired, - } - - assert.Equal(t, dealStatesToStrings(expProviderStates), dealStatesToStrings(providerstates)) - assert.Equal(t, dealStatesToStrings(expClientStates), dealStatesToStrings(clientstates)) - - // check a couple of things to make sure we're getting the whole deal - assert.Equal(t, h.TestData.Host1.ID(), providerSeenDeal.Client) - assert.Empty(t, providerSeenDeal.Message) - assert.Equal(t, proposalCid, providerSeenDeal.ProposalCid) - assert.Equal(t, h.ProviderAddr, providerSeenDeal.ClientDealProposal.Proposal.Provider) - - cd, err := h.Client.GetLocalDeal(ctx, proposalCid) - assert.NoError(t, err) - shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State) - assert.True(t, cd.FastRetrieval) - - providerDeals, err := h.Provider.ListLocalDeals() - assert.NoError(t, err) - - pd := providerDeals[0] - assert.Equal(t, proposalCid, pd.ProposalCid) - assert.True(t, pd.FastRetrieval) - shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State) - - // test out query protocol - status, err := h.Client.GetProviderDealState(ctx, proposalCid) - assert.NoError(t, err) - shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, status.State) - assert.True(t, status.FastRetrieval) - - // ensure that the handoff has fast retrieval info - assert.Len(t, h.ProviderNode.OnDealCompleteCalls, 1) - assert.True(t, h.ProviderNode.OnDealCompleteCalls[0].FastRetrieval) -} - -func TestMakeDealOffline(t *testing.T) { - ctx := context.Background() - h := newHarness(t, ctx) - require.NoError(t, h.Client.Start(ctx)) - - carBuf := new(bytes.Buffer) - - err := cario.NewCarIO().WriteCar(ctx, h.TestData.Bs1, h.PayloadCid, shared.AllSelector(), carBuf) - require.NoError(t, err) - - commP, size, err := pieceio.GeneratePieceCommitment(abi.RegisteredSealProof_StackedDrg2KiBV1, carBuf, uint64(carBuf.Len())) - assert.NoError(t, err) - - dataRef := &storagemarket.DataRef{ - TransferType: storagemarket.TTManual, - Root: 
h.PayloadCid, - PieceCid: &commP, - PieceSize: size, - } - - result := h.ProposeStorageDeal(t, dataRef, false, false) - proposalCid := result.ProposalCid - - wg := sync.WaitGroup{} - - h.WaitForClientEvent(&wg, storagemarket.ClientEventDataTransferComplete) - h.WaitForProviderEvent(&wg, storagemarket.ProviderEventDataRequested) - wg.Wait() - - cd, err := h.Client.GetLocalDeal(ctx, proposalCid) - assert.NoError(t, err) - shared_testutil.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, cd.State) - - providerDeals, err := h.Provider.ListLocalDeals() - assert.NoError(t, err) - - pd := providerDeals[0] - assert.True(t, pd.ProposalCid.Equals(proposalCid)) - shared_testutil.AssertDealState(t, storagemarket.StorageDealWaitingForData, pd.State) - - err = cario.NewCarIO().WriteCar(ctx, h.TestData.Bs1, h.PayloadCid, shared.AllSelector(), carBuf) - require.NoError(t, err) - err = h.Provider.ImportDataForDeal(ctx, pd.ProposalCid, carBuf) - require.NoError(t, err) - - h.WaitForClientEvent(&wg, storagemarket.ClientEventDealExpired) - h.WaitForProviderEvent(&wg, storagemarket.ProviderEventDealExpired) - wg.Wait() - - cd, err = h.Client.GetLocalDeal(ctx, proposalCid) - assert.NoError(t, err) - shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State) - - providerDeals, err = h.Provider.ListLocalDeals() - assert.NoError(t, err) - - pd = providerDeals[0] - assert.True(t, pd.ProposalCid.Equals(proposalCid)) - shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State) -} - -func TestMakeDealNonBlocking(t *testing.T) { - ctx := context.Background() - h := newHarness(t, ctx) - testCids := shared_testutil.GenerateCids(2) - - h.ProviderNode.WaitForMessageBlocks = true - h.ProviderNode.AddFundsCid = testCids[1] - require.NoError(t, h.Provider.Start(ctx)) - - h.ClientNode.AddFundsCid = testCids[0] - require.NoError(t, h.Client.Start(ctx)) - - result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: 
storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false) - - wg := sync.WaitGroup{} - h.WaitForClientEvent(&wg, storagemarket.ClientEventDataTransferComplete) - h.WaitForProviderEvent(&wg, storagemarket.ProviderEventFundingInitiated) - wg.Wait() - - cd, err := h.Client.GetLocalDeal(ctx, result.ProposalCid) - assert.NoError(t, err) - shared_testutil.AssertDealState(t, storagemarket.StorageDealCheckForAcceptance, cd.State) - - providerDeals, err := h.Provider.ListLocalDeals() - assert.NoError(t, err) - - // Provider should be blocking on waiting for funds to appear on chain - pd := providerDeals[0] - assert.Equal(t, result.ProposalCid, pd.ProposalCid) - shared_testutil.AssertDealState(t, storagemarket.StorageDealProviderFunding, pd.State) -} - -func TestRestartClient(t *testing.T) { - ctx := context.Background() - h := newHarness(t, ctx) - - require.NoError(t, h.Provider.Start(ctx)) - require.NoError(t, h.Client.Start(ctx)) - - // set ask price where we'll accept any price - err := h.Provider.SetAsk(big.NewInt(0), 50_000) - assert.NoError(t, err) - - wg := sync.WaitGroup{} - wg.Add(1) - _ = h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - if event == storagemarket.ClientEventFundsEnsured { - // Stop the client and provider at some point during deal negotiation - require.NoError(t, h.Client.Stop()) - require.NoError(t, h.Provider.Stop()) - wg.Done() - } - }) - - result := h.ProposeStorageDeal(t, &storagemarket.DataRef{TransferType: storagemarket.TTGraphsync, Root: h.PayloadCid}, false, false) - proposalCid := result.ProposalCid - - wg.Wait() - - cd, err := h.Client.GetLocalDeal(ctx, proposalCid) - assert.NoError(t, err) - assert.NotEqual(t, storagemarket.StorageDealActive, cd.State) - - h = newHarnessWithTestData(t, ctx, h.TestData, h.SMState) - - wg.Add(1) - _ = h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - if event == storagemarket.ClientEventDealExpired { - 
wg.Done() - } - }) - - wg.Add(1) - _ = h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - if event == storagemarket.ProviderEventDealExpired { - wg.Done() - } - }) - - require.NoError(t, h.Provider.Start(ctx)) - require.NoError(t, h.Client.Start(ctx)) - - wg.Wait() - - cd, err = h.Client.GetLocalDeal(ctx, proposalCid) - assert.NoError(t, err) - shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, cd.State) - - providerDeals, err := h.Provider.ListLocalDeals() - assert.NoError(t, err) - - pd := providerDeals[0] - assert.Equal(t, pd.ProposalCid, proposalCid) - shared_testutil.AssertDealState(t, storagemarket.StorageDealExpired, pd.State) -} - -type harness struct { - Ctx context.Context - Epoch abi.ChainEpoch - PayloadCid cid.Cid - ProviderAddr address.Address - Client storagemarket.StorageClient - ClientNode *testnodes.FakeClientNode - Provider storagemarket.StorageProvider - ProviderNode *testnodes.FakeProviderNode - SMState *testnodes.StorageMarketState - ProviderInfo storagemarket.StorageProviderInfo - TestData *shared_testutil.Libp2pTestData -} - -func newHarness(t *testing.T, ctx context.Context) *harness { - smState := testnodes.NewStorageMarketState() - return newHarnessWithTestData(t, ctx, shared_testutil.NewLibp2pTestData(ctx, t), smState) -} - -func newHarnessWithTestData(t *testing.T, ctx context.Context, td *shared_testutil.Libp2pTestData, smState *testnodes.StorageMarketState) *harness { - epoch := abi.ChainEpoch(100) - fpath := filepath.Join("storagemarket", "fixtures", "payload.txt") - rootLink := td.LoadUnixFSFile(t, fpath, false) - payloadCid := rootLink.(cidlink.Link).Cid - - clientNode := testnodes.FakeClientNode{ - FakeCommonNode: testnodes.FakeCommonNode{SMState: smState}, - ClientAddr: address.TestAddress, - } - - expDealID := abi.DealID(rand.Uint64()) - psdReturn := market.PublishStorageDealsReturn{IDs: []abi.DealID{expDealID}} - psdReturnBytes := bytes.NewBuffer([]byte{}) - 
err := psdReturn.MarshalCBOR(psdReturnBytes) - assert.NoError(t, err) - - providerAddr := address.TestAddress2 - tempPath, err := ioutil.TempDir("", "storagemarket_test") - assert.NoError(t, err) - ps := piecestore.NewPieceStore(td.Ds2) - providerNode := &testnodes.FakeProviderNode{ - FakeCommonNode: testnodes.FakeCommonNode{ - SMState: smState, - WaitForMessageRetBytes: psdReturnBytes.Bytes(), - }, - MinerAddr: providerAddr, - } - fs, err := filestore.NewLocalFileStore(filestore.OsPath(tempPath)) - assert.NoError(t, err) - - // create provider and client - dt1 := graphsync.NewGraphSyncDataTransfer(td.Host1, td.GraphSync1, td.DTStoredCounter1) - rv1 := requestvalidation.NewUnifiedRequestValidator(nil, statestore.New(td.Ds1)) - require.NoError(t, dt1.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, rv1)) - - client, err := storageimpl.NewClient( - network.NewFromLibp2pHost(td.Host1), - td.Bs1, - dt1, - discovery.NewLocal(td.Ds1), - td.Ds1, - &clientNode, - storageimpl.DealPollingInterval(0), - ) - require.NoError(t, err) - - dt2 := graphsync.NewGraphSyncDataTransfer(td.Host2, td.GraphSync2, td.DTStoredCounter2) - rv2 := requestvalidation.NewUnifiedRequestValidator(statestore.New(td.Ds2), nil) - require.NoError(t, dt2.RegisterVoucherType(&requestvalidation.StorageDataTransferVoucher{}, rv2)) - - storedAsk, err := storedask.NewStoredAsk(td.Ds2, datastore.NewKey("latest-ask"), providerNode, providerAddr) - assert.NoError(t, err) - provider, err := storageimpl.NewProvider( - network.NewFromLibp2pHost(td.Host2), - td.Ds2, - td.Bs2, - fs, - ps, - dt2, - providerNode, - providerAddr, - abi.RegisteredSealProof_StackedDrg2KiBV1, - storedAsk, - ) - assert.NoError(t, err) - - // set ask price where we'll accept any price - err = provider.SetAsk(big.NewInt(0), 50_000) - assert.NoError(t, err) - - err = provider.Start(ctx) - assert.NoError(t, err) - - // Closely follows the MinerInfo struct in the spec - providerInfo := storagemarket.StorageProviderInfo{ - 
Address: providerAddr, - Owner: providerAddr, - Worker: providerAddr, - SectorSize: 1 << 20, - PeerID: td.Host2.ID(), - } - - smState.Providers = []*storagemarket.StorageProviderInfo{&providerInfo} - return &harness{ - Ctx: ctx, - Epoch: epoch, - PayloadCid: payloadCid, - ProviderAddr: providerAddr, - Client: client, - ClientNode: &clientNode, - Provider: provider, - ProviderNode: providerNode, - ProviderInfo: providerInfo, - TestData: td, - SMState: smState, - } -} - -func (h *harness) ProposeStorageDeal(t *testing.T, dataRef *storagemarket.DataRef, fastRetrieval, verifiedDeal bool) *storagemarket.ProposeStorageDealResult { - result, err := h.Client.ProposeStorageDeal(h.Ctx, h.ProviderAddr, &h.ProviderInfo, dataRef, h.Epoch+100, h.Epoch+20100, big.NewInt(1), big.NewInt(0), abi.RegisteredSealProof_StackedDrg2KiBV1, fastRetrieval, verifiedDeal) - assert.NoError(t, err) - return result -} - -func (h *harness) WaitForProviderEvent(wg *sync.WaitGroup, waitEvent storagemarket.ProviderEvent) { - wg.Add(1) - h.Provider.SubscribeToEvents(func(event storagemarket.ProviderEvent, deal storagemarket.MinerDeal) { - if event == waitEvent { - wg.Done() - } - }) -} - -func (h *harness) WaitForClientEvent(wg *sync.WaitGroup, waitEvent storagemarket.ClientEvent) { - wg.Add(1) - h.Client.SubscribeToEvents(func(event storagemarket.ClientEvent, deal storagemarket.ClientDeal) { - if event == waitEvent { - wg.Done() - } - }) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/ask_stream.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/ask_stream.go deleted file mode 100644 index 6622e0ad75..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/ask_stream.go +++ /dev/null @@ -1,52 +0,0 @@ -package network - -import ( - "bufio" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/libp2p/go-libp2p-core/mux" - "github.com/libp2p/go-libp2p-core/peer" -) - -type askStream struct 
{ - p peer.ID - rw mux.MuxedStream - buffered *bufio.Reader -} - -var _ StorageAskStream = (*askStream)(nil) - -func (as *askStream) ReadAskRequest() (AskRequest, error) { - var a AskRequest - - if err := a.UnmarshalCBOR(as.buffered); err != nil { - log.Warn(err) - return AskRequestUndefined, err - - } - - return a, nil -} - -func (as *askStream) WriteAskRequest(q AskRequest) error { - return cborutil.WriteCborRPC(as.rw, &q) -} - -func (as *askStream) ReadAskResponse() (AskResponse, error) { - var resp AskResponse - - if err := resp.UnmarshalCBOR(as.buffered); err != nil { - log.Warn(err) - return AskResponseUndefined, err - } - - return resp, nil -} - -func (as *askStream) WriteAskResponse(qr AskResponse) error { - return cborutil.WriteCborRPC(as.rw, &qr) -} - -func (as *askStream) Close() error { - return as.rw.Close() -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/deal_status_stream.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/deal_status_stream.go deleted file mode 100644 index 2a8a98b9ec..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/deal_status_stream.go +++ /dev/null @@ -1,54 +0,0 @@ -package network - -import ( - "bufio" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/mux" - "github.com/libp2p/go-libp2p-core/peer" -) - -type dealStatusStream struct { - p peer.ID - host host.Host - rw mux.MuxedStream - buffered *bufio.Reader -} - -var _ DealStatusStream = (*dealStatusStream)(nil) - -func (d *dealStatusStream) ReadDealStatusRequest() (DealStatusRequest, error) { - var q DealStatusRequest - - if err := q.UnmarshalCBOR(d.buffered); err != nil { - log.Warn(err) - return DealStatusRequestUndefined, err - } - return q, nil -} - -func (d *dealStatusStream) WriteDealStatusRequest(q DealStatusRequest) error { - return cborutil.WriteCborRPC(d.rw, &q) -} - -func (d 
*dealStatusStream) ReadDealStatusResponse() (DealStatusResponse, error) { - var qr DealStatusResponse - - if err := qr.UnmarshalCBOR(d.buffered); err != nil { - return DealStatusResponseUndefined, err - } - return qr, nil -} - -func (d *dealStatusStream) WriteDealStatusResponse(qr DealStatusResponse) error { - return cborutil.WriteCborRPC(d.rw, &qr) -} - -func (d *dealStatusStream) Close() error { - return d.rw.Close() -} - -func (d *dealStatusStream) RemotePeer() peer.ID { - return d.p -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/deal_stream.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/deal_stream.go deleted file mode 100644 index 60b5198fd5..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/deal_stream.go +++ /dev/null @@ -1,65 +0,0 @@ -package network - -import ( - "bufio" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/mux" - "github.com/libp2p/go-libp2p-core/peer" -) - -// TagPriority is the priority for deal streams -- they should generally be preserved above all else -const TagPriority = 100 - -type dealStream struct { - p peer.ID - host host.Host - rw mux.MuxedStream - buffered *bufio.Reader -} - -var _ StorageDealStream = (*dealStream)(nil) - -func (d *dealStream) ReadDealProposal() (Proposal, error) { - var ds Proposal - - if err := ds.UnmarshalCBOR(d.buffered); err != nil { - log.Warn(err) - return ProposalUndefined, err - } - return ds, nil -} - -func (d *dealStream) WriteDealProposal(dp Proposal) error { - return cborutil.WriteCborRPC(d.rw, &dp) -} - -func (d *dealStream) ReadDealResponse() (SignedResponse, error) { - var dr SignedResponse - - if err := dr.UnmarshalCBOR(d.buffered); err != nil { - return SignedResponseUndefined, err - } - return dr, nil -} - -func (d *dealStream) WriteDealResponse(dr SignedResponse) error { - return 
cborutil.WriteCborRPC(d.rw, &dr) -} - -func (d *dealStream) Close() error { - return d.rw.Close() -} - -func (d *dealStream) RemotePeer() peer.ID { - return d.p -} - -func (d *dealStream) TagProtectedConnection(identifier string) { - d.host.ConnManager().TagPeer(d.p, identifier, TagPriority) -} - -func (d *dealStream) UntagProtectedConnection(identifier string) { - d.host.ConnManager().UntagPeer(d.p, identifier) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/doc.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/doc.go deleted file mode 100644 index 84a589da4d..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/doc.go +++ /dev/null @@ -1,11 +0,0 @@ -/* -Package network providers an abstraction over a libp2p host for managing storage markets's Libp2p protocols: - -network.go - defines the interfaces that must be implemented to serve as a storage network layer -deal_stream.go - implements the `StorageDealStream` interface, a data stream for proposing storage deals -ask_stream.go - implements the `StorageAskStream` interface, a data stream for querying provider asks -deal_status_stream.go - implements the `StorageDealStatusStream` interface, a data stream for querying for deal status -libp2p_impl.go - provides the production implementation of the `StorageMarketNetwork` interface. 
-types.go - types for messages sent on the storage market libp2p protocols -*/ -package network diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/libp2p_impl.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/libp2p_impl.go deleted file mode 100644 index c83583b29a..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/libp2p_impl.go +++ /dev/null @@ -1,116 +0,0 @@ -package network - -import ( - "bufio" - "context" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/host" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - ma "github.com/multiformats/go-multiaddr" - - "github.com/filecoin-project/go-fil-markets/storagemarket" -) - -var log = logging.Logger("storagemarket_network") - -// NewFromLibp2pHost builds a storage market network on top of libp2p -func NewFromLibp2pHost(h host.Host) StorageMarketNetwork { - return &libp2pStorageMarketNetwork{host: h} -} - -// libp2pStorageMarketNetwork transforms the libp2p host interface, which sends and receives -// NetMessage objects, into the graphsync network interface. 
-type libp2pStorageMarketNetwork struct { - host host.Host - // inbound messages from the network are forwarded to the receiver - receiver StorageReceiver -} - -func (impl *libp2pStorageMarketNetwork) NewAskStream(ctx context.Context, id peer.ID) (StorageAskStream, error) { - s, err := impl.host.NewStream(ctx, id, storagemarket.AskProtocolID) - if err != nil { - log.Warn(err) - return nil, err - } - buffered := bufio.NewReaderSize(s, 16) - return &askStream{p: id, rw: s, buffered: buffered}, nil -} - -func (impl *libp2pStorageMarketNetwork) NewDealStream(ctx context.Context, id peer.ID) (StorageDealStream, error) { - s, err := impl.host.NewStream(ctx, id, storagemarket.DealProtocolID) - if err != nil { - return nil, err - } - buffered := bufio.NewReaderSize(s, 16) - return &dealStream{p: id, rw: s, buffered: buffered, host: impl.host}, nil -} - -func (impl *libp2pStorageMarketNetwork) NewDealStatusStream(ctx context.Context, id peer.ID) (DealStatusStream, error) { - s, err := impl.host.NewStream(ctx, id, storagemarket.DealStatusProtocolID) - if err != nil { - log.Warn(err) - return nil, err - } - buffered := bufio.NewReaderSize(s, 16) - return &dealStatusStream{p: id, rw: s, buffered: buffered}, nil -} - -func (impl *libp2pStorageMarketNetwork) SetDelegate(r StorageReceiver) error { - impl.receiver = r - impl.host.SetStreamHandler(storagemarket.DealProtocolID, impl.handleNewDealStream) - impl.host.SetStreamHandler(storagemarket.AskProtocolID, impl.handleNewAskStream) - impl.host.SetStreamHandler(storagemarket.DealStatusProtocolID, impl.handleNewDealStatusStream) - return nil -} - -func (impl *libp2pStorageMarketNetwork) StopHandlingRequests() error { - impl.receiver = nil - impl.host.RemoveStreamHandler(storagemarket.DealProtocolID) - impl.host.RemoveStreamHandler(storagemarket.AskProtocolID) - impl.host.RemoveStreamHandler(storagemarket.DealStatusProtocolID) - return nil -} - -func (impl *libp2pStorageMarketNetwork) handleNewAskStream(s network.Stream) { - reader 
:= impl.getReaderOrReset(s) - if reader != nil { - as := &askStream{s.Conn().RemotePeer(), s, reader} - impl.receiver.HandleAskStream(as) - } -} - -func (impl *libp2pStorageMarketNetwork) handleNewDealStream(s network.Stream) { - reader := impl.getReaderOrReset(s) - if reader != nil { - ds := &dealStream{s.Conn().RemotePeer(), impl.host, s, reader} - impl.receiver.HandleDealStream(ds) - } -} - -func (impl *libp2pStorageMarketNetwork) handleNewDealStatusStream(s network.Stream) { - reader := impl.getReaderOrReset(s) - if reader != nil { - qs := &dealStatusStream{s.Conn().RemotePeer(), impl.host, s, reader} - impl.receiver.HandleDealStatusStream(qs) - } -} - -func (impl *libp2pStorageMarketNetwork) getReaderOrReset(s network.Stream) *bufio.Reader { - if impl.receiver == nil { - log.Warn("no receiver set") - s.Reset() // nolint: errcheck,gosec - return nil - } - return bufio.NewReaderSize(s, 16) -} - -func (impl *libp2pStorageMarketNetwork) ID() peer.ID { - return impl.host.ID() -} - -func (impl *libp2pStorageMarketNetwork) AddAddrs(p peer.ID, addrs []ma.Multiaddr) { - impl.host.Peerstore().AddAddrs(p, addrs, time.Minute*10) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/libp2p_impl_test.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/libp2p_impl_test.go deleted file mode 100644 index 1e82cd875b..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/libp2p_impl_test.go +++ /dev/null @@ -1,485 +0,0 @@ -package network_test - -import ( - "context" - "testing" - "time" - - "github.com/libp2p/go-libp2p-core/peer" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket/network" -) - -type testReceiver struct { - t *testing.T - dealStreamHandler func(network.StorageDealStream) - askStreamHandler func(network.StorageAskStream) 
- dealStatusStreamHandler func(stream network.DealStatusStream) -} - -var _ network.StorageReceiver = &testReceiver{} - -func (tr *testReceiver) HandleDealStream(s network.StorageDealStream) { - defer s.Close() - if tr.dealStreamHandler != nil { - tr.dealStreamHandler(s) - } -} - -func (tr *testReceiver) HandleAskStream(s network.StorageAskStream) { - defer s.Close() - if tr.askStreamHandler != nil { - tr.askStreamHandler(s) - } -} - -func (tr *testReceiver) HandleDealStatusStream(s network.DealStatusStream) { - defer s.Close() - if tr.dealStatusStreamHandler != nil { - tr.dealStatusStreamHandler(s) - } -} - -func TestAskStreamSendReceiveAskRequest(t *testing.T) { - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - achan := make(chan network.AskRequest) - tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { - readq, err := s.ReadAskRequest() - require.NoError(t, err) - achan <- readq - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - // setup query stream host1 --> host 2 - assertAskRequestReceived(ctx, t, fromNetwork, toHost, achan) -} - -func TestAskStreamSendReceiveAskResponse(t *testing.T) { - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - achan := make(chan network.AskResponse) - tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { - a, err := s.ReadAskResponse() - require.NoError(t, err) - achan <- a - }} - 
require.NoError(t, toNetwork.SetDelegate(tr2)) - - assertAskResponseReceived(ctx, t, fromNetwork, toHost, achan) - -} - -func TestAskStreamSendReceiveMultipleSuccessful(t *testing.T) { - // send query, read in handler, send response back, read response - ctxBg := context.Background() - td := shared_testutil.NewLibp2pTestData(ctxBg, t) - nw1 := network.NewFromLibp2pHost(td.Host1) - nw2 := network.NewFromLibp2pHost(td.Host2) - require.NoError(t, td.Host1.Connect(ctxBg, peer.AddrInfo{ID: td.Host2.ID()})) - - // host2 gets a query and sends a response - ar := shared_testutil.MakeTestStorageAskResponse() - done := make(chan bool) - tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { - _, err := s.ReadAskRequest() - require.NoError(t, err) - - require.NoError(t, s.WriteAskResponse(ar)) - done <- true - }} - require.NoError(t, nw2.SetDelegate(tr2)) - - ctx, cancel := context.WithTimeout(ctxBg, 10*time.Second) - defer cancel() - - qs, err := nw1.NewAskStream(ctx, td.Host2.ID()) - require.NoError(t, err) - - var resp network.AskResponse - go require.NoError(t, qs.WriteAskRequest(shared_testutil.MakeTestStorageAskRequest())) - resp, err = qs.ReadAskResponse() - require.NoError(t, err) - - select { - case <-ctx.Done(): - t.Error("response not received") - case <-done: - } - - assert.Equal(t, ar, resp) -} - -func TestDealStreamSendReceiveDealProposal(t *testing.T) { - // send proposal, read in handler - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - dchan := make(chan network.Proposal) - tr2 := &testReceiver{ - t: t, - dealStreamHandler: func(s network.StorageDealStream) { - readD, err := s.ReadDealProposal() - require.NoError(t, err) - dchan <- readD - }, - } - require.NoError(t, toNetwork.SetDelegate(tr2)) 
- - assertDealProposalReceived(ctx, t, fromNetwork, toHost, dchan) -} - -func TestDealStreamSendReceiveDealResponse(t *testing.T) { - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toPeer := td.Host2.ID() - - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - drChan := make(chan network.SignedResponse) - tr2 := &testReceiver{ - t: t, - dealStreamHandler: func(s network.StorageDealStream) { - readDP, err := s.ReadDealResponse() - require.NoError(t, err) - drChan <- readDP - }, - } - require.NoError(t, toNetwork.SetDelegate(tr2)) - assertDealResponseReceived(ctx, t, fromNetwork, toPeer, drChan) -} - -func TestDealStreamSendReceiveMultipleSuccessful(t *testing.T) { - // send proposal, read in handler, send response back, - // read response, - - bgCtx := context.Background() - td := shared_testutil.NewLibp2pTestData(bgCtx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toPeer := td.Host2.ID() - - // set up stream handler, channels, and response - dr := shared_testutil.MakeTestStorageNetworkSignedResponse() - done := make(chan bool) - - tr2 := &testReceiver{t: t, dealStreamHandler: func(s network.StorageDealStream) { - _, err := s.ReadDealProposal() - require.NoError(t, err) - - require.NoError(t, s.WriteDealResponse(dr)) - done <- true - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - ctx, cancel := context.WithTimeout(bgCtx, 10*time.Second) - defer cancel() - - // start sending deal proposal - ds1, err := fromNetwork.NewDealStream(ctx, toPeer) - require.NoError(t, err) - - dp := shared_testutil.MakeTestStorageNetworkProposal() - - // write proposal - require.NoError(t, ds1.WriteDealProposal(dp)) - - // read response and verify it's the one we told toNetwork to send - responseReceived, err := ds1.ReadDealResponse() - require.NoError(t, 
err) - assert.Equal(t, dr, responseReceived) - - select { - case <-ctx.Done(): - t.Errorf("failed to receive messages") - case <-done: - } -} - -func TestDealStatusStreamSendReceiveRequest(t *testing.T) { - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - achan := make(chan network.DealStatusRequest) - tr2 := &testReceiver{t: t, dealStatusStreamHandler: func(s network.DealStatusStream) { - readq, err := s.ReadDealStatusRequest() - require.NoError(t, err) - achan <- readq - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - // setup query stream host1 --> host 2 - assertDealStatusRequestReceived(ctx, t, fromNetwork, toHost, achan) -} - -func TestDealStatusStreamSendReceiveResponse(t *testing.T) { - ctx := context.Background() - td := shared_testutil.NewLibp2pTestData(ctx, t) - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - achan := make(chan network.DealStatusResponse) - tr2 := &testReceiver{t: t, dealStatusStreamHandler: func(s network.DealStatusStream) { - a, err := s.ReadDealStatusResponse() - require.NoError(t, err) - achan <- a - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - assertDealStatusResponseReceived(ctx, t, fromNetwork, toHost, achan) -} - -func TestDealStatusStreamSendReceiveMultipleSuccessful(t *testing.T) { - // send query, read in handler, send response back, read response - ctxBg := context.Background() - td := shared_testutil.NewLibp2pTestData(ctxBg, t) - nw1 := network.NewFromLibp2pHost(td.Host1) - nw2 := 
network.NewFromLibp2pHost(td.Host2) - require.NoError(t, td.Host1.Connect(ctxBg, peer.AddrInfo{ID: td.Host2.ID()})) - - // host2 gets a query and sends a response - ar := shared_testutil.MakeTestDealStatusResponse() - done := make(chan bool) - tr2 := &testReceiver{t: t, dealStatusStreamHandler: func(s network.DealStatusStream) { - _, err := s.ReadDealStatusRequest() - require.NoError(t, err) - - require.NoError(t, s.WriteDealStatusResponse(ar)) - done <- true - }} - require.NoError(t, nw2.SetDelegate(tr2)) - - ctx, cancel := context.WithTimeout(ctxBg, 10*time.Second) - defer cancel() - - qs, err := nw1.NewDealStatusStream(ctx, td.Host2.ID()) - require.NoError(t, err) - - var resp network.DealStatusResponse - go require.NoError(t, qs.WriteDealStatusRequest(shared_testutil.MakeTestDealStatusRequest())) - resp, err = qs.ReadDealStatusResponse() - require.NoError(t, err) - - select { - case <-ctx.Done(): - t.Error("response not received") - case <-done: - } - - assert.Equal(t, ar, resp) -} - -func TestLibp2pStorageMarketNetwork_StopHandlingRequests(t *testing.T) { - bgCtx := context.Background() - td := shared_testutil.NewLibp2pTestData(bgCtx, t) - - fromNetwork := network.NewFromLibp2pHost(td.Host1) - toNetwork := network.NewFromLibp2pHost(td.Host2) - toHost := td.Host2.ID() - - // host1 gets no-op receiver - tr := &testReceiver{t: t} - require.NoError(t, fromNetwork.SetDelegate(tr)) - - // host2 gets receiver - achan := make(chan network.AskRequest) - tr2 := &testReceiver{t: t, askStreamHandler: func(s network.StorageAskStream) { - readar, err := s.ReadAskRequest() - require.NoError(t, err) - achan <- readar - }} - require.NoError(t, toNetwork.SetDelegate(tr2)) - - require.NoError(t, toNetwork.StopHandlingRequests()) - - _, err := fromNetwork.NewAskStream(bgCtx, toHost) - require.Error(t, err, "protocol not supported") -} - -// assertDealProposalReceived performs the verification that a deal proposal is received -func assertDealProposalReceived(inCtx context.Context, 
t *testing.T, fromNetwork network.StorageMarketNetwork, toPeer peer.ID, inChan chan network.Proposal) { - ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - qs1, err := fromNetwork.NewDealStream(ctx, toPeer) - require.NoError(t, err) - - // send query to host2 - dp := shared_testutil.MakeTestStorageNetworkProposal() - require.NoError(t, qs1.WriteDealProposal(dp)) - - var dealReceived network.Proposal - select { - case <-ctx.Done(): - t.Error("deal proposal not received") - case dealReceived = <-inChan: - } - require.NotNil(t, dealReceived) - assert.Equal(t, dp, dealReceived) -} - -func assertDealResponseReceived(parentCtx context.Context, t *testing.T, fromNetwork network.StorageMarketNetwork, toPeer peer.ID, inChan chan network.SignedResponse) { - ctx, cancel := context.WithTimeout(parentCtx, 10*time.Second) - defer cancel() - - ds1, err := fromNetwork.NewDealStream(ctx, toPeer) - require.NoError(t, err) - - dr := shared_testutil.MakeTestStorageNetworkSignedResponse() - require.NoError(t, ds1.WriteDealResponse(dr)) - - var responseReceived network.SignedResponse - select { - case <-ctx.Done(): - t.Error("response not received") - case responseReceived = <-inChan: - } - require.NotNil(t, responseReceived) - assert.Equal(t, dr, responseReceived) -} - -// assertAskRequestReceived performs the verification that a AskRequest is received -func assertAskRequestReceived(inCtx context.Context, t *testing.T, fromNetwork network.StorageMarketNetwork, toHost peer.ID, achan chan network.AskRequest) { - ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - as1, err := fromNetwork.NewAskStream(ctx, toHost) - require.NoError(t, err) - - // send query to host2 - a := shared_testutil.MakeTestStorageAskRequest() - require.NoError(t, as1.WriteAskRequest(a)) - - var ina network.AskRequest - select { - case <-ctx.Done(): - t.Error("msg not received") - case ina = <-achan: - } - require.NotNil(t, ina) - assert.Equal(t, a.Miner, 
ina.Miner) -} - -// assertAskResponseReceived performs the verification that a AskResponse is received -func assertAskResponseReceived(inCtx context.Context, t *testing.T, - fromNetwork network.StorageMarketNetwork, - toHost peer.ID, - achan chan network.AskResponse) { - ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - // setup query stream host1 --> host 2 - as1, err := fromNetwork.NewAskStream(ctx, toHost) - require.NoError(t, err) - - // send queryresponse to host2 - ar := shared_testutil.MakeTestStorageAskResponse() - require.NoError(t, as1.WriteAskResponse(ar)) - - // read queryresponse - var inar network.AskResponse - select { - case <-ctx.Done(): - t.Error("msg not received") - case inar = <-achan: - } - - require.NotNil(t, inar) - assert.Equal(t, ar, inar) -} - -// assertDealStatusRequestReceived performs the verification that a DealStatusRequest is received -func assertDealStatusRequestReceived(inCtx context.Context, t *testing.T, fromNetwork network.StorageMarketNetwork, toHost peer.ID, achan chan network.DealStatusRequest) { - ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - as1, err := fromNetwork.NewDealStatusStream(ctx, toHost) - require.NoError(t, err) - - // send query to host2 - a := shared_testutil.MakeTestDealStatusRequest() - require.NoError(t, as1.WriteDealStatusRequest(a)) - - var ina network.DealStatusRequest - select { - case <-ctx.Done(): - t.Error("msg not received") - case ina = <-achan: - } - require.NotNil(t, ina) - assert.Equal(t, a, ina) -} - -// assertDealStatusResponseReceived performs the verification that a QueryResponse is received -func assertDealStatusResponseReceived(inCtx context.Context, t *testing.T, - fromNetwork network.StorageMarketNetwork, - toHost peer.ID, - achan chan network.DealStatusResponse) { - ctx, cancel := context.WithTimeout(inCtx, 10*time.Second) - defer cancel() - - // setup query stream host1 --> host 2 - as1, err := 
fromNetwork.NewDealStatusStream(ctx, toHost) - require.NoError(t, err) - - // send queryresponse to host2 - ar := shared_testutil.MakeTestDealStatusResponse() - require.NoError(t, as1.WriteDealStatusResponse(ar)) - - // read queryresponse - var inar network.DealStatusResponse - select { - case <-ctx.Done(): - t.Error("msg not received") - case inar = <-achan: - } - - require.NotNil(t, inar) - assert.Equal(t, ar, inar) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/network.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/network.go deleted file mode 100644 index 50df0830ec..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/network.go +++ /dev/null @@ -1,64 +0,0 @@ -package network - -import ( - "context" - - "github.com/libp2p/go-libp2p-core/peer" - - ma "github.com/multiformats/go-multiaddr" -) - -// These are the required interfaces that must be implemented to send and receive data -// for storage deals. 
- -// StorageAskStream is a stream for reading/writing requests & -// responses on the Storage Ask protocol -type StorageAskStream interface { - ReadAskRequest() (AskRequest, error) - WriteAskRequest(AskRequest) error - ReadAskResponse() (AskResponse, error) - WriteAskResponse(AskResponse) error - Close() error -} - -// StorageDealStream is a stream for reading and writing requests -// and responses on the storage deal protocol -type StorageDealStream interface { - ReadDealProposal() (Proposal, error) - WriteDealProposal(Proposal) error - ReadDealResponse() (SignedResponse, error) - WriteDealResponse(SignedResponse) error - RemotePeer() peer.ID - TagProtectedConnection(identifier string) - UntagProtectedConnection(identifier string) - Close() error -} - -// DealStatusStream is a stream for reading and writing requests -// and responses on the deal status protocol -type DealStatusStream interface { - ReadDealStatusRequest() (DealStatusRequest, error) - WriteDealStatusRequest(DealStatusRequest) error - ReadDealStatusResponse() (DealStatusResponse, error) - WriteDealStatusResponse(DealStatusResponse) error - Close() error -} - -// StorageReceiver implements functions for receiving -// incoming data on storage protocols -type StorageReceiver interface { - HandleAskStream(StorageAskStream) - HandleDealStream(StorageDealStream) - HandleDealStatusStream(DealStatusStream) -} - -// StorageMarketNetwork is a network abstraction for the storage market -type StorageMarketNetwork interface { - NewAskStream(context.Context, peer.ID) (StorageAskStream, error) - NewDealStream(context.Context, peer.ID) (StorageDealStream, error) - NewDealStatusStream(context.Context, peer.ID) (DealStatusStream, error) - SetDelegate(StorageReceiver) error - StopHandlingRequests() error - ID() peer.ID - AddAddrs(peer.ID, []ma.Multiaddr) -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types.go 
b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types.go deleted file mode 100644 index 4d2b822a67..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types.go +++ /dev/null @@ -1,80 +0,0 @@ -package network - -import ( - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-fil-markets/storagemarket" -) - -//go:generate cbor-gen-for AskRequest AskResponse Proposal Response SignedResponse DealStatusRequest DealStatusResponse - -// Proposal is the data sent over the network from client to provider when proposing -// a deal -type Proposal struct { - DealProposal *market.ClientDealProposal - Piece *storagemarket.DataRef - FastRetrieval bool -} - -// ProposalUndefined is an empty Proposal message -var ProposalUndefined = Proposal{} - -// Response is a response to a proposal sent over the network -type Response struct { - State storagemarket.StorageDealStatus - - // DealProposalRejected - Message string - Proposal cid.Cid - - // StorageDealProposalAccepted - PublishMessage *cid.Cid -} - -// SignedResponse is a response that is signed -type SignedResponse struct { - Response Response - - Signature *crypto.Signature -} - -// SignedResponseUndefined represents an empty SignedResponse message -var SignedResponseUndefined = SignedResponse{} - -// AskRequest is a request for current ask parameters for a given miner -type AskRequest struct { - Miner address.Address -} - -// AskRequestUndefined represents and empty AskRequest message -var AskRequestUndefined = AskRequest{} - -// AskResponse is the response sent over the network in response -// to an ask request -type AskResponse struct { - Ask *storagemarket.SignedStorageAsk -} - -// AskResponseUndefined represents an empty AskResponse message -var AskResponseUndefined = AskResponse{} - -// 
DealStatusRequest sent by a client to query deal status -type DealStatusRequest struct { - Proposal cid.Cid - Signature crypto.Signature -} - -// DealStatusRequestUndefined represents an empty DealStatusRequest message -var DealStatusRequestUndefined = DealStatusRequest{} - -// DealStatusResponse is a provider's response to DealStatusRequest -type DealStatusResponse struct { - DealState storagemarket.ProviderDealState - Signature crypto.Signature -} - -// DealStatusResponseUndefined represents an empty DealStatusResponse message -var DealStatusResponseUndefined = DealStatusResponse{} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types_cbor_gen.go deleted file mode 100644 index 167f247d81..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/network/types_cbor_gen.go +++ /dev/null @@ -1,530 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package network - -import ( - "fmt" - "io" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *AskRequest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Miner (address.Address) (struct) - if err := t.Miner.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *AskRequest) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Miner (address.Address) (struct) - - { - - if err := t.Miner.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Miner: %w", err) - } - - } - return nil -} - -func (t *AskResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{129}); err != nil { - return err - } - - // t.Ask (storagemarket.SignedStorageAsk) (struct) - if err := t.Ask.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *AskResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 1 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Ask (storagemarket.SignedStorageAsk) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { 
- var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Ask = new(storagemarket.SignedStorageAsk) - if err := t.Ask.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) - } - } - - } - return nil -} - -func (t *Proposal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{131}); err != nil { - return err - } - - // t.DealProposal (market.ClientDealProposal) (struct) - if err := t.DealProposal.MarshalCBOR(w); err != nil { - return err - } - - // t.Piece (storagemarket.DataRef) (struct) - if err := t.Piece.MarshalCBOR(w); err != nil { - return err - } - - // t.FastRetrieval (bool) (bool) - if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { - return err - } - return nil -} - -func (t *Proposal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealProposal (market.ClientDealProposal) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.DealProposal = new(market.ClientDealProposal) - if err := t.DealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal pointer: %w", err) - } - } - - } - // t.Piece (storagemarket.DataRef) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Piece = new(storagemarket.DataRef) - if err := t.Piece.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling 
t.Piece pointer: %w", err) - } - } - - } - // t.FastRetrieval (bool) (bool) - - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.FastRetrieval = false - case 21: - t.FastRetrieval = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - return nil -} - -func (t *Response) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{132}); err != nil { - return err - } - - // t.State (uint64) (uint64) - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.State))); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { - return err - } - if _, err := w.Write([]byte(t.Message)); err != nil { - return err - } - - // t.Proposal (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Proposal); err != nil { - return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) - } - - // t.PublishMessage (cid.Cid) (struct) - - if t.PublishMessage == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCid(w, *t.PublishMessage); err != nil { - return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) - } - } - - return nil -} - -func (t *Response) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.State (uint64) (uint64) - - { - - 
maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.State = uint64(extra) - - } - // t.Message (string) (string) - - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } - - t.Message = string(sval) - } - // t.Proposal (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) - } - - t.Proposal = c - - } - // t.PublishMessage (cid.Cid) (struct) - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) - } - - t.PublishMessage = &c - } - - } - return nil -} - -func (t *SignedResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Response (network.Response) (struct) - if err := t.Response.MarshalCBOR(w); err != nil { - return err - } - - // t.Signature (crypto.Signature) (struct) - if err := t.Signature.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *SignedResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Response (network.Response) (struct) - - { - - if err := t.Response.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Response: %w", err) - } - - } - // t.Signature (crypto.Signature) (struct) - - { - - pb, err := br.PeekByte() - if err != 
nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - t.Signature = new(crypto.Signature) - if err := t.Signature.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) - } - } - - } - return nil -} - -func (t *DealStatusRequest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.Proposal (cid.Cid) (struct) - - if err := cbg.WriteCid(w, t.Proposal); err != nil { - return xerrors.Errorf("failed to write cid field t.Proposal: %w", err) - } - - // t.Signature (crypto.Signature) (struct) - if err := t.Signature.MarshalCBOR(w); err != nil { - return err - } - return nil -} - -func (t *DealStatusRequest) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Proposal (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Proposal: %w", err) - } - - t.Proposal = c - - } - // t.Signature (crypto.Signature) (struct) - - { - - if err := t.Signature.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Signature: %w", err) - } - - } - return nil -} - -func (t *DealStatusResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return err - } - - // t.DealState (storagemarket.ProviderDealState) (struct) - if err := t.DealState.MarshalCBOR(w); err != nil { - return err - } - - // t.Signature (crypto.Signature) (struct) - if err := t.Signature.MarshalCBOR(w); err != 
nil { - return err - } - return nil -} - -func (t *DealStatusResponse) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.DealState (storagemarket.ProviderDealState) (struct) - - { - - if err := t.DealState.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealState: %w", err) - } - - } - // t.Signature (crypto.Signature) (struct) - - { - - if err := t.Signature.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Signature: %w", err) - } - - } - return nil -} diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go index 1c38a74401..613980dd9b 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/nodes.go @@ -4,15 +4,16 @@ import ( "context" "io" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" "github.com/ipfs/go-cid" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" + "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + + "github.com/filecoin-project/go-fil-markets/shared" ) // DealSectorCommittedCallback is a callback that runs when a sector is committed @@ -45,11 
+46,14 @@ type StorageCommon interface { VerifySignature(ctx context.Context, signature crypto.Signature, signer address.Address, plaintext []byte, tok shared.TipSetToken) (bool, error) // WaitForMessage waits until a message appears on chain. If it is already on chain, the callback is called immediately - WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, error) error) error + WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, cid.Cid, error) error) error // SignsBytes signs the given data with the given address's private key SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) + // DealProviderCollateralBounds returns the min and max collateral a storage provider can issue. + DealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, isVerified bool) (abi.TokenAmount, abi.TokenAmount, error) + // OnDealSectorCommitted waits for a deal's sector to be sealed and proved, indicating the deal is active OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, cb DealSectorCommittedCallback) error @@ -57,6 +61,13 @@ type StorageCommon interface { OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired DealExpiredCallback, onDealSlashed DealSlashedCallback) error } +// PackingResult returns information about how a deal was put into a sector +type PackingResult struct { + SectorNumber abi.SectorNumber + Offset abi.PaddedPieceSize + Size abi.PaddedPieceSize +} + // StorageProviderNode are node dependencies for a StorageProvider type StorageProviderNode interface { StorageCommon @@ -64,29 +75,23 @@ type StorageProviderNode interface { // PublishDeals publishes a deal on chain, returns the message cid, but does not wait for message to appear PublishDeals(ctx context.Context, deal MinerDeal) (cid.Cid, error) - // ListProviderDeals lists all deals on chain associated with a storage 
provider - ListProviderDeals(ctx context.Context, addr address.Address, tok shared.TipSetToken) ([]StorageDeal, error) - // OnDealComplete is called when a deal is complete and on chain, and data has been transferred and is ready to be added to a sector - OnDealComplete(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) error + OnDealComplete(ctx context.Context, deal MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) (*PackingResult, error) // GetMinerWorkerAddress returns the worker address associated with a miner GetMinerWorkerAddress(ctx context.Context, addr address.Address, tok shared.TipSetToken) (address.Address, error) // LocatePieceForDealWithinSector looks up a given dealID in the miners sectors, and returns its sectorID and location - LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, tok shared.TipSetToken) (sectorID uint64, offset uint64, length uint64, err error) + LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, tok shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) // GetDataCap gets the current data cap for addr - GetDataCap(ctx context.Context, addr address.Address, tok shared.TipSetToken) (verifreg.DataCap, error) + GetDataCap(ctx context.Context, addr address.Address, tok shared.TipSetToken) (*verifreg.DataCap, error) } // StorageClientNode are node dependencies for a StorageClient type StorageClientNode interface { StorageCommon - // ListClientDeals lists all on-chain deals associated with a storage client - ListClientDeals(ctx context.Context, addr address.Address, tok shared.TipSetToken) ([]StorageDeal, error) - // GetStorageProviders returns information about known miners ListStorageProviders(ctx context.Context, tok shared.TipSetToken) ([]*StorageProviderInfo, error) @@ -99,9 +104,6 @@ type StorageClientNode interface { // GetDefaultWalletAddress returns the address for this 
client GetDefaultWalletAddress(ctx context.Context) (address.Address, error) - // ValidateAskSignature verifies a the signature is valid for a given SignedStorageAsk - ValidateAskSignature(ctx context.Context, ask *SignedStorageAsk, tok shared.TipSetToken) (bool, error) - // GetMinerInfo returns info for a single miner with the given address GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (*StorageProviderInfo, error) } diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/provider.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/provider.go index 0f61631aaf..2459fd5b67 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/provider.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/provider.go @@ -4,9 +4,11 @@ import ( "context" "io" - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/specs-actors/actors/abi" "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/go-fil-markets/shared" ) // ProviderSubscriber is a callback that is run when events are emitted on a StorageProvider @@ -21,19 +23,19 @@ type StorageProvider interface { // messages on the storage market's libp2p protocols Start(ctx context.Context) error + // OnReady registers a listener for when the provider comes on line + OnReady(shared.ReadyFunc) + // Stop terminates processing of deals on a StorageProvider Stop() error - // SetAsk configures the storage miner's ask with the provided price, + // SetAsk configures the storage miner's ask with the provided prices (for unverified and verified deals), // duration, and options. Any previously-existing ask is replaced. 
- SetAsk(price abi.TokenAmount, duration abi.ChainEpoch, options ...StorageAskOption) error + SetAsk(price abi.TokenAmount, verifiedPrice abi.TokenAmount, duration abi.ChainEpoch, options ...StorageAskOption) error // GetAsk returns the storage miner's ask, or nil if one does not exist. GetAsk() *SignedStorageAsk - // ListDeals lists on-chain deals associated with this storage provider - ListDeals(ctx context.Context) ([]StorageDeal, error) - // ListLocalDeals lists deals processed by this storage provider ListLocalDeals() ([]MinerDeal, error) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/testnodes/testnodes.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/testnodes/testnodes.go deleted file mode 100644 index 7c3b55a2d4..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/testnodes/testnodes.go +++ /dev/null @@ -1,335 +0,0 @@ -// Package testnodes contains stubbed implementations of the StorageProviderNode -// and StorageClientNode interface to simulate communications with a filecoin node -package testnodes - -import ( - "context" - "errors" - "io" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-fil-markets/shared" - "github.com/filecoin-project/go-fil-markets/shared_testutil" - "github.com/filecoin-project/go-fil-markets/storagemarket" -) - -// Below fake node implementations - -// StorageMarketState represents a state for the storage market that can be inspected -// - methods on the provider nodes will affect this state -type StorageMarketState struct { 
- TipSetToken shared.TipSetToken - Epoch abi.ChainEpoch - DealID abi.DealID - Balances map[address.Address]abi.TokenAmount - StorageDeals map[address.Address][]storagemarket.StorageDeal - Providers []*storagemarket.StorageProviderInfo -} - -// NewStorageMarketState returns a new empty state for the storage market -func NewStorageMarketState() *StorageMarketState { - return &StorageMarketState{ - Epoch: 0, - DealID: 0, - Balances: map[address.Address]abi.TokenAmount{}, - StorageDeals: map[address.Address][]storagemarket.StorageDeal{}, - Providers: nil, - } -} - -// AddFunds adds funds for a given address in the storage market -func (sma *StorageMarketState) AddFunds(addr address.Address, amount abi.TokenAmount) { - if existing, ok := sma.Balances[addr]; ok { - sma.Balances[addr] = big.Add(existing, amount) - } else { - sma.Balances[addr] = amount - } -} - -// Balance returns the balance of a given address in the market -func (sma *StorageMarketState) Balance(addr address.Address) storagemarket.Balance { - if existing, ok := sma.Balances[addr]; ok { - return storagemarket.Balance{Locked: big.NewInt(0), Available: existing} - } - return storagemarket.Balance{Locked: big.NewInt(0), Available: big.NewInt(0)} -} - -// Deals returns all deals in the current state -func (sma *StorageMarketState) Deals(addr address.Address) []storagemarket.StorageDeal { - if existing, ok := sma.StorageDeals[addr]; ok { - return existing - } - return nil -} - -// StateKey returns a state key with the storage market states set Epoch -func (sma *StorageMarketState) StateKey() (shared.TipSetToken, abi.ChainEpoch) { - return sma.TipSetToken, sma.Epoch -} - -// AddDeal adds a deal to the current state of the storage market -func (sma *StorageMarketState) AddDeal(deal storagemarket.StorageDeal) (shared.TipSetToken, abi.ChainEpoch) { - for _, addr := range []address.Address{deal.Client, deal.Provider} { - if existing, ok := sma.StorageDeals[addr]; ok { - sma.StorageDeals[addr] = append(existing, 
deal) - } else { - sma.StorageDeals[addr] = []storagemarket.StorageDeal{deal} - } - } - - return sma.StateKey() -} - -// FakeCommonNode implements common methods for the storage & client node adapters -// where responses are stubbed -type FakeCommonNode struct { - SMState *StorageMarketState - AddFundsCid cid.Cid - EnsureFundsError error - VerifySignatureFails bool - GetBalanceError error - GetChainHeadError error - SignBytesError error - DealCommittedSyncError error - DealCommittedAsyncError error - WaitForDealCompletionError error - OnDealExpiredError error - OnDealSlashedError error - OnDealSlashedEpoch abi.ChainEpoch - - WaitForMessageBlocks bool - WaitForMessageError error - WaitForMessageExitCode exitcode.ExitCode - WaitForMessageRetBytes []byte - WaitForMessageNodeError error - WaitForMessageCalls []cid.Cid -} - -// GetChainHead returns the state id in the storage market state -func (n *FakeCommonNode) GetChainHead(ctx context.Context) (shared.TipSetToken, abi.ChainEpoch, error) { - if n.GetChainHeadError == nil { - key, epoch := n.SMState.StateKey() - return key, epoch, nil - } - - return []byte{}, 0, n.GetChainHeadError -} - -// AddFunds adds funds to the given actor in the storage market state -func (n *FakeCommonNode) AddFunds(ctx context.Context, addr address.Address, amount abi.TokenAmount) (cid.Cid, error) { - n.SMState.AddFunds(addr, amount) - return n.AddFundsCid, nil -} - -// EnsureFunds adds funds to the given actor in the storage market state to ensure it has at least the given amount -func (n *FakeCommonNode) EnsureFunds(ctx context.Context, addr, wallet address.Address, amount abi.TokenAmount, tok shared.TipSetToken) (cid.Cid, error) { - if n.EnsureFundsError == nil { - balance := n.SMState.Balance(addr) - if balance.Available.LessThan(amount) { - return n.AddFunds(ctx, addr, big.Sub(amount, balance.Available)) - } - } - - return cid.Undef, n.EnsureFundsError -} - -// WaitForMessage simulates waiting for a message to appear on chain -func (n 
*FakeCommonNode) WaitForMessage(ctx context.Context, mcid cid.Cid, onCompletion func(exitcode.ExitCode, []byte, error) error) error { - n.WaitForMessageCalls = append(n.WaitForMessageCalls, mcid) - - if n.WaitForMessageError != nil { - return n.WaitForMessageError - } - - if n.WaitForMessageBlocks { - // just leave the test node in this state to simulate a long operation - return nil - } - - return onCompletion(n.WaitForMessageExitCode, n.WaitForMessageRetBytes, n.WaitForMessageNodeError) -} - -// GetBalance returns the funds in the storage market state -func (n *FakeCommonNode) GetBalance(ctx context.Context, addr address.Address, tok shared.TipSetToken) (storagemarket.Balance, error) { - if n.GetBalanceError == nil { - return n.SMState.Balance(addr), nil - } - return storagemarket.Balance{}, n.GetBalanceError -} - -// VerifySignature just always returns true, for now -func (n *FakeCommonNode) VerifySignature(ctx context.Context, signature crypto.Signature, addr address.Address, data []byte, tok shared.TipSetToken) (bool, error) { - return !n.VerifySignatureFails, nil -} - -// SignBytes simulates signing data by returning a test signature -func (n *FakeCommonNode) SignBytes(ctx context.Context, signer address.Address, b []byte) (*crypto.Signature, error) { - if n.SignBytesError == nil { - return shared_testutil.MakeTestSignature(), nil - } - return nil, n.SignBytesError -} - -// OnDealSectorCommitted returns immediately, and returns stubbed errors -func (n *FakeCommonNode) OnDealSectorCommitted(ctx context.Context, provider address.Address, dealID abi.DealID, cb storagemarket.DealSectorCommittedCallback) error { - if n.DealCommittedSyncError == nil { - cb(n.DealCommittedAsyncError) - } - return n.DealCommittedSyncError -} - -// OnDealExpiredOrSlashed simulates waiting for a deal to be expired or slashed, but provides stubbed behavior -func (n *FakeCommonNode) OnDealExpiredOrSlashed(ctx context.Context, dealID abi.DealID, onDealExpired 
storagemarket.DealExpiredCallback, onDealSlashed storagemarket.DealSlashedCallback) error { - if n.WaitForDealCompletionError != nil { - return n.WaitForDealCompletionError - } - - if n.OnDealSlashedError != nil { - onDealSlashed(abi.ChainEpoch(0), n.OnDealSlashedError) - return nil - } - - if n.OnDealExpiredError != nil { - onDealExpired(n.OnDealExpiredError) - return nil - } - - if n.OnDealSlashedEpoch == 0 { - onDealExpired(nil) - return nil - } - - onDealSlashed(n.OnDealSlashedEpoch, nil) - return nil -} - -var _ storagemarket.StorageCommon = (*FakeCommonNode)(nil) - -// FakeClientNode is a node adapter for a storage client whose responses -// are stubbed -type FakeClientNode struct { - FakeCommonNode - ClientAddr address.Address - MinerAddr address.Address - WorkerAddr address.Address - ValidationError error - ValidatePublishedDealID abi.DealID - ValidatePublishedError error -} - -// ListClientDeals just returns the deals in the storage market state -func (n *FakeClientNode) ListClientDeals(ctx context.Context, addr address.Address, tok shared.TipSetToken) ([]storagemarket.StorageDeal, error) { - return n.SMState.Deals(addr), nil -} - -// ListStorageProviders lists the providers in the storage market state -func (n *FakeClientNode) ListStorageProviders(ctx context.Context, tok shared.TipSetToken) ([]*storagemarket.StorageProviderInfo, error) { - return n.SMState.Providers, nil -} - -// ValidatePublishedDeal always succeeds -func (n *FakeClientNode) ValidatePublishedDeal(ctx context.Context, deal storagemarket.ClientDeal) (abi.DealID, error) { - return n.ValidatePublishedDealID, n.ValidatePublishedError -} - -// SignProposal signs a deal with a dummy signature -func (n *FakeClientNode) SignProposal(ctx context.Context, signer address.Address, proposal market.DealProposal) (*market.ClientDealProposal, error) { - return &market.ClientDealProposal{ - Proposal: proposal, - ClientSignature: *shared_testutil.MakeTestSignature(), - }, nil -} - -// 
GetDefaultWalletAddress returns a stubbed ClientAddr -func (n *FakeClientNode) GetDefaultWalletAddress(ctx context.Context) (address.Address, error) { - return n.ClientAddr, nil -} - -// GetMinerInfo returns stubbed information for the first miner in storage market state -func (n *FakeClientNode) GetMinerInfo(ctx context.Context, maddr address.Address, tok shared.TipSetToken) (*storagemarket.StorageProviderInfo, error) { - if len(n.SMState.Providers) == 0 { - return nil, errors.New("Provider not found") - } - return n.SMState.Providers[0], nil -} - -// ValidateAskSignature returns the stubbed validation error and a boolean value -// communicating the validity of the provided signature -func (n *FakeClientNode) ValidateAskSignature(ctx context.Context, ask *storagemarket.SignedStorageAsk, tok shared.TipSetToken) (bool, error) { - return n.ValidationError == nil, n.ValidationError -} - -var _ storagemarket.StorageClientNode = (*FakeClientNode)(nil) - -// FakeProviderNode implements functions specific to the StorageProviderNode -type FakeProviderNode struct { - FakeCommonNode - MinerAddr address.Address - MinerWorkerError error - PieceLength uint64 - PieceSectorID uint64 - PublishDealID abi.DealID - PublishDealsError error - OnDealCompleteError error - OnDealCompleteCalls []storagemarket.MinerDeal - LocatePieceForDealWithinSectorError error - DataCap verifreg.DataCap - GetDataCapErr error -} - -// PublishDeals simulates publishing a deal by adding it to the storage market state -func (n *FakeProviderNode) PublishDeals(ctx context.Context, deal storagemarket.MinerDeal) (cid.Cid, error) { - if n.PublishDealsError == nil { - sd := storagemarket.StorageDeal{ - DealProposal: deal.Proposal, - DealState: market.DealState{}, - } - - n.SMState.AddDeal(sd) - - return shared_testutil.GenerateCids(1)[0], nil - } - return cid.Undef, n.PublishDealsError -} - -// ListProviderDeals returns the deals in the storage market state -func (n *FakeProviderNode) ListProviderDeals(ctx 
context.Context, addr address.Address, tok shared.TipSetToken) ([]storagemarket.StorageDeal, error) { - return n.SMState.Deals(addr), nil -} - -// OnDealComplete simulates passing of the deal to the storage miner, and does nothing -func (n *FakeProviderNode) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceReader io.Reader) error { - n.OnDealCompleteCalls = append(n.OnDealCompleteCalls, deal) - return n.OnDealCompleteError -} - -// GetMinerWorkerAddress returns the address specified by MinerAddr -func (n *FakeProviderNode) GetMinerWorkerAddress(ctx context.Context, miner address.Address, tok shared.TipSetToken) (address.Address, error) { - if n.MinerWorkerError == nil { - return n.MinerAddr, nil - } - return address.Undef, n.MinerWorkerError -} - -// LocatePieceForDealWithinSector returns stubbed data for a pieces location in a sector -func (n *FakeProviderNode) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, tok shared.TipSetToken) (sectorID uint64, offset uint64, length uint64, err error) { - if n.LocatePieceForDealWithinSectorError == nil { - return n.PieceSectorID, 0, n.PieceLength, nil - } - return 0, 0, 0, n.LocatePieceForDealWithinSectorError -} - -// GetDataCap gets the current data cap for addr -func (n *FakeProviderNode) GetDataCap(ctx context.Context, addr address.Address, tok shared.TipSetToken) (verifreg.DataCap, error) { - return n.DataCap, n.GetDataCapErr -} - -var _ storagemarket.StorageProviderNode = (*FakeProviderNode)(nil) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go index 0200efe802..db6914ea92 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types.go @@ -1,27 +1,34 @@ package storagemarket import ( - "github.com/filecoin-project/go-address" - 
"github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" "github.com/ipfs/go-cid" "github.com/libp2p/go-libp2p-core/peer" ma "github.com/multiformats/go-multiaddr" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-address" + datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-multistore" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/specs-actors/actors/builtin/market" "github.com/filecoin-project/go-fil-markets/filestore" ) -//go:generate cbor-gen-for ClientDeal MinerDeal Balance SignedStorageAsk StorageAsk StorageDeal DataRef ProviderDealState +//go:generate cbor-gen-for --map-encoding ClientDeal MinerDeal Balance SignedStorageAsk StorageAsk DataRef ProviderDealState // DealProtocolID is the ID for the libp2p protocol for proposing storage deals. -const DealProtocolID = "/fil/storage/mk/1.0.1" +const OldDealProtocolID = "/fil/storage/mk/1.0.1" +const DealProtocolID = "/fil/storage/mk/1.1.0" // AskProtocolID is the ID for the libp2p protocol for querying miners for their current StorageAsk. -const AskProtocolID = "/fil/storage/ask/1.0.1" +const OldAskProtocolID = "/fil/storage/ask/1.0.1" +const AskProtocolID = "/fil/storage/ask/1.1.0" // DealStatusProtocolID is the ID for the libp2p protocol for querying miners for the current status of a deal. -const DealStatusProtocolID = "/fil/storage/status/1.0.1" +const OldDealStatusProtocolID = "/fil/storage/status/1.0.1" +const DealStatusProtocolID = "/fil/storage/status/1.1.0" // Balance represents a current balance of funds in the StorageMarketActor. type Balance struct { @@ -35,7 +42,8 @@ type Balance struct { // storage provider may run its own decision logic). 
type StorageAsk struct { // Price per GiB / Epoch - Price abi.TokenAmount + Price abi.TokenAmount + VerifiedPrice abi.TokenAmount MinPieceSize abi.PaddedPieceSize MaxPieceSize abi.PaddedPieceSize @@ -77,45 +85,48 @@ var StorageAskUndefined = StorageAsk{} // MinerDeal is the local state tracked for a deal by a StorageProvider type MinerDeal struct { market.ClientDealProposal - ProposalCid cid.Cid - AddFundsCid *cid.Cid - PublishCid *cid.Cid - Miner peer.ID - Client peer.ID - State StorageDealStatus - PiecePath filestore.Path - MetadataPath filestore.Path - SlashEpoch abi.ChainEpoch - FastRetrieval bool - Message string - - Ref *DataRef - - DealID abi.DealID + ProposalCid cid.Cid + AddFundsCid *cid.Cid + PublishCid *cid.Cid + Miner peer.ID + Client peer.ID + State StorageDealStatus + PiecePath filestore.Path + MetadataPath filestore.Path + SlashEpoch abi.ChainEpoch + FastRetrieval bool + Message string + StoreID *multistore.StoreID + FundsReserved abi.TokenAmount + Ref *DataRef + AvailableForRetrieval bool + + DealID abi.DealID + CreationTime cbg.CborTime + + TransferChannelId *datatransfer.ChannelID } // ClientDeal is the local state tracked for a deal by a StorageClient type ClientDeal struct { market.ClientDealProposal - ProposalCid cid.Cid - AddFundsCid *cid.Cid - State StorageDealStatus - Miner peer.ID - MinerWorker address.Address - DealID abi.DealID - DataRef *DataRef - Message string - PublishMessage *cid.Cid - SlashEpoch abi.ChainEpoch - PollRetryCount uint64 - PollErrorCount uint64 - FastRetrieval bool -} - -// StorageDeal is a local combination of a proposal and a current deal state -type StorageDeal struct { - market.DealProposal - market.DealState + ProposalCid cid.Cid + AddFundsCid *cid.Cid + State StorageDealStatus + Miner peer.ID + MinerWorker address.Address + DealID abi.DealID + DataRef *DataRef + Message string + PublishMessage *cid.Cid + SlashEpoch abi.ChainEpoch + PollRetryCount uint64 + PollErrorCount uint64 + FastRetrieval bool + StoreID 
*multistore.StoreID + FundsReserved abi.TokenAmount + CreationTime cbg.CborTime + TransferChannelID *datatransfer.ChannelID } // StorageProviderInfo describes on chain information about a StorageProvider @@ -134,6 +145,21 @@ type ProposeStorageDealResult struct { ProposalCid cid.Cid } +// ProposeStorageDealParams describes the parameters for proposing a storage deal +type ProposeStorageDealParams struct { + Addr address.Address + Info *StorageProviderInfo + Data *DataRef + StartEpoch abi.ChainEpoch + EndEpoch abi.ChainEpoch + Price abi.TokenAmount + Collateral abi.TokenAmount + Rt abi.RegisteredSealProof + FastRetrieval bool + VerifiedDeal bool + StoreID *multistore.StoreID +} + const ( // TTGraphsync means data for a deal will be transferred by graphsync TTGraphsync = "graphsync" diff --git a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go index e75b7cd83a..de3bdb7598 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go +++ b/vendor/github.com/filecoin-project/go-fil-markets/storagemarket/types_cbor_gen.go @@ -6,11 +6,13 @@ import ( "fmt" "io" - "github.com/filecoin-project/go-fil-markets/filestore" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/libp2p/go-libp2p-core/peer" + datatransfer "github.com/filecoin-project/go-data-transfer" + filestore "github.com/filecoin-project/go-fil-markets/filestore" + multistore "github.com/filecoin-project/go-multistore" + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + market "github.com/filecoin-project/specs-actors/actors/builtin/market" + peer "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) 
@@ -22,739 +24,1428 @@ func (t *ClientDeal) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{142}); err != nil { + if _, err := w.Write([]byte{178}); err != nil { return err } + scratch := make([]byte, 9) + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientDealProposal")); err != nil { + return err + } + if err := t.ClientDealProposal.MarshalCBOR(w); err != nil { return err } // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } - if err := cbg.WriteCid(w, t.ProposalCid); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.ProposalCid); err != nil { return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) } // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } if t.AddFundsCid == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.AddFundsCid); err != nil { + if err := cbg.WriteCidBuf(scratch, w, *t.AddFundsCid); err != nil { return xerrors.Errorf("failed to write cid field 
t.AddFundsCid: %w", err) } } // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.State))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil { return err } // t.Miner (peer.ID) (string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + if len(t.Miner) > cbg.MaxLength { return xerrors.Errorf("Value in field t.Miner was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Miner)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Miner))); err != nil { return err } - if _, err := w.Write([]byte(t.Miner)); err != nil { + if _, err := io.WriteString(w, string(t.Miner)); err != nil { return err } // t.MinerWorker (address.Address) (struct) + if len("MinerWorker") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinerWorker\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MinerWorker"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinerWorker")); err != nil { + return err + } + if err := t.MinerWorker.MarshalCBOR(w); err != nil { return err } // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { return err } // t.DataRef (storagemarket.DataRef) (struct) + if len("DataRef") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DataRef\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DataRef"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DataRef")); err != nil { + return err + } + if err := t.DataRef.MarshalCBOR(w); err != nil { return err } // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + if len(t.Message) > cbg.MaxLength { return xerrors.Errorf("Value in field t.Message was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { return err } - if _, err := w.Write([]byte(t.Message)); err != nil { + if _, err := io.WriteString(w, string(t.Message)); err != nil { return err } // t.PublishMessage (cid.Cid) (struct) + if len("PublishMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishMessage\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, 
string("PublishMessage")); err != nil { + return err + } if t.PublishMessage == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.PublishMessage); err != nil { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishMessage); err != nil { return xerrors.Errorf("failed to write cid field t.PublishMessage: %w", err) } } // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SlashEpoch")); err != nil { + return err + } + if t.SlashEpoch >= 0 { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SlashEpoch))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { return err } } else { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajNegativeInt, uint64(-t.SlashEpoch)-1)); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { return err } } // t.PollRetryCount (uint64) (uint64) + if len("PollRetryCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollRetryCount\" was too long") + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PollRetryCount))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PollRetryCount"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PollRetryCount")); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PollRetryCount)); err != nil { return err } // t.PollErrorCount (uint64) (uint64) + if len("PollErrorCount") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PollErrorCount\" 
was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PollErrorCount"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PollErrorCount")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PollErrorCount))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PollErrorCount)); err != nil { return err } // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { return err } + + // t.StoreID (multistore.StoreID) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(w); err != nil { + return err + } + + // 
t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CreationTime"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CreationTime")); err != nil { + return err + } + + if err := t.CreationTime.MarshalCBOR(w); err != nil { + return err + } + + // t.TransferChannelID (datatransfer.ChannelID) (struct) + if len("TransferChannelID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TransferChannelID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferChannelID")); err != nil { + return err + } + + if err := t.TransferChannelID.MarshalCBOR(w); err != nil { + return err + } return nil } func (t *ClientDeal) UnmarshalCBOR(r io.Reader) error { + *t = ClientDeal{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 14 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("ClientDeal: map struct too large (%d)", extra) } - // t.ClientDealProposal (market.ClientDealProposal) (struct) + var name string + n := extra - { + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - if err := t.ClientDealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + name = string(sval) } - } - // t.ProposalCid (cid.Cid) (struct) 
+ switch name { + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": - { + { - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) - } + if err := t.ClientDealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } - t.ProposalCid = c + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": - } - // t.AddFundsCid (cid.Cid) (struct) + { - { + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = c - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err } - } else { + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) } + // t.State (uint64) (uint64) + case "State": - t.AddFundsCid = &c - } + { - } - // t.State (uint64) (uint64) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) - { + } + // t.Miner (peer.ID) (string) + case "Miner": - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.State = uint64(extra) + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - } - // 
t.Miner (peer.ID) (string) + t.Miner = peer.ID(sval) + } + // t.MinerWorker (address.Address) (struct) + case "MinerWorker": - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } + { - t.Miner = peer.ID(sval) - } - // t.MinerWorker (address.Address) (struct) + if err := t.MinerWorker.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.MinerWorker: %w", err) + } - { + } + // t.DealID (abi.DealID) (uint64) + case "DealID": - if err := t.MinerWorker.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.MinerWorker: %w", err) - } + { - } - // t.DealID (abi.DealID) (uint64) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) - { + } + // t.DataRef (storagemarket.DataRef) (struct) + case "DataRef": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.DataRef = new(DataRef) + if err := t.DataRef.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.DataRef pointer: %w", err) + } + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) + } + // t.Message (string) (string) + case "Message": - } - // t.DataRef (storagemarket.DataRef) (struct) + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - { + t.Message = string(sval) + } + // t.PublishMessage (cid.Cid) (struct) + case "PublishMessage": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) 
+ } + + t.PublishMessage = &c + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err } - } else { - t.DataRef = new(DataRef) - if err := t.DataRef.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DataRef pointer: %w", err) + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) } - } + // t.PollRetryCount (uint64) (uint64) + case "PollRetryCount": - } - // t.Message (string) (string) + { - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollRetryCount = uint64(extra) - t.Message = string(sval) - } - // t.PublishMessage (cid.Cid) (struct) + } + // t.PollErrorCount (uint64) (uint64) + case "PollErrorCount": - { + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PollErrorCount = uint64(extra) - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err } - } else { + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": - c, err := cbg.ReadCid(br) + maj, extra, err = 
cbg.CborReadHeaderBuf(br, scratch) if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishMessage: %w", err) + return err } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.StoreID (multistore.StoreID) (uint64) + case "StoreID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := multistore.StoreID(extra) + t.StoreID = &typed + } - t.PublishMessage = &c - } + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": - } - // t.SlashEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeader(br) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } + { - t.SlashEpoch = abi.ChainEpoch(extraI) - } - // t.PollRetryCount (uint64) (uint64) + if err := t.FundsReserved.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } - { + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PollRetryCount = uint64(extra) + { - } - // 
t.PollErrorCount (uint64) (uint64) + if err := t.CreationTime.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } - { + } + // t.TransferChannelID (datatransfer.ChannelID) (struct) + case "TransferChannelID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.TransferChannelID = new(datatransfer.ChannelID) + if err := t.TransferChannelID.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelID pointer: %w", err) + } + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.PollErrorCount = uint64(extra) + } + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } - // t.FastRetrieval (bool) (bool) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.FastRetrieval = false - case 21: - t.FastRetrieval = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } return nil } - func (t *MinerDeal) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{142}); err != nil { + if _, err := w.Write([]byte{179}); err != nil { return err } + scratch := make([]byte, 9) + // t.ClientDealProposal (market.ClientDealProposal) (struct) + if len("ClientDealProposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ClientDealProposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ClientDealProposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ClientDealProposal")); err != nil { + return err + } + if err := 
t.ClientDealProposal.MarshalCBOR(w); err != nil { return err } // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } - if err := cbg.WriteCid(w, t.ProposalCid); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.ProposalCid); err != nil { return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) } // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } if t.AddFundsCid == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.AddFundsCid); err != nil { + if err := cbg.WriteCidBuf(scratch, w, *t.AddFundsCid); err != nil { return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) } } // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } if t.PublishCid == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.PublishCid); err != nil { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) } } // t.Miner (peer.ID) 
(string) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + if len(t.Miner) > cbg.MaxLength { return xerrors.Errorf("Value in field t.Miner was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Miner)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Miner))); err != nil { return err } - if _, err := w.Write([]byte(t.Miner)); err != nil { + if _, err := io.WriteString(w, string(t.Miner)); err != nil { return err } // t.Client (peer.ID) (string) + if len("Client") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Client\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Client"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Client")); err != nil { + return err + } + if len(t.Client) > cbg.MaxLength { return xerrors.Errorf("Value in field t.Client was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Client)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Client))); err != nil { return err } - if _, err := w.Write([]byte(t.Client)); err != nil { + if _, err := io.WriteString(w, string(t.Client)); err != nil { return err } // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, 
uint64(t.State))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil { return err } // t.PiecePath (filestore.Path) (string) + if len("PiecePath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PiecePath\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PiecePath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PiecePath")); err != nil { + return err + } + if len(t.PiecePath) > cbg.MaxLength { return xerrors.Errorf("Value in field t.PiecePath was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.PiecePath)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.PiecePath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.PiecePath)); err != nil { + return err + } + + // t.MetadataPath (filestore.Path) (string) + if len("MetadataPath") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MetadataPath\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MetadataPath"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MetadataPath")); err != nil { + return err + } + + if len(t.MetadataPath) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.MetadataPath was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.MetadataPath))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.MetadataPath)); err != nil { + return err + } + + // t.SlashEpoch (abi.ChainEpoch) (int64) + if len("SlashEpoch") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SlashEpoch\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SlashEpoch"))); err != nil { + return err + } + if _, err := io.WriteString(w, 
string("SlashEpoch")); err != nil { + return err + } + + if t.SlashEpoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SlashEpoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.SlashEpoch-1)); err != nil { + return err + } + } + + // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + return err + } + + // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + + if len(t.Message) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Message was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Message)); err != nil { + return err + } + + // t.StoreID (multistore.StoreID) (uint64) + if len("StoreID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"StoreID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("StoreID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("StoreID")); err != nil { + return err + } + + if t.StoreID == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(*t.StoreID)); err != nil { + return err + } + } + + // t.FundsReserved (big.Int) (struct) + if len("FundsReserved") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FundsReserved\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FundsReserved"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FundsReserved")); err != nil { + return err + } + + if err := t.FundsReserved.MarshalCBOR(w); err != nil { + return err + } + + // t.Ref (storagemarket.DataRef) (struct) + if len("Ref") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ref\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Ref"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ref")); err != nil { + return err + } + + if err := t.Ref.MarshalCBOR(w); err != nil { + return err + } + + // t.AvailableForRetrieval (bool) (bool) + if len("AvailableForRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AvailableForRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AvailableForRetrieval"))); err != nil { return err } - if _, err := w.Write([]byte(t.PiecePath)); err != nil { + if _, err := io.WriteString(w, string("AvailableForRetrieval")); err != nil { return err } - // t.MetadataPath (filestore.Path) (string) - if len(t.MetadataPath) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.MetadataPath was too long") + if err := cbg.WriteBool(w, t.AvailableForRetrieval); err != nil { + return err } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.MetadataPath)))); err != nil { - return err + // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") } - if _, err := 
w.Write([]byte(t.MetadataPath)); err != nil { + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { return err } - - // t.SlashEpoch (abi.ChainEpoch) (int64) - if t.SlashEpoch >= 0 { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SlashEpoch))); err != nil { - return err - } - } else { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajNegativeInt, uint64(-t.SlashEpoch)-1)); err != nil { - return err - } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err } - // t.FastRetrieval (bool) (bool) - if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { return err } - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") + // t.CreationTime (typegen.CborTime) (struct) + if len("CreationTime") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CreationTime\" was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CreationTime"))); err != nil { return err } - if _, err := w.Write([]byte(t.Message)); err != nil { + if _, err := io.WriteString(w, string("CreationTime")); err != nil { return err } - // t.Ref (storagemarket.DataRef) (struct) - if err := t.Ref.MarshalCBOR(w); err != nil { + if err := t.CreationTime.MarshalCBOR(w); err != nil { return err } - // t.DealID (abi.DealID) (uint64) + // t.TransferChannelId (datatransfer.ChannelID) (struct) + if len("TransferChannelId") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferChannelId\" was too long") + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, 
cbg.MajTextString, uint64(len("TransferChannelId"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferChannelId")); err != nil { return err } + if err := t.TransferChannelId.MarshalCBOR(w); err != nil { + return err + } return nil } func (t *MinerDeal) UnmarshalCBOR(r io.Reader) error { + *t = MinerDeal{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 14 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("MinerDeal: map struct too large (%d)", extra) } - // t.ClientDealProposal (market.ClientDealProposal) (struct) + var name string + n := extra - { + for i := uint64(0); i < n; i++ { - if err := t.ClientDealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) } - } - // t.ProposalCid (cid.Cid) (struct) + switch name { + // t.ClientDealProposal (market.ClientDealProposal) (struct) + case "ClientDealProposal": - { + { - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) - } + if err := t.ClientDealProposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ClientDealProposal: %w", err) + } - t.ProposalCid = c + } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": - } - // t.AddFundsCid (cid.Cid) (struct) + { - { + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == 
cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { + t.ProposalCid = c - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": - t.AddFundsCid = &c - } + { - } - // t.PublishCid (cid.Cid) (struct) + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } - { + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } + + t.AddFundsCid = &c + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err } - } else { + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } + + t.PublishCid = &c + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) } + // t.Miner (peer.ID) (string) + case "Miner": - t.PublishCid = &c - } + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - } - // t.Miner (peer.ID) (string) + t.Miner = peer.ID(sval) + } + // t.Client (peer.ID) (string) + case "Client": - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - t.Miner = peer.ID(sval) - } - // t.Client (peer.ID) (string) + t.Client = peer.ID(sval) + } + // t.State (uint64) (uint64) + case "State": - { - sval, err := cbg.ReadString(br) - if err != nil { - return err 
- } + { - t.Client = peer.ID(sval) - } - // t.State (uint64) (uint64) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) - { + } + // t.PiecePath (filestore.Path) (string) + case "PiecePath": - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.State = uint64(extra) + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - } - // t.PiecePath (filestore.Path) (string) + t.PiecePath = filestore.Path(sval) + } + // t.MetadataPath (filestore.Path) (string) + case "MetadataPath": - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - t.PiecePath = filestore.Path(sval) - } - // t.MetadataPath (filestore.Path) (string) + t.MetadataPath = filestore.Path(sval) + } + // t.SlashEpoch (abi.ChainEpoch) (int64) + case "SlashEpoch": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.SlashEpoch = abi.ChainEpoch(extraI) + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = 
false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Message (string) (string) + case "Message": - t.MetadataPath = filestore.Path(sval) - } - // t.SlashEpoch (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeader(br) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - t.SlashEpoch = abi.ChainEpoch(extraI) - } - // t.FastRetrieval (bool) (bool) + t.Message = string(sval) + } + // t.StoreID (multistore.StoreID) (uint64) + case "StoreID": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + typed := multistore.StoreID(extra) + t.StoreID = &typed + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.FastRetrieval = false - case 21: - t.FastRetrieval = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } - // t.Message (string) (string) + } + // t.FundsReserved (big.Int) (struct) + case "FundsReserved": - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } + { - t.Message = string(sval) - } - // t.Ref (storagemarket.DataRef) (struct) + 
if err := t.FundsReserved.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.FundsReserved: %w", err) + } - { + } + // t.Ref (storagemarket.DataRef) (struct) + case "Ref": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Ref = new(DataRef) + if err := t.Ref.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + } + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + } + // t.AvailableForRetrieval (bool) (bool) + case "AvailableForRetrieval": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { return err } - } else { - t.Ref = new(DataRef) - if err := t.Ref.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Ref pointer: %w", err) + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") } - } + switch extra { + case 20: + t.AvailableForRetrieval = false + case 21: + t.AvailableForRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.DealID (abi.DealID) (uint64) + case "DealID": - } - // t.DealID (abi.DealID) (uint64) + { - { + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.DealID = abi.DealID(extra) + } + // t.CreationTime (typegen.CborTime) (struct) + case "CreationTime": + + { + + if err := t.CreationTime.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CreationTime: %w", err) + } + } + // 
t.TransferChannelId (datatransfer.ChannelID) (struct) + case "TransferChannelId": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.TransferChannelId = new(datatransfer.ChannelID) + if err := t.TransferChannelId.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TransferChannelId pointer: %w", err) + } + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } + return nil } - func (t *Balance) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write([]byte{162}); err != nil { return err } + scratch := make([]byte, 9) + // t.Locked (big.Int) (struct) + if len("Locked") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Locked\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Locked"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Locked")); err != nil { + return err + } + if err := t.Locked.MarshalCBOR(w); err != nil { return err } // t.Available (big.Int) (struct) + if len("Available") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Available\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Available"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Available")); err != nil { + return err + } + if err := t.Available.MarshalCBOR(w); err != nil { return err } @@ -762,56 +1453,105 @@ func (t *Balance) MarshalCBOR(w io.Writer) error { } func (t *Balance) UnmarshalCBOR(r io.Reader) error { + *t = Balance{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - 
return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("Balance: map struct too large (%d)", extra) } - // t.Locked (big.Int) (struct) + var name string + n := extra + + for i := uint64(0); i < n; i++ { - { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - if err := t.Locked.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Locked: %w", err) + name = string(sval) } - } - // t.Available (big.Int) (struct) + switch name { + // t.Locked (big.Int) (struct) + case "Locked": - { + { - if err := t.Available.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Available: %w", err) - } + if err := t.Locked.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Locked: %w", err) + } + } + // t.Available (big.Int) (struct) + case "Available": + + { + + if err := t.Available.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Available: %w", err) + } + + } + + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } + return nil } - func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{130}); err != nil { + if _, err := w.Write([]byte{162}); err != nil { return err } + scratch := make([]byte, 9) + // t.Ask (storagemarket.StorageAsk) (struct) + if len("Ask") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Ask\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Ask"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Ask")); err != nil { + return err + } + if err := t.Ask.MarshalCBOR(w); err != nil { return err } // t.Signature (crypto.Signature) (struct) + 
if len("Signature") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Signature\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Signature"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Signature")); err != nil { + return err + } + if err := t.Signature.MarshalCBOR(w); err != nil { return err } @@ -819,121 +1559,234 @@ func (t *SignedStorageAsk) MarshalCBOR(w io.Writer) error { } func (t *SignedStorageAsk) UnmarshalCBOR(r io.Reader) error { + *t = SignedStorageAsk{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("SignedStorageAsk: map struct too large (%d)", extra) } - // t.Ask (storagemarket.StorageAsk) (struct) + var name string + n := extra - { + for i := uint64(0); i < n; i++ { - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { return err } - } else { - t.Ask = new(StorageAsk) - if err := t.Ask.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) - } + + name = string(sval) } - } - // t.Signature (crypto.Signature) (struct) + switch name { + // t.Ask (storagemarket.StorageAsk) (struct) + case "Ask": - { + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Ask = new(StorageAsk) + if err := t.Ask.UnmarshalCBOR(br); err != nil { + 
return xerrors.Errorf("unmarshaling t.Ask pointer: %w", err) + } + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err } - } else { - t.Signature = new(crypto.Signature) - if err := t.Signature.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + // t.Signature (crypto.Signature) (struct) + case "Signature": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Signature = new(crypto.Signature) + if err := t.Signature.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Signature pointer: %w", err) + } + } + } - } + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) + } } + return nil } - func (t *StorageAsk) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{135}); err != nil { + if _, err := w.Write([]byte{168}); err != nil { return err } + scratch := make([]byte, 9) + // t.Price (big.Int) (struct) + if len("Price") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Price\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Price"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Price")); err != nil { + return err + } + if err := t.Price.MarshalCBOR(w); err != nil { return err } + // t.VerifiedPrice (big.Int) (struct) + if len("VerifiedPrice") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"VerifiedPrice\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("VerifiedPrice"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("VerifiedPrice")); err != nil { + return err + } + + if err := 
t.VerifiedPrice.MarshalCBOR(w); err != nil { + return err + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + if len("MinPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MinPieceSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MinPieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MinPieceSize")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MinPieceSize))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MinPieceSize)); err != nil { return err } // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + if len("MaxPieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"MaxPieceSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("MaxPieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("MaxPieceSize")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.MaxPieceSize))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.MaxPieceSize)); err != nil { return err } // t.Miner (address.Address) (struct) + if len("Miner") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Miner\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Miner"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Miner")); err != nil { + return err + } + if err := t.Miner.MarshalCBOR(w); err != nil { return err } // t.Timestamp (abi.ChainEpoch) (int64) + if len("Timestamp") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Timestamp\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Timestamp"))); err != nil { + return err + } + if 
_, err := io.WriteString(w, string("Timestamp")); err != nil { + return err + } + if t.Timestamp >= 0 { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Timestamp))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Timestamp)); err != nil { return err } } else { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajNegativeInt, uint64(-t.Timestamp)-1)); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Timestamp-1)); err != nil { return err } } - // t.Expiry (abi.ChainEpoch) (int64) + // t.Expiry (abi.ChainEpoch) (int64) + if len("Expiry") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Expiry\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Expiry"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Expiry")); err != nil { + return err + } + if t.Expiry >= 0 { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.Expiry))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Expiry)); err != nil { return err } } else { - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajNegativeInt, uint64(-t.Expiry)-1)); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Expiry-1)); err != nil { return err } } // t.SeqNo (uint64) (uint64) + if len("SeqNo") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"SeqNo\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SeqNo"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("SeqNo")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.SeqNo))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SeqNo)); err != nil { return err } @@ 
-941,232 +1794,258 @@ func (t *StorageAsk) MarshalCBOR(w io.Writer) error { } func (t *StorageAsk) UnmarshalCBOR(r io.Reader) error { + *t = StorageAsk{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 7 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("StorageAsk: map struct too large (%d)", extra) } - // t.Price (big.Int) (struct) - - { - - if err := t.Price.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Price: %w", err) - } + var name string + n := extra - } - // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + for i := uint64(0); i < n; i++ { - { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err + name = string(sval) } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MinPieceSize = abi.PaddedPieceSize(extra) - - } - // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) - { + switch name { + // t.Price (big.Int) (struct) + case "Price": - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.MaxPieceSize = abi.PaddedPieceSize(extra) + { - } - // t.Miner (address.Address) (struct) + if err := t.Price.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Price: %w", err) + } - { + } + // t.VerifiedPrice (big.Int) (struct) + case "VerifiedPrice": - if err := t.Miner.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Miner: %w", err) - } + { - } - // t.Timestamp 
(abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeader(br) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } + if err := t.VerifiedPrice.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.VerifiedPrice: %w", err) + } - t.Timestamp = abi.ChainEpoch(extraI) - } - // t.Expiry (abi.ChainEpoch) (int64) - { - maj, extra, err := cbg.CborReadHeader(br) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } + } + // t.MinPieceSize (abi.PaddedPieceSize) (uint64) + case "MinPieceSize": - t.Expiry = abi.ChainEpoch(extraI) - } - // t.SeqNo (uint64) (uint64) + { - { + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MinPieceSize = abi.PaddedPieceSize(extra) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SeqNo = uint64(extra) + } + // t.MaxPieceSize (abi.PaddedPieceSize) (uint64) + case "MaxPieceSize": - } - return nil -} + { -func (t *StorageDeal) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{130}); err != nil { - return 
err - } + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.MaxPieceSize = abi.PaddedPieceSize(extra) - // t.DealProposal (market.DealProposal) (struct) - if err := t.DealProposal.MarshalCBOR(w); err != nil { - return err - } + } + // t.Miner (address.Address) (struct) + case "Miner": - // t.DealState (market.DealState) (struct) - if err := t.DealState.MarshalCBOR(w); err != nil { - return err - } - return nil -} + { -func (t *StorageDeal) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) + if err := t.Miner.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Miner: %w", err) + } - maj, extra, err := cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } + } + // t.Timestamp (abi.ChainEpoch) (int64) + case "Timestamp": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Timestamp = abi.ChainEpoch(extraI) + } + // t.Expiry (abi.ChainEpoch) (int64) + case "Expiry": + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: 
%d", maj) + } + + t.Expiry = abi.ChainEpoch(extraI) + } + // t.SeqNo (uint64) (uint64) + case "SeqNo": - if extra != 2 { - return fmt.Errorf("cbor input had wrong number of fields") - } + { - // t.DealProposal (market.DealProposal) (struct) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.SeqNo = uint64(extra) - { + } - if err := t.DealProposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealProposal: %w", err) + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) } - } - // t.DealState (market.DealState) (struct) - { - - if err := t.DealState.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.DealState: %w", err) - } - - } return nil } - func (t *DataRef) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{132}); err != nil { + if _, err := w.Write([]byte{164}); err != nil { return err } + scratch := make([]byte, 9) + // t.TransferType (string) (string) + if len("TransferType") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"TransferType\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("TransferType"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("TransferType")); err != nil { + return err + } + if len(t.TransferType) > cbg.MaxLength { return xerrors.Errorf("Value in field t.TransferType was too long") } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.TransferType)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.TransferType))); err != nil { return err } - if _, err := w.Write([]byte(t.TransferType)); err != nil { + if _, err := io.WriteString(w, string(t.TransferType)); err != nil { return err } // t.Root (cid.Cid) 
(struct) + if len("Root") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Root\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Root"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Root")); err != nil { + return err + } - if err := cbg.WriteCid(w, t.Root); err != nil { + if err := cbg.WriteCidBuf(scratch, w, t.Root); err != nil { return xerrors.Errorf("failed to write cid field t.Root: %w", err) } // t.PieceCid (cid.Cid) (struct) + if len("PieceCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceCid")); err != nil { + return err + } if t.PieceCid == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.PieceCid); err != nil { + if err := cbg.WriteCidBuf(scratch, w, *t.PieceCid); err != nil { return xerrors.Errorf("failed to write cid field t.PieceCid: %w", err) } } // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + if len("PieceSize") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PieceSize\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PieceSize"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PieceSize")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.PieceSize))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.PieceSize)); err != nil { return err } @@ -1174,158 +2053,268 @@ func (t *DataRef) MarshalCBOR(w io.Writer) error { } func (t *DataRef) UnmarshalCBOR(r io.Reader) error { + *t = DataRef{} + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, 
extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 4 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("DataRef: map struct too large (%d)", extra) } - // t.TransferType (string) (string) + var name string + n := extra - { - sval, err := cbg.ReadString(br) - if err != nil { - return err + for i := uint64(0); i < n; i++ { + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) } - t.TransferType = string(sval) - } - // t.Root (cid.Cid) (struct) + switch name { + // t.TransferType (string) (string) + case "TransferType": - { + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Root: %w", err) - } + t.TransferType = string(sval) + } + // t.Root (cid.Cid) (struct) + case "Root": - t.Root = c + { - } - // t.PieceCid (cid.Cid) (struct) + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Root: %w", err) + } - { + t.Root = c - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err } - } else { + // t.PieceCid (cid.Cid) (struct) + case "PieceCid": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) + } + + t.PieceCid = &c + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PieceCid: %w", err) } + // 
t.PieceSize (abi.UnpaddedPieceSize) (uint64) + case "PieceSize": - t.PieceCid = &c - } + { - } - // t.PieceSize (abi.UnpaddedPieceSize) (uint64) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.PieceSize = abi.UnpaddedPieceSize(extra) - { + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) } - t.PieceSize = abi.UnpaddedPieceSize(extra) - } + return nil } - func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{136}); err != nil { + if _, err := w.Write([]byte{168}); err != nil { return err } + scratch := make([]byte, 9) + // t.State (uint64) (uint64) + if len("State") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"State\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("State"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("State")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.State))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.State)); err != nil { return err } // t.Message (string) (string) + if len("Message") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Message\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Message"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Message")); err != nil { + return err + } + if len(t.Message) > cbg.MaxLength { return xerrors.Errorf("Value in field t.Message was too long") } - if _, err := 
w.Write(cbg.CborEncodeMajorType(cbg.MajTextString, uint64(len(t.Message)))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { return err } - if _, err := w.Write([]byte(t.Message)); err != nil { + if _, err := io.WriteString(w, string(t.Message)); err != nil { return err } // t.Proposal (market.DealProposal) (struct) + if len("Proposal") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"Proposal\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Proposal"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("Proposal")); err != nil { + return err + } + if err := t.Proposal.MarshalCBOR(w); err != nil { return err } // t.ProposalCid (cid.Cid) (struct) + if len("ProposalCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ProposalCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ProposalCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ProposalCid")); err != nil { + return err + } if t.ProposalCid == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.ProposalCid); err != nil { + if err := cbg.WriteCidBuf(scratch, w, *t.ProposalCid); err != nil { return xerrors.Errorf("failed to write cid field t.ProposalCid: %w", err) } } // t.AddFundsCid (cid.Cid) (struct) + if len("AddFundsCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"AddFundsCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("AddFundsCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("AddFundsCid")); err != nil { + return err + } if t.AddFundsCid == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.AddFundsCid); err != nil { + if err := 
cbg.WriteCidBuf(scratch, w, *t.AddFundsCid); err != nil { return xerrors.Errorf("failed to write cid field t.AddFundsCid: %w", err) } } // t.PublishCid (cid.Cid) (struct) + if len("PublishCid") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"PublishCid\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("PublishCid"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("PublishCid")); err != nil { + return err + } if t.PublishCid == nil { if _, err := w.Write(cbg.CborNull); err != nil { return err } } else { - if err := cbg.WriteCid(w, *t.PublishCid); err != nil { + if err := cbg.WriteCidBuf(scratch, w, *t.PublishCid); err != nil { return xerrors.Errorf("failed to write cid field t.PublishCid: %w", err) } } // t.DealID (abi.DealID) (uint64) + if len("DealID") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"DealID\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("DealID"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("DealID")); err != nil { + return err + } - if _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(t.DealID))); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.DealID)); err != nil { return err } // t.FastRetrieval (bool) (bool) + if len("FastRetrieval") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"FastRetrieval\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("FastRetrieval"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("FastRetrieval")); err != nil { + return err + } + if err := cbg.WriteBool(w, t.FastRetrieval); err != nil { return err } @@ -1333,167 +2322,191 @@ func (t *ProviderDealState) MarshalCBOR(w io.Writer) error { } func (t *ProviderDealState) UnmarshalCBOR(r io.Reader) error { + *t = ProviderDealState{} + 
br := cbg.GetPeeker(r) + scratch := make([]byte, 8) - maj, extra, err := cbg.CborReadHeader(br) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of type map") } - if extra != 8 { - return fmt.Errorf("cbor input had wrong number of fields") + if extra > cbg.MaxLength { + return fmt.Errorf("ProviderDealState: map struct too large (%d)", extra) } - // t.State (uint64) (uint64) + var name string + n := extra - { + for i := uint64(0); i < n; i++ { - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + name = string(sval) } - t.State = uint64(extra) - } - // t.Message (string) (string) + switch name { + // t.State (uint64) (uint64) + case "State": - { - sval, err := cbg.ReadString(br) - if err != nil { - return err - } + { - t.Message = string(sval) - } - // t.Proposal (market.DealProposal) (struct) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.State = uint64(extra) - { + } + // t.Message (string) (string) + case "Message": - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Message = string(sval) } - } else { - t.Proposal = new(market.DealProposal) - if err := t.Proposal.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Proposal pointer: %w", err) + // t.Proposal (market.DealProposal) (struct) + case "Proposal": + + { + + b, err := 
br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Proposal = new(market.DealProposal) + if err := t.Proposal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Proposal pointer: %w", err) + } + } + } - } + // t.ProposalCid (cid.Cid) (struct) + case "ProposalCid": - } - // t.ProposalCid (cid.Cid) (struct) + { - { + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) + } + + t.ProposalCid = &c + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ProposalCid: %w", err) } + // t.AddFundsCid (cid.Cid) (struct) + case "AddFundsCid": - t.ProposalCid = &c - } + { - } - // t.AddFundsCid (cid.Cid) (struct) + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } - { + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { + t.AddFundsCid = &c + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.AddFundsCid: %w", err) } + // t.PublishCid (cid.Cid) (struct) + case "PublishCid": - t.AddFundsCid = &c - } + { - } - // t.PublishCid (cid.Cid) (struct) + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err 
:= br.UnreadByte(); err != nil { + return err + } - { + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) + } - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { + t.PublishCid = &c + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.PublishCid: %w", err) } + // t.DealID (abi.DealID) (uint64) + case "DealID": - t.PublishCid = &c - } + { - } - // t.DealID (abi.DealID) (uint64) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.DealID = abi.DealID(extra) + + } + // t.FastRetrieval (bool) (bool) + case "FastRetrieval": - { + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.FastRetrieval = false + case 21: + t.FastRetrieval = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + default: + return fmt.Errorf("unknown struct field %d: '%s'", i, name) } - t.DealID = abi.DealID(extra) - } - // t.FastRetrieval (bool) (bool) - maj, extra, err = cbg.CborReadHeader(br) - if err != nil { - return err - } - if maj != cbg.MajOther { - return fmt.Errorf("booleans must be major type 7") - } - switch extra { - case 20: - t.FastRetrieval = false - case 21: - t.FastRetrieval = true - default: - return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) - } return nil } diff --git 
a/vendor/github.com/filecoin-project/go-fil-markets/tools/tools.go b/vendor/github.com/filecoin-project/go-fil-markets/tools/tools.go deleted file mode 100644 index 51b8ceae58..0000000000 --- a/vendor/github.com/filecoin-project/go-fil-markets/tools/tools.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build tools - -package tools - -import ( - _ "github.com/hannahhoward/cbor-gen-for" -) diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/.gitignore b/vendor/github.com/filecoin-project/go-hamt-ipld/.gitignore new file mode 100644 index 0000000000..398baf21b2 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/.gitignore @@ -0,0 +1,17 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +.idea diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/.travis.yml b/vendor/github.com/filecoin-project/go-hamt-ipld/.travis.yml new file mode 100644 index 0000000000..923835bc58 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/.travis.yml @@ -0,0 +1,31 @@ +os: + - linux + +language: go + +go: + - 1.11.x + +env: + global: + - GOTFLAGS="-race" + matrix: + - BUILD_DEPTYPE=gomod + + +# disable travis install +install: + - true + +script: + - bash <(curl -s https://raw.githubusercontent.com/ipfs/ci-helpers/master/travis-ci/run-standard-tests.sh) + + +cache: + directories: + - $GOPATH/src/gx + - $GOPATH/pkg/mod + - $HOME/.cache/go-build + +notifications: + email: false diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/LICENSE b/vendor/github.com/filecoin-project/go-hamt-ipld/LICENSE new file mode 100644 index 0000000000..83f48ce5a4 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Whyrusleeping + +Permission is 
hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/Makefile b/vendor/github.com/filecoin-project/go-hamt-ipld/Makefile new file mode 100644 index 0000000000..0ad4560a70 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/Makefile @@ -0,0 +1,14 @@ +all: build + +build: + go build ./... +.PHONY: build + +test: + go test ./... +.PHONY: test + +benchmark: + go test -bench=./... 
+.PHONY: benchmark + diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/README.md b/vendor/github.com/filecoin-project/go-hamt-ipld/README.md new file mode 100644 index 0000000000..f3b9327fa2 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/README.md @@ -0,0 +1,34 @@ +go-hamt-ipld +================== + +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://libp2p.io/) +[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs) +[![Travis CI](https://travis-ci.org/ipfs/go-hamt-ipld.svg?branch=master)](https://travis-ci.org/ipfs/go-hamt-ipld) + +> A CHAMP HAMT implemented using ipld + + +## Table of Contents + +- [Usage](#usage) +- [API](#api) +- [Contribute](#contribute) +- [License](#license) + + +## Examples + +```go +// TODO +``` + +## Contribute + +PRs are welcome! + +Small note: If editing the Readme, please conform to the [standard-readme](https://github.com/RichardLitt/standard-readme) specification. + +## License + +MIT © Whyrusleeping diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/cbor_gen.go b/vendor/github.com/filecoin-project/go-hamt-ipld/cbor_gen.go new file mode 100644 index 0000000000..409007f64e --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/cbor_gen.go @@ -0,0 +1,214 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package hamt + +import ( + "fmt" + "io" + "math/big" + + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +// NOTE: This is a generated file, but it has been modified to encode the +// bitfield big.Int as a byte array. 
The bitfield is only a big.Int because +// thats a convenient type for the operations we need to perform on it, but it +// is fundamentally an array of bytes (bits) + +var _ = xerrors.Errorf + +var lengthBufNode = []byte{130} + +func (t *Node) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufNode); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Bitfield (big.Int) (struct) + { + var b []byte + if t.Bitfield != nil { + b = t.Bitfield.Bytes() + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(b))); err != nil { + return err + } + if _, err := w.Write(b); err != nil { + return err + } + } + + // t.Pointers ([]*hamt.Pointer) (slice) + if len(t.Pointers) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Pointers was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Pointers))); err != nil { + return err + } + for _, v := range t.Pointers { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *Node) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Bitfield (big.Int) (struct) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajByteString { + return fmt.Errorf("big ints should be tagged cbor byte strings") + } + + if extra > 256 { + return fmt.Errorf("t.Bitfield: cbor bignum was too large") + } + + if extra > 0 { + buf := make([]byte, extra) + if _, err := io.ReadFull(br, buf); err != nil { + return err + } + t.Bitfield = big.NewInt(0).SetBytes(buf) + } else { + 
t.Bitfield = big.NewInt(0) + } + // t.Pointers ([]*hamt.Pointer) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Pointers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Pointers = make([]*Pointer, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Pointer + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Pointers[i] = &v + } + + return nil +} + +var lengthBufKV = []byte{130} + +func (t *KV) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufKV); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Key ([]uint8) (slice) + if len(t.Key) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Key was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Key))); err != nil { + return err + } + + if _, err := w.Write(t.Key); err != nil { + return err + } + + // t.Value (typegen.Deferred) (struct) + if err := t.Value.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *KV) UnmarshalCBOR(r io.Reader) error { + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Key ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Key: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + t.Key = make([]byte, extra) + if _, err := 
io.ReadFull(br, t.Key); err != nil { + return err + } + // t.Value (typegen.Deferred) (struct) + + { + + t.Value = new(cbg.Deferred) + + if err := t.Value.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/codecov.yml b/vendor/github.com/filecoin-project/go-hamt-ipld/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/go.mod b/vendor/github.com/filecoin-project/go-hamt-ipld/go.mod new file mode 100644 index 0000000000..53c81c877f --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/go.mod @@ -0,0 +1,14 @@ +module github.com/filecoin-project/go-hamt-ipld + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.6 + github.com/ipfs/go-ipld-cbor v0.0.4 + github.com/ipfs/go-ipld-format v0.0.2 // indirect + github.com/spaolacci/murmur3 v1.1.0 + github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d + golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb // indirect + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) + +go 1.13 diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/go.sum b/vendor/github.com/filecoin-project/go-hamt-ipld/go.sum new file mode 100644 index 0000000000..b745a25c38 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/go.sum @@ -0,0 +1,86 @@ +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1 
h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00 h1:QN88Q0kT2QiDaLxpR/SDsqOBtNIEF/F3n96gSDUimkA= +github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= +github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= +github.com/jtolds/gls v4.2.1+incompatible 
h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 
h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= +github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod 
h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d h1:Y25auOnuZb/GuJvqMflRSDWBz8/HBRME8fiD+H8zLfs= +github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/hamt.go b/vendor/github.com/filecoin-project/go-hamt-ipld/hamt.go new file mode 100644 index 0000000000..6b6c42ae63 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/hamt.go @@ -0,0 +1,471 @@ +package hamt + +import ( + "bytes" + "context" + "fmt" + "math/big" + + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +const arrayWidth = 3 +const defaultBitWidth = 8 + +type Node struct { + Bitfield *big.Int `refmt:"bf"` + Pointers []*Pointer `refmt:"p"` + + bitWidth int + hash func([]byte) []byte + + // for fetching and storing children + store cbor.IpldStore +} + +// Option is a function that configures the node +type Option func(*Node) + +// UseTreeBitWidth allows you to set the width of the HAMT tree +// in bits (from 1-8) via a customized hash function +func UseTreeBitWidth(bitWidth int) Option { + return func(nd *Node) { + if bitWidth > 0 && bitWidth <= 8 { + nd.bitWidth = bitWidth + } + } +} + +// UseHashFunction allows you to set the hash function used by the HAMT. It +// defaults to murmur3 but you should use sha256 when an attacker can pick the +// keys. 
+func UseHashFunction(hash func([]byte) []byte) Option { + return func(nd *Node) { + nd.hash = hash + } +} + +// NewNode creates a new IPLD HAMT Node with the given store and given +// options +func NewNode(cs cbor.IpldStore, options ...Option) *Node { + nd := &Node{ + Bitfield: big.NewInt(0), + Pointers: make([]*Pointer, 0), + store: cs, + hash: defaultHashFunction, + bitWidth: defaultBitWidth, + } + // apply functional options to node before using + for _, option := range options { + option(nd) + } + return nd +} + +type KV struct { + Key []byte + Value *cbg.Deferred +} + +type Pointer struct { + KVs []*KV `refmt:"v,omitempty"` + Link cid.Cid `refmt:"l,omitempty"` + + // cached node to avoid too many serialization operations + cache *Node +} + +func (n *Node) Find(ctx context.Context, k string, out interface{}) error { + return n.getValue(ctx, &hashBits{b: n.hash([]byte(k))}, k, func(kv *KV) error { + // used to just see if the thing exists in the set + if out == nil { + return nil + } + + if um, ok := out.(cbg.CBORUnmarshaler); ok { + return um.UnmarshalCBOR(bytes.NewReader(kv.Value.Raw)) + } + + if err := cbor.DecodeInto(kv.Value.Raw, out); err != nil { + return xerrors.Errorf("cbor decoding value: %w", err) + } + + return nil + }) +} + +func (n *Node) FindRaw(ctx context.Context, k string) ([]byte, error) { + var ret []byte + err := n.getValue(ctx, &hashBits{b: n.hash([]byte(k))}, k, func(kv *KV) error { + ret = kv.Value.Raw + return nil + }) + return ret, err +} + +func (n *Node) Delete(ctx context.Context, k string) error { + kb := []byte(k) + return n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, nil) +} + +var ErrNotFound = fmt.Errorf("not found") +var ErrMaxDepth = fmt.Errorf("attempted to traverse hamt beyond max depth") + +func (n *Node) getValue(ctx context.Context, hv *hashBits, k string, cb func(*KV) error) error { + idx, err := hv.Next(n.bitWidth) + if err != nil { + return ErrMaxDepth + } + + if n.Bitfield.Bit(idx) == 0 { + return ErrNotFound + } 
+ + cindex := byte(n.indexForBitPos(idx)) + + c := n.getChild(cindex) + if c.isShard() { + chnd, err := c.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return err + } + + return chnd.getValue(ctx, hv, k, cb) + } + + for _, kv := range c.KVs { + if string(kv.Key) == k { + return cb(kv) + } + } + + return ErrNotFound +} + +func (p *Pointer) loadChild(ctx context.Context, ns cbor.IpldStore, bitWidth int, hash func([]byte) []byte) (*Node, error) { + if p.cache != nil { + return p.cache, nil + } + + out, err := LoadNode(ctx, ns, p.Link) + if err != nil { + return nil, err + } + out.bitWidth = bitWidth + out.hash = hash + + p.cache = out + return out, nil +} + +func LoadNode(ctx context.Context, cs cbor.IpldStore, c cid.Cid, options ...Option) (*Node, error) { + var out Node + if err := cs.Get(ctx, c, &out); err != nil { + return nil, err + } + + out.store = cs + out.bitWidth = defaultBitWidth + out.hash = defaultHashFunction + // apply functional options to node before using + for _, option := range options { + option(&out) + } + + return &out, nil +} + +func (n *Node) checkSize(ctx context.Context) (uint64, error) { + c, err := n.store.Put(ctx, n) + if err != nil { + return 0, err + } + + var def cbg.Deferred + if err := n.store.Get(ctx, c, &def); err != nil { + return 0, nil + } + + totsize := uint64(len(def.Raw)) + for _, ch := range n.Pointers { + if ch.isShard() { + chnd, err := ch.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return 0, err + } + chsize, err := chnd.checkSize(ctx) + if err != nil { + return 0, err + } + totsize += chsize + } + } + + return totsize, nil +} + +func (n *Node) Flush(ctx context.Context) error { + for _, p := range n.Pointers { + if p.cache != nil { + if err := p.cache.Flush(ctx); err != nil { + return err + } + + c, err := n.store.Put(ctx, p.cache) + if err != nil { + return err + } + + p.cache = nil + p.Link = c + } + } + return nil +} + +// SetRaw sets key k to cbor bytes raw +func (n *Node) 
SetRaw(ctx context.Context, k string, raw []byte) error { + d := &cbg.Deferred{Raw: raw} + kb := []byte(k) + return n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, d) +} + +func (n *Node) Set(ctx context.Context, k string, v interface{}) error { + var d *cbg.Deferred + + kb := []byte(k) + + cm, ok := v.(cbg.CBORMarshaler) + if ok { + buf := new(bytes.Buffer) + if err := cm.MarshalCBOR(buf); err != nil { + return err + } + d = &cbg.Deferred{Raw: buf.Bytes()} + } else { + b, err := cbor.DumpObject(v) + if err != nil { + return err + } + d = &cbg.Deferred{Raw: b} + } + + return n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, d) +} + +func (n *Node) cleanChild(chnd *Node, cindex byte) error { + l := len(chnd.Pointers) + switch { + case l == 0: + return fmt.Errorf("incorrectly formed HAMT") + case l == 1: + // TODO: only do this if its a value, cant do this for shards unless pairs requirements are met. + + ps := chnd.Pointers[0] + if ps.isShard() { + return nil + } + + return n.setChild(cindex, ps) + case l <= arrayWidth: + var chvals []*KV + for _, p := range chnd.Pointers { + if p.isShard() { + return nil + } + + for _, sp := range p.KVs { + if len(chvals) == arrayWidth { + return nil + } + chvals = append(chvals, sp) + } + } + return n.setChild(cindex, &Pointer{KVs: chvals}) + default: + return nil + } +} + +func (n *Node) modifyValue(ctx context.Context, hv *hashBits, k []byte, v *cbg.Deferred) error { + idx, err := hv.Next(n.bitWidth) + if err != nil { + return ErrMaxDepth + } + + if n.Bitfield.Bit(idx) != 1 { + return n.insertChild(idx, k, v) + } + + cindex := byte(n.indexForBitPos(idx)) + + child := n.getChild(cindex) + if child.isShard() { + chnd, err := child.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return err + } + + if err := chnd.modifyValue(ctx, hv, k, v); err != nil { + return err + } + + // CHAMP optimization, ensure trees look correct after deletions + if v == nil { + if err := n.cleanChild(chnd, cindex); err != nil { + return 
err + } + } + + return nil + } + + if v == nil { + for i, p := range child.KVs { + if bytes.Equal(p.Key, k) { + if len(child.KVs) == 1 { + return n.rmChild(cindex, idx) + } + + copy(child.KVs[i:], child.KVs[i+1:]) + child.KVs = child.KVs[:len(child.KVs)-1] + return nil + } + } + return ErrNotFound + } + + // check if key already exists + for _, p := range child.KVs { + if bytes.Equal(p.Key, k) { + p.Value = v + return nil + } + } + + // If the array is full, create a subshard and insert everything into it + if len(child.KVs) >= arrayWidth { + sub := NewNode(n.store) + sub.bitWidth = n.bitWidth + sub.hash = n.hash + hvcopy := &hashBits{b: hv.b, consumed: hv.consumed} + if err := sub.modifyValue(ctx, hvcopy, k, v); err != nil { + return err + } + + for _, p := range child.KVs { + chhv := &hashBits{b: n.hash([]byte(p.Key)), consumed: hv.consumed} + if err := sub.modifyValue(ctx, chhv, p.Key, p.Value); err != nil { + return err + } + } + + c, err := n.store.Put(ctx, sub) + if err != nil { + return err + } + + return n.setChild(cindex, &Pointer{Link: c}) + } + + // otherwise insert the new element into the array in order + np := &KV{Key: k, Value: v} + for i := 0; i < len(child.KVs); i++ { + if bytes.Compare(k, child.KVs[i].Key) < 0 { + child.KVs = append(child.KVs[:i], append([]*KV{np}, child.KVs[i:]...)...) + return nil + } + } + child.KVs = append(child.KVs, np) + return nil +} + +func (n *Node) insertChild(idx int, k []byte, v *cbg.Deferred) error { + if v == nil { + return ErrNotFound + } + + i := n.indexForBitPos(idx) + n.Bitfield.SetBit(n.Bitfield, idx, 1) + + p := &Pointer{KVs: []*KV{{Key: k, Value: v}}} + + n.Pointers = append(n.Pointers[:i], append([]*Pointer{p}, n.Pointers[i:]...)...) 
+ return nil +} + +func (n *Node) setChild(i byte, p *Pointer) error { + n.Pointers[i] = p + return nil +} + +func (n *Node) rmChild(i byte, idx int) error { + copy(n.Pointers[i:], n.Pointers[i+1:]) + n.Pointers = n.Pointers[:len(n.Pointers)-1] + n.Bitfield.SetBit(n.Bitfield, idx, 0) + + return nil +} + +func (n *Node) getChild(i byte) *Pointer { + if int(i) >= len(n.Pointers) || i < 0 { + return nil + } + + return n.Pointers[i] +} + +func (n *Node) Copy() *Node { + nn := NewNode(n.store) + nn.bitWidth = n.bitWidth + nn.hash = n.hash + nn.Bitfield.Set(n.Bitfield) + nn.Pointers = make([]*Pointer, len(n.Pointers)) + + for i, p := range n.Pointers { + pp := &Pointer{} + if p.cache != nil { + pp.cache = p.cache.Copy() + } + pp.Link = p.Link + if p.KVs != nil { + pp.KVs = make([]*KV, len(p.KVs)) + for j, kv := range p.KVs { + pp.KVs[j] = &KV{Key: kv.Key, Value: kv.Value} + } + } + nn.Pointers[i] = pp + } + + return nn +} + +func (p *Pointer) isShard() bool { + return p.Link.Defined() +} + +func (n *Node) ForEach(ctx context.Context, f func(k string, val interface{}) error) error { + for _, p := range n.Pointers { + if p.isShard() { + chnd, err := p.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return err + } + + if err := chnd.ForEach(ctx, f); err != nil { + return err + } + } else { + for _, kv := range p.KVs { + // TODO: consider removing 'strings as keys' from every interface, go full-on bytes everywhere + if err := f(string(kv.Key), kv.Value); err != nil { + return err + } + } + } + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/hash.go b/vendor/github.com/filecoin-project/go-hamt-ipld/hash.go new file mode 100644 index 0000000000..bba6c3903a --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/hash.go @@ -0,0 +1,56 @@ +package hamt + +import ( + "fmt" + + "github.com/spaolacci/murmur3" +) + +// hashBits is a helper that allows the reading of the 'next n bits' as an integer. 
+type hashBits struct { + b []byte + consumed int +} + +func mkmask(n int) byte { + return (1 << uint(n)) - 1 +} + +// Next returns the next 'i' bits of the hashBits value as an integer, or an +// error if there aren't enough bits. +func (hb *hashBits) Next(i int) (int, error) { + if hb.consumed+i > len(hb.b)*8 { + return 0, fmt.Errorf("sharded directory too deep") + } + return hb.next(i), nil +} + +func (hb *hashBits) next(i int) int { + curbi := hb.consumed / 8 + leftb := 8 - (hb.consumed % 8) + + curb := hb.b[curbi] + if i == leftb { + out := int(mkmask(i) & curb) + hb.consumed += i + return out + } else if i < leftb { + a := curb & mkmask(leftb) // mask out the high bits we don't want + b := a & ^mkmask(leftb-i) // mask out the low bits we don't want + c := b >> uint(leftb-i) // shift whats left down + hb.consumed += i + return int(c) + } else { + out := int(mkmask(leftb) & curb) + out <<= uint(i - leftb) + hb.consumed += leftb + out += hb.next(i - leftb) + return out + } +} + +func defaultHashFunction(val []byte) []byte { + h := murmur3.New64() + h.Write(val) + return h.Sum(nil) +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/pointer_cbor.go b/vendor/github.com/filecoin-project/go-hamt-ipld/pointer_cbor.go new file mode 100644 index 0000000000..8734626336 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/pointer_cbor.go @@ -0,0 +1,133 @@ +package hamt + +import ( + "fmt" + "io" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var keyZero = []byte("0") +var keyOne = []byte("1") + +func (t *Pointer) MarshalCBOR(w io.Writer) error { + if t.Link != cid.Undef && len(t.KVs) > 0 { + return fmt.Errorf("hamt Pointer cannot have both a link and KVs") + } + + scratch := make([]byte, 9) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajMap, 1); err != nil { + return err + } + + if t.Link != cid.Undef { + // key for links is "0" + // Refmt (and the general IPLD data model currently) can't deal + 
// with non string keys. So we have this weird restriction right now + // hoping to be able to use integer keys soon + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 1); err != nil { + return err + } + + if _, err := w.Write(keyZero); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.Link); err != nil { + return err + } + } else { + // key for KVs is "1" + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 1); err != nil { + return err + } + + if _, err := w.Write(keyOne); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.KVs))); err != nil { + return err + } + + for _, kv := range t.KVs { + if err := kv.MarshalCBOR(w); err != nil { + return err + } + } + } + + return nil +} + +func (t *Pointer) UnmarshalCBOR(br io.Reader) error { + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of map") + } + + if extra != 1 { + return fmt.Errorf("Pointers should be a single element map") + } + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajTextString { + return fmt.Errorf("expected text string key") + } + + if val != 1 { + return fmt.Errorf("map keys in pointers must be a single byte long") + } + + if _, err := io.ReadAtLeast(br, scratch[:1], 1); err != nil { + return err + } + + switch scratch[0] { + case '0': + c, err := cbg.ReadCid(br) + if err != nil { + return err + } + t.Link = c + return nil + case '1': + maj, length, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected an array of KVs in cbor input") + } + + if length > 32 { + return fmt.Errorf("KV array in cbor input for pointer was too long") + } + + t.KVs = make([]*KV, length) + for i := 0; i < int(length); i++ { + 
var kv KV + if err := kv.UnmarshalCBOR(br); err != nil { + return err + } + + t.KVs[i] = &kv + } + + return nil + default: + return fmt.Errorf("invalid pointer map key in cbor input: %d", val) + } +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/uhamt.go b/vendor/github.com/filecoin-project/go-hamt-ipld/uhamt.go new file mode 100644 index 0000000000..e7cf8a4001 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/uhamt.go @@ -0,0 +1,27 @@ +package hamt + +import ( + "math/big" + "math/bits" +) + +// indexForBitPos returns the index within the collapsed array corresponding to +// the given bit in the bitset. The collapsed array contains only one entry +// per bit set in the bitfield, and this function is used to map the indices. +func (n *Node) indexForBitPos(bp int) int { + return indexForBitPos(bp, n.Bitfield) +} + +func indexForBitPos(bp int, bitfield *big.Int) int { + var x uint + var count, i int + w := bitfield.Bits() + for x = uint(bp); x > bits.UintSize && i < len(w); x -= bits.UintSize { + count += bits.OnesCount(uint(w[i])) + i++ + } + if i == len(w) { + return count + } + return count + bits.OnesCount(uint(w[i])&((1< cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Pointers was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Pointers))); err != nil { + return err + } + for _, v := range t.Pointers { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *Node) UnmarshalCBOR(r io.Reader) error { + *t = Node{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Bitfield (big.Int) (struct) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != 
nil { + return err + } + + if maj != cbg.MajByteString { + return fmt.Errorf("big ints should be tagged cbor byte strings") + } + + if extra > 256 { + return fmt.Errorf("t.Bitfield: cbor bignum was too large") + } + + if extra > 0 { + buf := make([]byte, extra) + if _, err := io.ReadFull(br, buf); err != nil { + return err + } + t.Bitfield = big.NewInt(0).SetBytes(buf) + } else { + t.Bitfield = big.NewInt(0) + } + // t.Pointers ([]*hamt.Pointer) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Pointers: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Pointers = make([]*Pointer, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Pointer + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Pointers[i] = &v + } + + return nil +} + +var lengthBufKV = []byte{130} + +func (t *KV) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufKV); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Key ([]uint8) (slice) + if len(t.Key) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Key was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Key))); err != nil { + return err + } + + if _, err := w.Write(t.Key[:]); err != nil { + return err + } + + // t.Value (typegen.Deferred) (struct) + if err := t.Value.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *KV) UnmarshalCBOR(r io.Reader) error { + *t = KV{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor 
input had wrong number of fields") + } + + // t.Key ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Key: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Key = make([]byte, extra) + } + + if _, err := io.ReadFull(br, t.Key[:]); err != nil { + return err + } + // t.Value (typegen.Deferred) (struct) + + { + + t.Value = new(cbg.Deferred) + + if err := t.Value.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("failed to read deferred field: %w", err) + } + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/codecov.yml b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/codecov.yml new file mode 100644 index 0000000000..5f88a9ea27 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/codecov.yml @@ -0,0 +1,3 @@ +coverage: + range: "50...100" +comment: off diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/doc.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/doc.go new file mode 100644 index 0000000000..b5dc12f39c --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/doc.go @@ -0,0 +1,61 @@ +/* +Package hamt provides a reference implementation of the IPLD HAMT used in the +Filecoin blockchain. It includes some optional flexibility such that it may be +used for other purposes outside of Filecoin. + +HAMT is a "hash array mapped trie" +https://en.wikipedia.org/wiki/Hash_array_mapped_trie. This implementation +extends the standard form by including buckets for the key/value pairs at +storage leaves and CHAMP mutation semantics +https://michael.steindorfer.name/publications/oopsla15.pdf. 
The CHAMP invariant +and mutation rules provide us with the ability to maintain canonical forms +given any set of keys and their values, regardless of insertion order and +intermediate data insertion and deletion. Therefore, for any given set of keys +and their values, a HAMT using the same parameters and CHAMP semantics, the +root node should always produce the same content identifier (CID). + +Algorithm Overview + +The HAMT algorithm hashes incoming keys and uses incrementing subsections of +that hash digest at each level of its tree structure to determine the placement +of either the entry or a link to a child node of the tree. A `bitWidth` +determines the number of bits of the hash to use for index calculation at each +level of the tree such that the root node takes the first `bitWidth` bits of +the hash to calculate an index and as we move lower in the tree, we move along +the hash by `depth x bitWidth` bits. In this way, a sufficiently randomizing +hash function will generate a hash that provides a new index at each level of +the data structure. An index comprising `bitWidth` bits will generate index +values of `[ 0, 2^bitWidth )`. So a `bitWidth` of 8 will generate indexes of 0 +to 255 inclusive. + +Each node in the tree can therefore hold up to `2^bitWidth` elements of data, +which we store in an array. In the this HAMT and the IPLD HashMap we store +entries in buckets. A `Set(key, value)` mutation where the index generated at +the root node for the hash of key denotes an array index that does not yet +contain an entry, we create a new bucket and insert the key / value pair entry. +In this way, a single node can theoretically hold up to +`2^bitWidth x bucketSize` entries, where `bucketSize` is the maximum number of +elements a bucket is allowed to contain ("collisions"). In practice, indexes do +not distribute with perfect randomness so this maximum is theoretical. Entries +stored in the node's buckets are stored in key-sorted order. 
+ +Parameters + +This HAMT implementation: + +• Fixes the `bucketSize` to 3. + +• Defaults the `bitWidth` to 8, however within Filecoin it uses 5 + +• Defaults the hash algorithm to the 64-bit variant of Murmur3-x64 + +Further Reading + +The algorithm used here is identical to that of the IPLD HashMap algorithm +specified at +https://github.com/ipld/specs/blob/master/data-structures/hashmap.md. The +specific parameters used by Filecoin and the DAG-CBOR block layout differ from +the specification and are defined at +https://github.com/ipld/specs/blob/master/data-structures/hashmap.md#Appendix-Filecoin-hamt-variant. +*/ +package hamt diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/go.mod b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/go.mod new file mode 100644 index 0000000000..05bf042926 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/go.mod @@ -0,0 +1,13 @@ +module github.com/filecoin-project/go-hamt-ipld/v2 + +require ( + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-cid v0.0.6 + github.com/ipfs/go-ipld-cbor v0.0.4 + github.com/spaolacci/murmur3 v1.1.0 + github.com/stretchr/testify v1.6.1 + github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) + +go 1.13 diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/go.sum b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/go.sum new file mode 100644 index 0000000000..413263054b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/go.sum @@ -0,0 +1,91 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs 
v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gxed/hashland/keccakpg v0.0.1 h1:wrk3uMNaMxbXiHibbPO4S0ymqJMm41WiudyFSs7UnsU= +github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= +github.com/gxed/hashland/murmur3 v0.0.1 h1:SheiaIt0sda5K+8FLz952/1iWS9zrnKsEJaOJu4ZbSc= +github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= +github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= +github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= +github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.3 h1:UIAh32wymBpStoe83YCzwVQQ5Oy/H0FdxvUS6DJDzms= +github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= +github.com/ipfs/go-ipfs-util v0.0.1 h1:Wz9bL2wB2YBJqggkA4dD7oSmqB4cAnpNbGrlHJulv50= +github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= +github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= +github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-format v0.0.1 h1:HCu4eB/Gh+KD/Q0M8u888RFkorTWNIL3da4oc5dwc80= +github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= +github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod 
h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16 h1:5W7KhL8HVF3XCFOweFD3BNESdnO8ewyYTFT2R+/b8FQ= +github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= +github.com/mr-tron/base58 v1.1.0 h1:Y51FGVJ91WBqCEabAi5OPUz38eAx8DakuAm5svLcsfQ= +github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= +github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= +github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= +github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= +github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= +github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= +github.com/multiformats/go-multihash v0.0.1 h1:HHwN1K12I+XllBCrqKnhX949Orn4oawPkegHMu2vDqQ= +github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= 
+github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= +github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= +github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= +github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= +github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= +github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
+github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= +github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488 h1:P/Q9QT99FpyHtFke7ERUqX7yYtZ/KigO880L+TKFyTQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67 h1:ng3VDlRp5/DHpSWl02R4rM9I+8M2rhmsuLwAMmkLQWE= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d h1:Z0Ahzd7HltpJtjAHHxX8QFP3j1yYgiuvjbjRzDj/KH0= +golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/hamt.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/hamt.go new file mode 100644 index 0000000000..19551d99e0 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/hamt.go @@ -0,0 +1,815 @@ +package hamt + +import ( + "bytes" + "context" + "fmt" + "math/big" + "sort" + + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +//----------------------------------------------------------------------------- +// Defaults + +const bucketSize = 3 +const defaultBitWidth = 8 + +//----------------------------------------------------------------------------- +// Errors + +// ErrNotFound is returned when a Find operation fails to locate the specified +// key in the HAMT +var ErrNotFound = fmt.Errorf("not found") + +// ErrMaxDepth is returned when the HAMT spans further than the hash function +// is capable of representing. This can occur when sufficient hash collisions +// (e.g. from a weak hash function and attacker-provided keys) extend leaf +// nodes beyond the number of bits that a hash can represent. 
Or this can occur +// on extremely large (likely impractical) HAMTs that are unable to be +// represented with the hash function used. Hash functions with larger byte +// output increase the maximum theoretical depth of a HAMT. +var ErrMaxDepth = fmt.Errorf("attempted to traverse HAMT beyond max-depth") + +// ErrMalformedHamt is returned whenever a block intended as a HAMT node does +// not conform to the expected form that a block may take. This can occur +// during block-load where initial validation takes place or during traversal +// where certain conditions are expected to be met. +var ErrMalformedHamt = fmt.Errorf("HAMT node was malformed") + +//----------------------------------------------------------------------------- +// Serialized data structures + +// Node is a single point in the HAMT, encoded as an IPLD tuple in DAG-CBOR of +// shape: +// [bytes, [Pointer...]] +// where 'bytes' is the big.Int#Bytes() and the Pointers array is between 1 and +// `2^bitWidth`. +// +// The Bitfield provides us with a mechanism to store a compacted array of +// Pointers. Each bit in the Bitfield represents an element in a sparse array +// where `1` indicates the element is present in the Pointers array and `0` +// indicates it is omitted. To look-up a specific index in the Pointers array +// you must first make a count of the number of `1`s (popcount) up to the +// element you are looking for. +// e.g. a Bitfield of `10010110000` shows that we have a 4 element Pointers +// array. Indexes `[1]` and `[2]` are not present, but index `[3]` is at +// the second position of our Pointers array. +// +// (Note: the `refmt` tags are ignored by cbor-gen which will generate an +// array type rather than map.) 
+// +// The IPLD Schema representation of this data structure is as follows: +// +// type Node struct { +// bitfield Bytes +// pointers [Pointer] +// } representation tuple +type Node struct { + Bitfield *big.Int `refmt:"bf"` + Pointers []*Pointer `refmt:"p"` + + bitWidth int + hash func([]byte) []byte + + // for fetching and storing children + store cbor.IpldStore +} + +// Pointer is an element in a HAMT node's Pointers array, encoded as an IPLD +// tuple in DAG-CBOR of shape: +// {"0": CID} or {"1": [KV...]} +// Where a map with a single key of "0" contains a Link, where a map with a +// single key of "1" contains a KV bucket. The map may contain only one of +// these two possible keys. +// +// There are between 1 and 2^bitWidth of these Pointers in any HAMT node. +// +// A Pointer contains either a KV bucket of up to `bucketSize` (3) values or a +// link (CID) to a child node. When a KV bucket overflows beyond `bucketSize`, +// the bucket is replaced with a link to a newly created HAMT node which will +// contain the `bucketSize+1` elements in its own Pointers array. +// +// (Note: the `refmt` tags are ignored by cbor-gen which will generate an +// array type rather than map.) +// +// The IPLD Schema representation of this data structure is as follows: +// +// type Pointer union { +// &Node "0" +// Bucket "1" +// } representation keyed +// +// type Bucket [KV] +type Pointer struct { + KVs []*KV `refmt:"v,omitempty"` + Link cid.Cid `refmt:"l,omitempty"` + + // cached node to avoid too many serialization operations + // TODO(rvagg): we should check that this is actually used optimally. Flush() + // performs a save of all of the cached nodes, but both Copy() and loadChild() + // will set them. In the case of loadChild() we're not expecting a mutation so + // a save is likely going to mean we incur unnecessary serialization when + // we've simply inspected the tree. Copy() will only set a cached form if + // it already exists on the source. 
It's unclear exactly what Flush() is good + // for in its current form. Users may also need an advisory about memory + // usage of large graphs since they don't have control over this outside of + // Flush(). + cache *Node +} + +// KV represents leaf storage within a HAMT node. A Pointer may hold up to +// `bucketSize` KV elements, where each KV contains a key and value pair +// stored by the user. +// +// Keys are represented as bytes. +// +// The IPLD Schema representation of this data structure is as follows: +// +// type KV struct { +// key Bytes +// value Any +// } representation tuple +type KV struct { + Key []byte + Value *cbg.Deferred +} + +//----------------------------------------------------------------------------- +// Options + +// Option is a function that configures the node +// +// See UseTreeBitWidth and UseHashFunction +type Option func(*Node) + +// UseTreeBitWidth allows you to set a custom bitWidth of the HAMT in bits +// (from 1-8). +// +// Passing in the returned Option to NewNode will generate a new HAMT that uses +// the specified bitWidth. +// +// The default bitWidth is 8. +func UseTreeBitWidth(bitWidth int) Option { + return func(nd *Node) { + if bitWidth > 0 && bitWidth <= 8 { + nd.bitWidth = bitWidth + } + } +} + +// UseHashFunction allows you to set the hash function used for internal +// indexing by the HAMT. +// +// Passing in the returned Option to NewNode will generate a new HAMT that uses +// the specified hash function. +// +// The default hash function is murmur3-x64 but you should use a +// cryptographically secure function such as SHA2-256 if an attacker may be +// able to pick the keys in order to avoid potential hash collision (tree +// explosion) attacks. 
+func UseHashFunction(hash func([]byte) []byte) Option { + return func(nd *Node) { + nd.hash = hash + } +} + +//----------------------------------------------------------------------------- +// Instance and helpers functions + +// NewNode creates a new IPLD HAMT Node with the given IPLD store and any +// additional options (bitWidth and hash function). +// +// This function creates a new HAMT that you can use directly and is also +// used internally to create child nodes. +func NewNode(cs cbor.IpldStore, options ...Option) *Node { + nd := &Node{ + Bitfield: big.NewInt(0), + Pointers: make([]*Pointer, 0), + store: cs, + hash: defaultHashFunction, + bitWidth: defaultBitWidth, + } + // apply functional options to node before using + for _, option := range options { + option(nd) + } + return nd +} + +// Find navigates through the HAMT structure to where key `k` should exist. If +// the key is not found, an ErrNotFound error is returned. If the key is found +// and the `out` parameter has an UnmarshalCBOR(Reader) method, the decoded +// value is returned. If found and the `out` parameter is `nil`, then `nil` +// will be returned (can be used to determine if a key exists where you don't +// need the value, e.g. using the HAMT as a Set). +// +// Depending on the size of the HAMT, this method may load a large number of +// child nodes via the HAMT's IpldStore. 
+func (n *Node) Find(ctx context.Context, k string, out interface{}) error { + return n.getValue(ctx, &hashBits{b: n.hash([]byte(k))}, k, func(kv *KV) error { + // used to just see if the thing exists in the set + if out == nil { + return nil + } + + if um, ok := out.(cbg.CBORUnmarshaler); ok { + return um.UnmarshalCBOR(bytes.NewReader(kv.Value.Raw)) + } + + if err := cbor.DecodeInto(kv.Value.Raw, out); err != nil { + return xerrors.Errorf("cbor decoding value: %w", err) + } + + return nil + }) +} + +// FindRaw performs the same function as Find, but returns the raw bytes found +// at the key's location (which may or may not be DAG-CBOR, see also SetRaw). +func (n *Node) FindRaw(ctx context.Context, k string) ([]byte, error) { + var ret []byte + err := n.getValue(ctx, &hashBits{b: n.hash([]byte(k))}, k, func(kv *KV) error { + ret = kv.Value.Raw + return nil + }) + return ret, err +} + +// Delete removes an entry entirely from the HAMT structure. +// +// This operation will result in the modification of _at least_ one IPLD block +// via the IpldStore. Depending on the contents of the leaf node, this +// operation may result in a node collapse to shrink the HAMT into its +// canonical form for the remaining data. For an insufficiently random +// collection of keys at the relevant leaf nodes such a collapse may cascade to +// further nodes. +func (n *Node) Delete(ctx context.Context, k string) error { + kb := []byte(k) + return n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, nil) +} + +// handle the two Find operations in a recursive manner, where each node in the +// HAMT we traverse we call this function again with the same parameters. Note +// that `hv` contains state and `hv.Next()` is not idempotent. Each call +// increments a counter for the number of bits consumed. +func (n *Node) getValue(ctx context.Context, hv *hashBits, k string, cb func(*KV) error) error { + // hv.Next chomps off `bitWidth` bits from the hash digest. 
As we proceed + // down the tree, each node takes `bitWidth` more bits from the digest. If + // we attempt to take more bits than the digest contains, we hit max-depth + // and can't proceed. + idx, err := hv.Next(n.bitWidth) + if err != nil { + return ErrMaxDepth + } + + // if the element expected at this node isn't here then we can be sure it + // doesn't exist in the HAMT. + if n.Bitfield.Bit(idx) == 0 { + return ErrNotFound + } + + // otherwise, the value is either local or in a child + + // perform a popcount of bits up to the `idx` to find `cindex` + cindex := byte(n.indexForBitPos(idx)) + + c := n.getPointer(cindex) + if c.isShard() { + // if isShard, we have a pointer to a child that we need to load and + // delegate our find operation to + chnd, err := c.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return err + } + + return chnd.getValue(ctx, hv, k, cb) + } + + // if not isShard, then the key/value pair is local and we need to retrieve + // it from the bucket. The bucket is sorted but only between 1 and + // `bucketSize` in length, so no need for fanciness. + for _, kv := range c.KVs { + if string(kv.Key) == k { + return cb(kv) + } + } + + return ErrNotFound +} + +// load a HAMT node from the IpldStore and pass on the (assumed) parameters +// that are not stored with the node. +func (p *Pointer) loadChild(ctx context.Context, ns cbor.IpldStore, bitWidth int, hash func([]byte) []byte) (*Node, error) { + if p.cache != nil { + return p.cache, nil + } + + out, err := loadNode(ctx, ns, p.Link, false, bitWidth, hash) + if err != nil { + return nil, err + } + + p.cache = out + return out, nil +} + +// LoadNode loads a HAMT Node from the IpldStore and configures it according +// to any specified Option parameters. Where the parameters of this HAMT vary +// from the defaults (hash function and bitWidth), those variations _must_ be +// supplied here via Options otherwise the HAMT will not be readable. 
+// +// Users should consider how their HAMT parameters are stored or specified +// along with their HAMT where the data is expected to have a long shelf-life +// as future users will need to know the parameters of a HAMT being loaded in +// order to decode it. Users should also NOT rely on the default parameters +// of this library to remain the defaults long-term and have strategies in +// place to manage variations. +func LoadNode(ctx context.Context, cs cbor.IpldStore, c cid.Cid, options ...Option) (*Node, error) { + return loadNode(ctx, cs, c, true, defaultBitWidth, defaultHashFunction, options...) +} + +// internal version of loadNode that is aware of whether this is a root node or +// not for the purpose of additional validation on non-root nodes. +func loadNode( + ctx context.Context, + cs cbor.IpldStore, + c cid.Cid, + isRoot bool, + bitWidth int, + hashFunction func([]byte) []byte, + options ...Option, +) (*Node, error) { + + var out Node + if err := cs.Get(ctx, c, &out); err != nil { + return nil, err + } + + out.store = cs + out.bitWidth = bitWidth + out.hash = hashFunction + // apply functional options to node before using + for _, option := range options { + option(&out) + } + + // Validation + + // too many elements in the data array for the configured bitWidth? + if len(out.Pointers) > 1< bucketSize { + return nil, ErrMalformedHamt + } + for i := 1; i < len(ch.KVs); i++ { + if bytes.Compare(ch.KVs[i-1].Key, ch.KVs[i].Key) >= 0 { + return nil, ErrMalformedHamt + } + } + } + } + + if !isRoot { + // the only valid empty node is a root node + if len(out.Pointers) == 0 { + return nil, ErrMalformedHamt + } + // a non-root node that contains <=bucketSize direct elements should not + // exist under compaction rules + if out.directChildCount() == 0 && out.directKVCount() <= bucketSize { + return nil, ErrMalformedHamt + } + } + + return &out, nil +} + +// checkSize computes the total serialized size of the entire HAMT. 
+// It both puts and loads blocks as necesary to do this +// (using the Put operation and a paired Get to discover the serial size, +// and the load to move recursively as necessary). +// +// This is an expensive operation and should only be used in testing and analysis. +// +// Note that checkSize *does* actually *use the blockstore*: therefore it +// will affect get and put counts (and makes no attempt to avoid duplicate puts!); +// be aware of this if you are measuring those event counts. +func (n *Node) checkSize(ctx context.Context) (uint64, error) { + c, err := n.store.Put(ctx, n) + if err != nil { + return 0, err + } + + var def cbg.Deferred + if err := n.store.Get(ctx, c, &def); err != nil { + return 0, nil + } + + totsize := uint64(len(def.Raw)) + for _, ch := range n.Pointers { + if ch.isShard() { + chnd, err := ch.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return 0, err + } + chsize, err := chnd.checkSize(ctx) + if err != nil { + return 0, err + } + totsize += chsize + } + } + + return totsize, nil +} + +// Flush saves and purges any cached Nodes recursively from this Node through +// its (cached) children. Cached nodes primarily exist through the use of +// Copy() operations where the entire graph is instantiated in memory and each +// child pointer exists in cached form. +func (n *Node) Flush(ctx context.Context) error { + for _, p := range n.Pointers { + if p.cache != nil { + if err := p.cache.Flush(ctx); err != nil { + return err + } + + c, err := n.store.Put(ctx, p.cache) + if err != nil { + return err + } + + p.cache = nil + p.Link = c + } + } + return nil +} + +// Set key k to value v, where v is has a MarshalCBOR(bytes.Buffer) method to +// encode it. 
+func (n *Node) Set(ctx context.Context, k string, v interface{}) error { + var d *cbg.Deferred + + kb := []byte(k) + + cm, ok := v.(cbg.CBORMarshaler) + if ok { + buf := new(bytes.Buffer) + if err := cm.MarshalCBOR(buf); err != nil { + return err + } + d = &cbg.Deferred{Raw: buf.Bytes()} + } else { + b, err := cbor.DumpObject(v) + if err != nil { + return err + } + d = &cbg.Deferred{Raw: b} + } + + return n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, d) +} + +// SetRaw is similar to Set but sets key k in the HAMT to raw bytes without +// performing a DAG-CBOR marshal. The bytes may or may not be encoded DAG-CBOR +// (see also FindRaw for fetching raw form). +func (n *Node) SetRaw(ctx context.Context, k string, raw []byte) error { + d := &cbg.Deferred{Raw: raw} + kb := []byte(k) + return n.modifyValue(ctx, &hashBits{b: n.hash(kb)}, kb, d) +} + +// the number of links to child nodes this node contains +func (n *Node) directChildCount() int { + count := 0 + for _, p := range n.Pointers { + if p.isShard() { + count++ + } + } + return count +} + +// the number of KV entries this node contains +func (n *Node) directKVCount() int { + count := 0 + for _, p := range n.Pointers { + if !p.isShard() { + count = count + len(p.KVs) + } + } + return count +} + +// This happens after deletes to ensure that we retain canonical form for the +// given set of data this HAMT contains. This is a key part of the CHAMP +// algorithm. Any node that could be represented as a bucket in a parent node +// should be collapsed as such. This collapsing process could continue back up +// the tree as far as necessary to represent the data in the minimal HAMT form. +// This operation is done from a parent perspective, so we clean the child +// below us first and then our parent cleans us. 
+func (n *Node) cleanChild(chnd *Node, cindex byte) error { + if chnd.directChildCount() != 0 { + // child has its own children, nothing to collapse + return nil + } + + if chnd.directKVCount() > bucketSize { + // child contains more local elements than could be collapsed + return nil + } + + if len(chnd.Pointers) == 1 { + // The case where the child node has a single bucket, which we know can + // only contain `bucketSize` elements (maximum), so we need to pull that + // bucket up into this node. + // This case should only happen when it bubbles up from the case below + // where a lower child has its elements compacted into a single bucket. We + // shouldn't be able to reach this block unless a delete has been + // performed on a lower block and we are performing a post-delete clean on + // a parent block. + return n.setPointer(cindex, chnd.Pointers[0]) + } + + // The case where the child node contains enough elements to fit in a + // single bucket and therefore can't justify its existence as a node on its + // own. So we collapse all entries into a single bucket and replace the + // link to the child with that bucket. + // This may cause cascading collapses if this is the only bucket in the + // current node, that case will be handled by our parent node by the l==1 + // case above. + var chvals []*KV + for _, p := range chnd.Pointers { + chvals = append(chvals, p.KVs...) + } + kvLess := func(i, j int) bool { + ki := chvals[i].Key + kj := chvals[j].Key + return bytes.Compare(ki, kj) < 0 + } + sort.Slice(chvals, kvLess) + + return n.setPointer(cindex, &Pointer{KVs: chvals}) +} + +// Add a new value, update an existing value, or delete a value from the HAMT, +// potentially recursively calling child nodes to find the exact location of +// the entry in question and potentially collapsing nodes into buckets in +// parent nodes where a deletion violates the canonical form rules (see +// cleanNode()). 
Recursive calls use the same arguments on child nodes but +// note that `hv.Next()` is not idempotent. Each call will increment the number +// of bits chomped off the hash digest for this key. +func (n *Node) modifyValue(ctx context.Context, hv *hashBits, k []byte, v *cbg.Deferred) error { + idx, err := hv.Next(n.bitWidth) + if err != nil { + return ErrMaxDepth + } + + // if the element expected at this node isn't here then we can be sure it + // doesn't exist in the HAMT already and can insert it at the appropriate + // position. + if n.Bitfield.Bit(idx) != 1 { + return n.insertKV(idx, k, v) + } + + // otherwise, the value is either local or in a child + + // perform a popcount of bits up to the `idx` to find `cindex` + cindex := byte(n.indexForBitPos(idx)) + + child := n.getPointer(cindex) + if child.isShard() { + // if isShard, we have a pointer to a child that we need to load and + // delegate our modify operation to + chnd, err := child.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return err + } + + if err := chnd.modifyValue(ctx, hv, k, v); err != nil { + return err + } + + // CHAMP optimization, ensure the HAMT retains its canonical form for the + // current data it contains. This may involve collapsing child nodes if + // they no longer contain enough elements to justify their stand-alone + // existence. + if v == nil { + if err := n.cleanChild(chnd, cindex); err != nil { + return err + } + } + + return nil + } + + // if not isShard, then either the key/value pair is local here and can be + // modified (or deleted) here or needs to be added as a new child node if + // there is an overflow. 
+ + if v == nil { + // delete operation, find the child and remove it, compacting the bucket in + // the process + for i, p := range child.KVs { + if bytes.Equal(p.Key, k) { + if len(child.KVs) == 1 { + // last element in the bucket, remove it and update the bitfield + return n.rmPointer(cindex, idx) + } + + copy(child.KVs[i:], child.KVs[i+1:]) + child.KVs = child.KVs[:len(child.KVs)-1] + return nil + } + } + return ErrNotFound + } + + // modify existing, check if key already exists + for _, p := range child.KVs { + if bytes.Equal(p.Key, k) { + p.Value = v + return nil + } + } + + if len(child.KVs) >= bucketSize { + // bucket is full, create a child node (shard) with all existing bucket + // elements plus the new one and set it in the place of the bucket + // TODO(rvagg): this all of the modifyValue() calls are going to result + // in a store.Put(), this could be improved by allowing NewNode() to take + // the bulk set of elements, or modifying modifyValue() for the case + // where we know for sure that the elements will go into buckets and + // not cause an overflow - i.e. we just need to take each element, hash it + // and consume the correct number of bytes off the digest and figure out + // where it should be in the new node. 
+ sub := NewNode(n.store) + sub.bitWidth = n.bitWidth + sub.hash = n.hash + hvcopy := &hashBits{b: hv.b, consumed: hv.consumed} + if err := sub.modifyValue(ctx, hvcopy, k, v); err != nil { + return err + } + + for _, p := range child.KVs { + chhv := &hashBits{b: n.hash([]byte(p.Key)), consumed: hv.consumed} + if err := sub.modifyValue(ctx, chhv, p.Key, p.Value); err != nil { + return err + } + } + + c, err := n.store.Put(ctx, sub) + if err != nil { + return err + } + + return n.setPointer(cindex, &Pointer{Link: c}) + } + + // otherwise insert the new element into the array in order, the ordering is + // important to retain canonical form + np := &KV{Key: k, Value: v} + for i := 0; i < len(child.KVs); i++ { + if bytes.Compare(k, child.KVs[i].Key) < 0 { + child.KVs = append(child.KVs[:i], append([]*KV{np}, child.KVs[i:]...)...) + return nil + } + } + child.KVs = append(child.KVs, np) + return nil +} + +// Insert a new key/value pair into the current node at the specified index. +// This will involve modifying the bitfield for that index and inserting a new +// bucket containing the single key/value pair at that position. +func (n *Node) insertKV(idx int, k []byte, v *cbg.Deferred) error { + if v == nil { + return ErrNotFound + } + + i := n.indexForBitPos(idx) + n.Bitfield.SetBit(n.Bitfield, idx, 1) + + p := &Pointer{KVs: []*KV{{Key: k, Value: v}}} + + n.Pointers = append(n.Pointers[:i], append([]*Pointer{p}, n.Pointers[i:]...)...) + return nil +} + +// Set a Pointer at a specific location, this doesn't modify the elements array +// but assumes that what's there can be updated. This seems to mostly be useful +// for tail calls. +func (n *Node) setPointer(i byte, p *Pointer) error { + n.Pointers[i] = p + return nil +} + +// Remove a child at a specified index, splicing the Pointers array to remove +// it and updating the bitfield to specify that an element no longer exists at +// that position. 
+func (n *Node) rmPointer(i byte, idx int) error { + copy(n.Pointers[i:], n.Pointers[i+1:]) + n.Pointers = n.Pointers[:len(n.Pointers)-1] + n.Bitfield.SetBit(n.Bitfield, idx, 0) + + return nil +} + +// Load a Pointer from the specified index of the Pointers array. The element +// should exist in a properly formed HAMT. +func (n *Node) getPointer(i byte) *Pointer { + if int(i) >= len(n.Pointers) || i < 0 { + // TODO(rvagg): I think this should be an error, there's an assumption in + // calling code that it's not null and a proper hash chomp shouldn't result + // in anything out of bounds + return nil + } + + return n.Pointers[i] +} + +// Copy a HAMT node and all of its contents. May be useful for mutation +// operations where the original needs to be preserved in memory. +// +// This operation will also recursively clone any child nodes that are attached +// as cached nodes. +func (n *Node) Copy() *Node { + // TODO(rvagg): clarify what situations this method is actually useful for. + nn := NewNode(n.store) + nn.bitWidth = n.bitWidth + nn.hash = n.hash + nn.Bitfield.Set(n.Bitfield) + nn.Pointers = make([]*Pointer, len(n.Pointers)) + + for i, p := range n.Pointers { + pp := &Pointer{} + if p.cache != nil { + pp.cache = p.cache.Copy() + } + pp.Link = p.Link + if p.KVs != nil { + pp.KVs = make([]*KV, len(p.KVs)) + for j, kv := range p.KVs { + pp.KVs[j] = &KV{Key: kv.Key, Value: kv.Value} + } + } + nn.Pointers[i] = pp + } + + return nn +} + +// Pointers elements can either contain a bucket of local elements or be a +// link to a child node. In the case of a link, isShard() returns true. +func (p *Pointer) isShard() bool { + return p.Link.Defined() +} + +// ForEach recursively calls function f on each k / val pair found in the HAMT. +// This performs a full traversal of the graph and for large HAMTs can cause +// a large number of loads from the IpldStore. This should not be used lightly +// as it can incur large costs. 
+func (n *Node) ForEach(ctx context.Context, f func(k string, val interface{}) error) error { + for _, p := range n.Pointers { + if p.isShard() { + chnd, err := p.loadChild(ctx, n.store, n.bitWidth, n.hash) + if err != nil { + return err + } + + if err := chnd.ForEach(ctx, f); err != nil { + return err + } + } else { + for _, kv := range p.KVs { + if err := f(string(kv.Key), kv.Value); err != nil { + return err + } + } + } + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/hash.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/hash.go new file mode 100644 index 0000000000..a99dbb77df --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/hash.go @@ -0,0 +1,69 @@ +package hamt + +import ( + "fmt" + + "github.com/spaolacci/murmur3" +) + +// hashBits is a helper that allows the reading of the 'next n bits' of a +// digest as an integer. State is retained and calls to `Next` will +// increment the number of consumed bits. +type hashBits struct { + b []byte + consumed int +} + +func mkmask(n int) byte { + return (1 << uint(n)) - 1 +} + +// Next returns the next 'i' bits of the hashBits value as an integer, or an +// error if there aren't enough bits. +// Not enough bits means that the tree is not large enough to contain the data. +// Where the hash is providing a sufficient enough random distribution this +// means that it is "full", Where the distribution is not sufficiently random +// enough, this means there have been too many collisions. Where a user can +// control keys (that are hashed) and the hash function has some +// predictability, collisions can be forced by producing the same indexes at +// (most) levels. 
+func (hb *hashBits) Next(i int) (int, error) { + if hb.consumed+i > len(hb.b)*8 { + // TODO(rvagg): this msg looks like a UnixFS holdover, it's an overflow + // and should probably bubble up a proper Err* + return 0, fmt.Errorf("sharded directory too deep") + } + return hb.next(i), nil +} + +// where 'i' is not '8', we need to read up to two bytes to extract the bits +// for the index. +func (hb *hashBits) next(i int) int { + curbi := hb.consumed / 8 + leftb := 8 - (hb.consumed % 8) + + curb := hb.b[curbi] + if i == leftb { + out := int(mkmask(i) & curb) + hb.consumed += i + return out + } else if i < leftb { + a := curb & mkmask(leftb) // mask out the high bits we don't want + b := a & ^mkmask(leftb-i) // mask out the low bits we don't want + c := b >> uint(leftb-i) // shift whats left down + hb.consumed += i + return int(c) + } else { + out := int(mkmask(leftb) & curb) + out <<= uint(i - leftb) + hb.consumed += leftb + out += hb.next(i - leftb) + return out + } +} + +func defaultHashFunction(val []byte) []byte { + h := murmur3.New64() + h.Write(val) + return h.Sum(nil) +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/pointer_cbor.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/pointer_cbor.go new file mode 100644 index 0000000000..8734626336 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/pointer_cbor.go @@ -0,0 +1,133 @@ +package hamt + +import ( + "fmt" + "io" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" +) + +var keyZero = []byte("0") +var keyOne = []byte("1") + +func (t *Pointer) MarshalCBOR(w io.Writer) error { + if t.Link != cid.Undef && len(t.KVs) > 0 { + return fmt.Errorf("hamt Pointer cannot have both a link and KVs") + } + + scratch := make([]byte, 9) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajMap, 1); err != nil { + return err + } + + if t.Link != cid.Undef { + // key for links is "0" + // Refmt (and the general IPLD data model currently) can't deal + // 
with non string keys. So we have this weird restriction right now + // hoping to be able to use integer keys soon + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 1); err != nil { + return err + } + + if _, err := w.Write(keyZero); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, t.Link); err != nil { + return err + } + } else { + // key for KVs is "1" + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 1); err != nil { + return err + } + + if _, err := w.Write(keyOne); err != nil { + return err + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.KVs))); err != nil { + return err + } + + for _, kv := range t.KVs { + if err := kv.MarshalCBOR(w); err != nil { + return err + } + } + } + + return nil +} + +func (t *Pointer) UnmarshalCBOR(br io.Reader) error { + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("cbor input should be of map") + } + + if extra != 1 { + return fmt.Errorf("Pointers should be a single element map") + } + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajTextString { + return fmt.Errorf("expected text string key") + } + + if val != 1 { + return fmt.Errorf("map keys in pointers must be a single byte long") + } + + if _, err := io.ReadAtLeast(br, scratch[:1], 1); err != nil { + return err + } + + switch scratch[0] { + case '0': + c, err := cbg.ReadCid(br) + if err != nil { + return err + } + t.Link = c + return nil + case '1': + maj, length, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected an array of KVs in cbor input") + } + + if length > 32 { + return fmt.Errorf("KV array in cbor input for pointer was too long") + } + + t.KVs = make([]*KV, length) + for i := 0; i < int(length); i++ { + var 
kv KV + if err := kv.UnmarshalCBOR(br); err != nil { + return err + } + + t.KVs[i] = &kv + } + + return nil + default: + return fmt.Errorf("invalid pointer map key in cbor input: %d", val) + } +} diff --git a/vendor/github.com/filecoin-project/go-hamt-ipld/v2/uhamt.go b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/uhamt.go new file mode 100644 index 0000000000..29e6740295 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-hamt-ipld/v2/uhamt.go @@ -0,0 +1,41 @@ +package hamt + +import ( + "math/big" + "math/bits" +) + +// indexForBitPos returns the index within the collapsed array corresponding to +// the given bit in the bitset. The collapsed array contains only one entry +// per bit set in the bitfield, and this function is used to map the indices. +// This is similar to a popcount() operation but is limited to a certain index. +// e.g. a Bitfield of `10010110000` shows that we have a 4 elements in the +// associated array. Indexes `[1]` and `[2]` are not present, but index `[3]` +// is at the second position of our Pointers array. 
+func (n *Node) indexForBitPos(bp int) int { + return indexForBitPos(bp, n.Bitfield) +} + +func indexForBitPos(bp int, bitfield *big.Int) int { + var x uint + var count, i int + w := bitfield.Bits() + for x = uint(bp); x > bits.UintSize && i < len(w); x -= bits.UintSize { + count += bits.OnesCount(uint(w[i])) + i++ + } + if i == len(w) { + return count + } + return count + bits.OnesCount(uint(w[i])&((1< [external packages] - [go-fil-markets packages] + [other-filecoin-project packages] ) ``` @@ -79,7 +79,7 @@ import ( ipld "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/assert" - "github.com/filecoin-project/go-fil-markets/filestore/file" + datatransfer "github.com/filecoin-project/go-data-transfer" ) ``` diff --git a/vendor/github.com/filecoin-project/go-multistore/COPYRIGHT b/vendor/github.com/filecoin-project/go-multistore/COPYRIGHT new file mode 100644 index 0000000000..771e6f7cd7 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2019. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. diff --git a/vendor/github.com/filecoin-project/go-multistore/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-multistore/LICENSE-APACHE new file mode 100644 index 0000000000..546514363d --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2019. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/filecoin-project/go-multistore/LICENSE-MIT b/vendor/github.com/filecoin-project/go-multistore/LICENSE-MIT new file mode 100644 index 0000000000..ea532a8305 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2019. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/filecoin-project/go-multistore/README.md b/vendor/github.com/filecoin-project/go-multistore/README.md new file mode 100644 index 0000000000..5b77b6ff13 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/README.md @@ -0,0 +1,73 @@ +# go-fil-markets +[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io) +[![CircleCI](https://circleci.com/gh/filecoin-project/go-multistore.svg?style=svg)](https://circleci.com/gh/filecoin-project/go-multistore) +[![codecov](https://codecov.io/gh/filecoin-project/go-multistore/branch/master/graph/badge.svg)](https://codecov.io/gh/filecoin-project/go-multistore) +[![GoDoc](https://godoc.org/github.com/filecoin-project/go-multistore?status.svg)](https://godoc.org/github.com/filecoin-project/go-multistore) + +This repository provides a mechanism for constructing multiple, isolated, IPFS storage instances (blockstore, filestore, DAGService) on top of a single +go-datastore instance. 
+ +### Background reading + +You may want to familiarize yourself with various IPFS storage layer components: + +- [DataStore](https://github.com/ipfs/go-datastore) +- [BlockStore](https://github.com/ipfs/go-ipfs-blockstore) +- [FileStore](https://github.com/ipfs/go-filestore) +- [BlockService](https://github.com/ipfs/go-blockservice) +- [DAGService](https://github.com/ipfs/go-ipld-format/blob/master/merkledag.go) + +## Installation +```bash +go get "github.com/filecoin-project/go-multistore"` +``` + +## Usage + +Initialize multistore: + +```golang +var ds datastore.Batching +multiDs, err := multistore.NewMultiDstore(ds) +``` + +Create new store: + +```golang +next := multiDs.Next() +store, err := multiDs.Get(store) + +// store will have a blockstore, filestore, and DAGService +``` + +List existing store indexes: + +```golang +indexes := multiDs.List() +``` + +Delete a store (will delete all data in isolated store without touching the rest of the datastore): + +```golang +var index int +err := multiDs.Delete(index) +``` + +Shutdown (make sure everything is closed): + +```golang +multiDs.Close() +``` + +## Contributing +Issues and PRs are welcome! Please first read the [background reading](#background-reading) and [CONTRIBUTING](./CONTRIBUTING.md) guide, and look over the current code. PRs against master require approval of at least two maintainers. + +Day-to-day discussion takes place in the #fil-components channel of the [Filecoin project chat](https://github.com/filecoin-project/community#chat). Usage or design questions are welcome. + +## Project-level documentation +The filecoin-project has a [community repo](https://github.com/filecoin-project/community) with more detail about our resources and policies, such as the [Code of Conduct](https://github.com/filecoin-project/community/blob/master/CODE_OF_CONDUCT.md). + +## License +This repository is dual-licensed under Apache 2.0 and MIT terms. + +Copyright 2020. Protocol Labs, Inc. 
diff --git a/vendor/github.com/filecoin-project/go-multistore/go.mod b/vendor/github.com/filecoin-project/go-multistore/go.mod new file mode 100644 index 0000000000..6140a35140 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/go.mod @@ -0,0 +1,21 @@ +module github.com/filecoin-project/go-multistore + +go 1.14 + +require ( + github.com/hashicorp/go-multierror v1.1.0 + github.com/ipfs/go-block-format v0.0.2 + github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 + github.com/ipfs/go-cid v0.0.6 + github.com/ipfs/go-datastore v0.4.4 + github.com/ipfs/go-filestore v1.0.0 + github.com/ipfs/go-graphsync v0.1.0 + github.com/ipfs/go-ipfs-blockstore v1.0.0 + github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipld-format v0.2.0 + github.com/ipfs/go-merkledag v0.3.1 + github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e + github.com/stretchr/testify v1.5.1 + go.uber.org/multierr v1.5.0 + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +) diff --git a/vendor/github.com/filecoin-project/go-fil-markets/go.sum b/vendor/github.com/filecoin-project/go-multistore/go.sum similarity index 75% rename from vendor/github.com/filecoin-project/go-fil-markets/go.sum rename to vendor/github.com/filecoin-project/go-multistore/go.sum index 244f41ba06..23d86c5f16 100644 --- a/vendor/github.com/filecoin-project/go-fil-markets/go.sum +++ b/vendor/github.com/filecoin-project/go-multistore/go.sum @@ -1,7 +1,5 @@ -cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9 h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= 
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -12,8 +10,8 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= +github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50 h1:4i3KsuVA0o0KoBxAC5x+MY7RbteiMK1V7gf/G08NGIQ= github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3 h1:A/EVblehb75cUgXA5njHPn0kLAsykn6mJGz7rnmW5W0= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= @@ -31,87 +29,27 @@ github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8Nz github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod 
h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 h1:6xT9KW8zLC5IlbaIF5Q7JNieBoACT7iW0YTxQHR0in0= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e h1:lj77EKYUpYXTd8CD/+QMIf8b6OIOTsfEBSXiAzuEHTU= -github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e/go.mod h1:3ZQK6DMPSz/QZ73jlWxBtUhNA8xZx7LzUFSq/OfP8vk= github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgraph-io/badger v1.6.0-rc1 h1:JphPpoBZJ3WHha133BGYlQqltSGIhV+VsEID0++nN9A= github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 
h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= -github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= -github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/fatih/color v1.8.0 h1:5bzFgL+oy7JITMTxUPJ00n7VxmYd/PdMp5mHFX40/RY= -github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5 h1:/MmWluswvDIbuPvBct4q6HeQgVm62O2DzWYTB38kt4A= -github.com/filecoin-project/go-address v0.0.0-20200107215422-da8eea2842b5/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= -github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be h1:TooKBwR/g8jG0hZ3lqe9S5sy2vTUcLOZLlz3M5wGn2E= -github.com/filecoin-project/go-address v0.0.2-0.20200218010043-eb9bb40ed5be/go.mod h1:SAOwJoakQ8EPjwNIsiakIQKsoKdkcbx8U3IapgCg9R0= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e h1:IOoff6yAZSJ5zHCPY2jzGNwQYQU6ygsRVe/cSnJrY+o= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200131012142-05d80eeccc5e/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2 h1:jamfsxfK0Q9yCMHt8MPWx7Aa/O9k2Lve8eSc6FILYGQ= -github.com/filecoin-project/go-amt-ipld/v2 v2.0.1-0.20200424220931-6263827e49f2/go.mod h1:boRtQhzmxNocrMxOXo1NYn4oUc1NGvR8tEa79wApNXg= -github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060 
h1:/3qjGMn6ukXgZJHsIbuwGL7ipla8DOV3uHZDBJkBYfU= -github.com/filecoin-project/go-bitfield v0.0.0-20200416002808-b3ee67ec9060/go.mod h1:iodsLxOFZnqKtjj2zkgqzoGNrv6vUqj69AT/J8DKXEw= -github.com/filecoin-project/go-bitfield v0.0.1/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e h1:gkG/7G+iKy4He+IiQNeQn+nndFznb/vCoOR8iRQsm60= -github.com/filecoin-project/go-bitfield v0.0.2-0.20200518150651-562fdb554b6e/go.mod h1:Ry9/iUlWSyjPUzlAvdnfy4Gtvrq4kWmWDztCU1yEgJY= -github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= -github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= -github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v0.3.0 h1:BwBrrXu9Unh9JjjX4GAc5FfzUNioor/aATIjfc7JTBg= -github.com/filecoin-project/go-data-transfer v0.3.0/go.mod h1:cONglGP4s/d+IUQw5mWZrQK+FQATQxr3AXzi4dRh0l4= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5 h1:yvQJCW9mmi9zy+51xA01Ea2X7/dL7r8eKDPuGUjRmbo= -github.com/filecoin-project/go-fil-commcid v0.0.0-20200208005934-2b8bd03caca5/go.mod h1:JbkIgFF/Z9BDlvrJO1FuKkaWsH673/UdFaiVS6uIHlA= -github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6 h1:92PET+sx1Hb4W/8CgFwGuxaKbttwY+UNspYZTvXY0vs= -github.com/filecoin-project/go-padreader v0.0.0-20200210211231-548257017ca6/go.mod h1:0HgYnrkeSU4lu1p+LEOeDpFsNBssa0OGGriWdA4hvaE= -github.com/filecoin-project/go-paramfetch v0.0.1 h1:gV7bs5YaqlgpGFMiLxInGK2L1FyCXUE0rimz4L7ghoE= -github.com/filecoin-project/go-paramfetch v0.0.1/go.mod h1:fZzmf4tftbwf9S37XRifoJlz7nCjRdIrMGLR07dKLCc= 
-github.com/filecoin-project/go-statemachine v0.0.0-20200619205156-c7bf525c06ef h1:qFXGHKLq49qFmvXjvhvQ2eU3jVk2Z0QaKYQpO5S3SF0= -github.com/filecoin-project/go-statemachine v0.0.0-20200619205156-c7bf525c06ef/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= -github.com/filecoin-project/go-statestore v0.1.0 h1:t56reH59843TwXHkMcwyuayStBIiWBRilQjQ+5IiwdQ= -github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6 h1:NIcubpeasVs++K5EFelMXeURRb8sWCuXQNOSWnvTc14= -github.com/filecoin-project/sector-storage v0.0.0-20200615154852-728a47ab99d6/go.mod h1:M59QnAeA/oV+Z8oHFLoNpGMv0LZ8Rll+vHVXX7GirPM= -github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf h1:fbxBG12yrxilPFV1EG2lYqpUyAlRZWkvtqjk2svSeXY= -github.com/filecoin-project/specs-actors v0.0.0-20200210130641-2d1fbd8672cf/go.mod h1:xtDZUB6pe4Pksa/bAJbJ693OilaC5Wbot9jMhLm3cZA= -github.com/filecoin-project/specs-actors v0.3.0 h1:QxgAuTrZr5TPqjyprZk0nTYW5o0JWpzbb5v+4UHHvN0= -github.com/filecoin-project/specs-actors v0.3.0/go.mod h1:nQYnFbQ7Y0bHZyq6HDEuVlCPR+U3z5Q3wMOQ+2aiV+Y= -github.com/filecoin-project/specs-actors v0.6.0 h1:IepUsmDGY60QliENVTkBTAkwqGWw9kNbbHOcU/9oiC0= -github.com/filecoin-project/specs-actors v0.6.0/go.mod h1:dRdy3cURykh2R8O/DKqy8olScl70rmIS7GrB4hB1IDY= -github.com/filecoin-project/specs-actors v0.7.0 h1:tldjW8pFiJcMtyGPsXmPoFdbN/18mKW3BpEMlO4NJAc= -github.com/filecoin-project/specs-actors v0.7.0/go.mod h1:+z0htZu/wLBDbOLcQTKKUEC2rkUTFzL2KJ/bRAVWkws= -github.com/filecoin-project/specs-storage v0.1.0 h1:PkDgTOT5W5Ao7752onjDl4QSv+sgOVdJbvFjOnD5w94= -github.com/filecoin-project/specs-storage 
v0.1.0/go.mod h1:Pr5ntAaxsh+sLG/LYiL4tKzvA83Vk5vLODYhfNwOg7k= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -121,40 +59,34 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 
h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc= -github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099 h1:vQqOW42RRM5LoM/1K5dK940VipLqpH8lEVGrMz+mNjU= -github.com/hannahhoward/cbor-gen-for v0.0.0-20191218204337-9ab7b1bcc099/go.mod h1:WVPCl0HO/0RAL5+vBH2GMxBomlxBF70MAS78+Lu1//k= -github.com/hannahhoward/go-pubsub 
v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4nmd7b5qy5t0GWDTwSn4OyRgfAXSmo6VnryBY= github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -176,21 +108,20 @@ github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/ github.com/ipfs/go-block-format v0.0.2 h1:qPDvcP19izTjU8rgo6p7gTXZlkMkF5bz5G3fqIsSCPE= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3 h1:9XgsPMwwWJSC9uVr2pMDsW2qFTBSkxpGMhmna8mIjPM= github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= +github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834 h1:hFJoI1D2a3MqiNkSb4nKwrdkhCngUxUTFNwVwovZX2s= +github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod 
h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10 h1:5mRf2p8Bv2iKiuPsGrQUrx38rdBm2T/03JCM6VWzoMc= -github.com/ipfs/go-cid v0.0.4-0.20191112011718-79e75dffeb10/go.mod h1:/BYOuUoxkE+0f6tGzlzMvycuN+5l35VOR4Bpg2sCmds= -github.com/ipfs/go-cid v0.0.4 h1:UlfXKrZx1DjZoBhQHmNHLC1fK1dUJDN20Y28A7s+gJ8= github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= github.com/ipfs/go-cid v0.0.5 h1:o0Ix8e/ql7Zb5UVUJEUfjsWCIY8t48++9lR8qi6oiJU= github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= +github.com/ipfs/go-cid v0.0.6 h1:go0y+GcDOGeJIV01FeBsta4FHngoA4Wz7KMeLkXAhMs= +github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1 h1:F4k0TkTAZGLFzBOrVKDAvch6JZtuN4NHkfdcEZL50aI= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= @@ -200,33 +131,28 @@ github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13X github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod 
h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5 h1:dxKuqw5T1Jm8OuV+lchA76H9QZFyPKZeLuT6bN42hJQ= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103 h1:SD+bXod/pOWKJCGj0tG140ht8Us5k+3JBcHw0PVYTho= -github.com/ipfs/go-graphsync v0.0.6-0.20200504202014-9d5f2c26a103/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e h1:bUtmeXx6JpjxRPlMdlKfPXC5kKhLHuueXKgs1Txb9ZU= -github.com/ipfs/go-hamt-ipld v0.0.15-0.20200131012125-dd88a59d3f2e/go.mod h1:9aQJu/i/TaRDW6jqB5U217dLIDopn50wxLdHXM2CTfE= +github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= +github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-graphsync v0.1.0 h1:RjLk7ha1tJtDXktqoxOjhvx4lDuzzIU+xQ+PEi74r3s= +github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4 h1:2SGI6U1B44aODevza8Rde3+dY30Pb+lbcObe1LETxOQ= github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= github.com/ipfs/go-ipfs-blockstore v1.0.0 h1:pmFp5sFYsYVvMOp9X01AK3s85usVcLvkBTRsN6SnfUA= github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= 
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-chunker v0.0.1 h1:cHUUxKFQ99pozdahi+uSC/3Y6HeRpi9oTeUHbE27SEw= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= github.com/ipfs/go-ipfs-chunker v0.0.5/go.mod h1:jhgdF8vxRHycr00k13FM8Y0E+6BoalYeobXmUyTreP8= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v0.0.1 h1:QBg+Ts2zgeemK/dB0saiF/ykzRGgfoFMT90Rzo0OnVU= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1 h1:IW/bXGeaAZV2VH0Kuok+Ohva/zHkHmeLFBxC1k7mNPc= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= @@ -235,8 +161,6 @@ github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFq github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.7 h1:s5BRD12ndahqYifeH1S8Z73zqZhR+3IdKYAG9PiETs0= -github.com/ipfs/go-ipfs-files v0.0.7/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files 
v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= @@ -255,24 +179,14 @@ github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA github.com/ipfs/go-ipld-cbor v0.0.4 h1:Aw3KPOKXjvrm6VjwJvFf1F1ekR/BH3jdof3Bk7OTiSA= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= -github.com/ipfs/go-ipld-format v0.0.2 h1:OVAGlyYT6JPZ0pEfGntFPS40lfrDmaDbQwNHEY2G9Zs= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.0 h1:BW3LQIiZzpNyolt84yvKNCd3FU+AK4VDw1hnHR+1aiI= -github.com/ipfs/go-log v1.0.0/go.mod h1:JO7RzlMK6rA+CIxFMLOuB6Wf5b81GDiKElL7UPSIKjA= -github.com/ipfs/go-log v1.0.1 h1:5lIEEOQTk/vd1WuPFBRqz2mcp+5G1fMVcW+Ib/H5Hfo= -github.com/ipfs/go-log v1.0.1/go.mod h1:HuWlQttfN6FWNHRhlY5yMk/lW7evQC0HHGOxEwMRR8I= github.com/ipfs/go-log v1.0.2 h1:s19ZwJxH8rPWzypjcDpqPLIyV7BnbLqvpli3iZoqYK0= github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3 h1:Gg7SUYSZ7BrqaKMwM+hRgcAkKv4QLfzP4XPQt5Sx/OI= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log/v2 v2.0.1/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-log/v2 v2.0.2 h1:xguurydRdfKMJjKyxNXNU8lYP0VZH1NUwJRwUorjuEw= github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.3 h1:Q2gXcBoCALyLN/pUQlz1qgu0x3uFV6FzP9oXhpfyJpc= -github.com/ipfs/go-log/v2 
v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.3.1 h1:3UqWINBEr3/N+r6OwgFXAddDP/8zpQX/8J7IGVOCqRQ= github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= @@ -287,14 +201,10 @@ github.com/ipfs/go-unixfs v0.2.4 h1:6NwppOXefWIyysZ4LR/qUBPvXd5//8J3jiMdvpbw6Lo= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339 h1:YEYaf6mrrjoTfGpi7MajslcGvhP23Sh0b3ubcGYRMw0= -github.com/ipld/go-car v0.1.1-0.20200429200904-c222d793c339/go.mod h1:eajxljm6I8o3LitnFeVEmucwZmz7+yLSiKce9yYMefg= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e h1:ZISbJlM0urTANR9KRfRaqlBmyOj5uUtxs2r4Up9IXsA= github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1 h1:K1Ysr7kgIlo7YQkPqdkA6H7BVdIugvuAz7OQUTJxLdE= github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= -github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.5 h1:qzXWUJfuMdlLMtt0a3Dgt+xkWQiA5itDEITVJtuSwMc= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= github.com/jackpal/go-nat-pmp v1.0.1 h1:i0LektDkO1QlrTm/cSuP+PyBCDnYvjPLGl4LdWEMiaA= @@ -311,9 +221,8 @@ github.com/jbenet/goprocess v0.1.3 h1:YKyIEECS/XvcfHtBzxtjBBbWK+MbvA6dG8ASiqwvr1 
github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= -github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= @@ -322,7 +231,6 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b h1:wxtKgYHEncAU00muMD06dzLiahtGM1eouRNOzVV7tdQ= github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= @@ -347,12 +255,14 @@ github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZ github.com/libp2p/go-flow-metrics v0.0.3 
h1:8tAs/hSdNvUiLgtlSy3mxwxWP4I9y/jlkPFT7epKdeM= github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= +github.com/libp2p/go-libp2p v0.1.1 h1:52sB0TJuDk2nYMcMfHOKaPoaayDZjaYVCq6Vk1ejUTk= github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.6.0 h1:EFArryT9N7AVA70LCcOh8zxsW+FeDnxwcpWQx9k7+GM= github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1 h1:WLBZcIRsjZlWdAZj9CiBSvU2wQXoUOiS1Zk1tM7DTJI= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= +github.com/libp2p/go-libp2p-blankhost v0.1.1 h1:X919sCh+KLqJcNRApj43xCSiQRYqOSI88Fdf55ngf78= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.4 h1:I96SWjR4rK9irDHcHq3XHN6hawCRTPUADzkJacgZLvk= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= @@ -361,13 +271,12 @@ github.com/libp2p/go-libp2p-circuit v0.1.4 h1:Phzbmrg3BkVzbqd4ZZ149JxCuUWu2wZcXf github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= +github.com/libp2p/go-libp2p-core v0.0.3 h1:+IonUYY0nJZLb5Fdv6a6DOjtGP1L8Bb3faamiI2q5FY= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= github.com/libp2p/go-libp2p-core v0.2.0/go.mod 
h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= -github.com/libp2p/go-libp2p-core v0.2.4 h1:Et6ykkTwI6PU44tr8qUF9k43vP0aduMNniShAbUJJw8= github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.3.0 h1:F7PqduvrztDtFsAa/bcheQ3azmNo+Nq7m8hQY5GiUW8= github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= @@ -393,28 +302,30 @@ github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLK github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= +github.com/libp2p/go-libp2p-peerstore v0.1.0 h1:MKh7pRNPHSh1fLPj8u/M/s/napdmeNpoi9BRy9lPN0E= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3 h1:wMgajt1uM2tMiqf4M+4qWKVyyFc8SfA+84VV9glZq1M= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= github.com/libp2p/go-libp2p-peerstore v0.2.0 h1:XcgJhI8WyUOCbHyRLNEX5542YNj8hnLSJ2G1InRjDhk= github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= +github.com/libp2p/go-libp2p-record v0.1.0 h1:wHwBGbFzymoIl69BpgwIu0O6ta3TXGcMPvHUAcodzRc= github.com/libp2p/go-libp2p-record v0.1.0/go.mod 
h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1 h1:ZJK2bHXYUBqObHX+rHLSNrM3M8fmJUlUHrodDPPATmY= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= +github.com/libp2p/go-libp2p-secio v0.1.0 h1:NNP5KLxuP97sE5Bu3iuwOWyT/dKEGMN5zSLMWdB7GTQ= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0 h1:ywzZBsWEEz2KNTn5RtzauEDq5RFEefPsttXYwAWqHng= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1 h1:eNWbJTdyPA7NxhP7J3c5lT97DC5d+u+IldkgCYFTPVA= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= +github.com/libp2p/go-libp2p-swarm v0.1.0 h1:HrFk2p0awrGEgch9JXK/qp/hfjqQfgNxpLWnCiWPg5s= github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= github.com/libp2p/go-libp2p-swarm v0.2.2 h1:T4hUpgEs2r371PweU3DuH7EOmBIdTBCwWs+FLcgx3bQ= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= +github.com/libp2p/go-libp2p-testing v0.0.4 h1:Qev57UR47GcLPXWjrunv5aLIQGO4n9mhI/8/EIrEEFc= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0 h1:WaFRj/t3HdMZGNZqnU2pS7pDRBmMeoDx7/HDNpeyT9U= github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= github.com/libp2p/go-libp2p-testing v0.1.1 h1:U03z3HnGI7Ni8Xx6ONVZvUFOAzWYmolWf5W5jAOPNmU= github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= @@ -427,6 +338,7 @@ github.com/libp2p/go-libp2p-yamux v0.2.1 
h1:Q3XYNiKCC2vIxrvUJL+Jg1kiyeEaIDNKLjgE github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= github.com/libp2p/go-libp2p-yamux v0.2.2 h1:eGvbqWqWY9S5lrpe2gA0UCOLCdzCgYSAR3vo/xCsNQg= github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-maddr-filter v0.0.4 h1:hx8HIuuwk34KePddrp2mM5ivgPkZ09JH4AvsALRbFUs= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5 h1:CW3AgbMO6vUvT4kf87y4N+0P8KUl2aqLYhrGyDUbLSg= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -444,7 +356,6 @@ github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36Gchpc github.com/libp2p/go-nat v0.0.4 h1:KbizNnq8YIf7+Hn7+VFL/xE0eDrkPru2zIO9NMwL8UQ= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3 h1:wjlG7HvQkt4Fq4cfH33Ivpwp0omaElYEi9z26qaIkIk= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-openssl v0.0.4 h1:d27YZvLoTyMhIN4njrkr8zMDOM4lfpHIp6A+TK9fovg= github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -452,10 +363,10 @@ github.com/libp2p/go-reuseport v0.0.1 h1:7PhkfH73VXfPJYKQ6JwS5I/eVcoyYi9IMNGc6FW github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= github.com/libp2p/go-reuseport-transport v0.0.2 h1:WglMwyXyBu61CMkjCCtnmqNqnjib0GIEjMiHTwR/KN4= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-stream-muxer v0.0.1 h1:Ce6e2Pyu+b5MC1k3eeFtAax0pW4gc6MosYSLV05UeLw= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= 
github.com/libp2p/go-stream-muxer-multistream v0.2.0 h1:714bRJ4Zy9mdhyTLJ+ZKiROmAFwUHpeRidG+q7LTQOg= github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= +github.com/libp2p/go-tcp-transport v0.1.0 h1:IGhowvEqyMFknOar4FWCKSWE0zL36UFKQtiRQD60/8o= github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1 h1:yGlqURmqgNA2fvzjSgZNlHcsd/IulAnKM8Ncu+vlqnw= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= @@ -472,18 +383,10 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-runewidth v0.0.7 h1:Ei8KR0497xHyKJPAv59M1dkC+rOZCMBJ+t3fZ+twI54= -github.com/mattn/go-runewidth 
v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= @@ -491,52 +394,52 @@ github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8Rv github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= +github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771 h1:MHkK1uRtFbVqvAgvWxafZe54+5uBxLluGylDiKgdhwo= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.2 h1:ZEw4I2EgPKDJ2iEw0cNmLB3ROrEmkOtXIkaG7wZg+78= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod 
h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= +github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= +github.com/multiformats/go-multiaddr v0.0.4 h1:WgMSI84/eRLdbptXMkMWDXPjPq7SPLIgGUVm2eroyU4= github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.1 h1:rVAztJYMhCQ7vEFr8FvxW3mS+HF2eY/oPbOMeS0ZDnE= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= -github.com/multiformats/go-multiaddr v0.2.0 h1:lR52sFwcTCuQb6bTfnXF6zA2XfyYvyd+5a9qECv/J90= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= github.com/multiformats/go-multiaddr v0.2.1 h1:SgG/cw5vqyB5QQe5FPe2TqggU9WtrA9X4nZw7LlVqOI= github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= +github.com/multiformats/go-multiaddr-dns v0.0.2 h1:/Bbsgsy3R6e3jf2qBahzNHzww6usYaZ0NhNH3sqdFS8= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0 h1:YWJoIDwLePniH7OU5hBnDZV6SWuvJqJ0YtN6pLeH9zA= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= 
+github.com/multiformats/go-multiaddr-fmt v0.0.1 h1:5YjeOIzbX8OTKVaN72aOzGIYW7PnrZrnkDyOfAWRSMA= github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= github.com/multiformats/go-multiaddr-net v0.0.1 h1:76O59E3FavvHqNg7jvzWzsPSW5JSi/ek0E4eiDVbg9g= github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0 h1:ZepO8Ezwovd+7b5XPPDhQhayk1yt0AJpzQBpq9fejx4= github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= github.com/multiformats/go-multiaddr-net v0.1.2 h1:P7zcBH9FRETdPkDrylcXVjQLQ2t1JQtNItZULWNWgeg= github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= github.com/multiformats/go-multibase v0.0.1 h1:PN9/v21eLywrFWdFNsFKaU04kLJzuYzmrJR+ubhT9qA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= +github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= +github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.9 h1:aoijQXYYl7Xtb2pUUP68R+ys1TlnlR3eX6wmozr0Hp4= -github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= 
-github.com/multiformats/go-multihash v0.0.10 h1:lMoNbh2Ssd9PUF74Nz008KGzGPlfeV6wH3rit5IIGCM= github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13 h1:06x+mk/zj1FoMsgNejLpy6QTvJqlSt/BhLEy87zidlc= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= @@ -545,7 +448,6 @@ github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wS github.com/multiformats/go-multistream v0.1.1 h1:JlAdpIFhBhGRLxe9W6Om0w++Gd6KMWoFPZL/dEnm9nI= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.2 h1:6sUvyh2YHpJCb8RZ6eYzj6iJQ4+chWYmyIHxszqlPTA= github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.5 h1:XVZwSo04Cs3j/jS0uAEPpT3JY6DzMcVLLoWOSnCxOjg= github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= @@ -571,32 +473,28 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992 h1:bzMe+2coZJYHnhGgVlcQKuRy4FSny4ds8dLQjw5P1XE= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/polydawn/refmt v0.0.0-20190807091052-3d65705ee9f1 h1:CskT+S6Ay54OwxBGB0R3Rsx4Muto6UnEYTyKJbyRIAI= github.com/polydawn/refmt 
v0.0.0-20190807091052-3d65705ee9f1/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a h1:hjZfReYVLbqFkAtr2us7vdy04YWz3LVAirzP7reh8+M= github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0 h1:c8R11WC8m7KNMkTv/0+Be8vvwo4I3/Ut9AC2FW8fX3U= -github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod 
h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa h1:E+gaaifzi2xF65PbDmuKI3PhLWY6G5opMLniFq8vmXA= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8= github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= +github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a h1:/eS3yfGjQKG+9kayBkj0ip1BGhq6zJ3eaVksphxAaek= github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU= github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= @@ -606,12 +504,10 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/stretchr/objx v0.1.0/go.mod 
h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -621,27 +517,16 @@ github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli/v2 v2.0.0 h1:+HU9SCbu8GnEUFtIBfuUNXN39ofWViIEJIp6SURMpCg= -github.com/urfave/cli/v2 v2.0.0/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= +github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436 h1:qOpVTI+BrstcjTZLm2Yz/3sOnqkzj3FQoh0g+E5s3Gc= github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830 h1:8kxMKmKzXXL4Ru1nyhvdms/JjWt+3YLpvRb/bAjO/y0= github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a h1:G++j5e0OC488te356JvdhaM8YS6nMsjLAYF7JxCv07w= github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a h1:xc8sbWMwBsvi8OrxFZR8zxw/fdCneHBLFDJJaV14eaE= -github.com/whyrusleeping/cbor-gen v0.0.0-20191212224538-d370462a7e8a/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0 
h1:efb/4CnrubzNGqQOeHErxyQ6rIsJb7GcgeSDF7fqWeI= -github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158 h1:WXhVOwj2USAXB5oMDwRl3piOux2XMV9TANaYxXHdkoE= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66 h1:LolR9FiEfQNn5U031bAhn/46po2JgWHKadYbcWFIJ+0= -github.com/whyrusleeping/cbor-gen v0.0.0-20200206220010-03c9665e2a66/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= +github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105 h1:Sh6UG5dW5xW8Ek2CtRGq4ipdEvvx9hOyBJjEGyTYDl0= github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e h1:JY8o/ebUUrCYetWmjRCNghxC59cOEaili83rxPRQCLw= -github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= -github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= @@ -656,29 +541,22 @@ github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go. 
github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.1 h1:8dP3SGL7MPB94crU3bEPplMPe83FI4EouesJUeFHv50= go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= -go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.5.1 h1:rsqfU5vBkVknbhUGbAUwQKR2H4ItV8tjJ+6kJX4cxHM= -go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0 h1:f3WCSC2KzAcBXGATIxAB1E2XuCpNU255wNKZ505qi3E= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0 h1:nR6NoDBgAf67s68NhaXbsojM+2gxp3S1hWkHDl27pVU= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -689,12 +567,11 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200117160349-530e935923ad h1:Jh8cai0fqIK+f6nG0UgPW5wFk8wmiMhM3AyciDBdtQg= golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -704,8 +581,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -718,17 +593,15 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180202135801-37707fdb30a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -741,14 +614,11 @@ golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae 
h1:xiXzMMEQdQcric9hXtr1QU98MHunKK7OTtsoU6bYWs4= golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= @@ -766,34 +636,26 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361 h1:RIIXAeV6GvDBuADKumTODatUqANFZ+5BPMnzsy4hulY= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479 h1:csuS+MHeEA2eWhyjQCMaPMq4z1+/PohkBSjJZHSIbOE= -golang.org/x/tools v0.0.0-20200108195415-316d2f248479/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= 
-google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= @@ -806,10 +668,6 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= -howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= diff --git 
a/vendor/github.com/filecoin-project/go-multistore/mbstore.go b/vendor/github.com/filecoin-project/go-multistore/mbstore.go new file mode 100644 index 0000000000..bce48de779 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/mbstore.go @@ -0,0 +1,95 @@ +package multistore + +import ( + "context" + + "github.com/hashicorp/go-multierror" + "golang.org/x/xerrors" + + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + blockstore "github.com/ipfs/go-ipfs-blockstore" +) + +type multiReadBs struct { + // TODO: some caching + mds *MultiStore +} + +func (m *multiReadBs) Has(cid cid.Cid) (bool, error) { + m.mds.lk.RLock() + defer m.mds.lk.RUnlock() + + var merr error + for i, store := range m.mds.open { + has, err := store.Bstore.Has(cid) + if err != nil { + merr = multierror.Append(merr, xerrors.Errorf("has (ds %d): %w", i, err)) + continue + } + if !has { + continue + } + + return true, nil + } + + return false, merr +} + +func (m *multiReadBs) Get(cid cid.Cid) (blocks.Block, error) { + m.mds.lk.RLock() + defer m.mds.lk.RUnlock() + + var merr error + for i, store := range m.mds.open { + has, err := store.Bstore.Has(cid) + if err != nil { + merr = multierror.Append(merr, xerrors.Errorf("has (ds %d): %w", i, err)) + continue + } + if !has { + continue + } + + val, err := store.Bstore.Get(cid) + if err != nil { + merr = multierror.Append(merr, xerrors.Errorf("get (ds %d): %w", i, err)) + continue + } + + return val, nil + } + + if merr == nil { + return nil, blockstore.ErrNotFound + } + + return nil, merr +} + +func (m *multiReadBs) DeleteBlock(cid cid.Cid) error { + return xerrors.Errorf("operation not supported") +} + +func (m *multiReadBs) GetSize(cid cid.Cid) (int, error) { + return 0, xerrors.Errorf("operation not supported") +} + +func (m *multiReadBs) Put(block blocks.Block) error { + return xerrors.Errorf("operation not supported") +} + +func (m *multiReadBs) PutMany(blocks []blocks.Block) error { + return xerrors.Errorf("operation 
not supported") +} + +func (m *multiReadBs) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + return nil, xerrors.Errorf("operation not supported") +} + +func (m *multiReadBs) HashOnRead(enabled bool) { + return +} + +var _ blockstore.Blockstore = &multiReadBs{} diff --git a/vendor/github.com/filecoin-project/go-multistore/multistore.go b/vendor/github.com/filecoin-project/go-multistore/multistore.go new file mode 100644 index 0000000000..86d6cf389b --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/multistore.go @@ -0,0 +1,222 @@ +package multistore + +import ( + "encoding/json" + "fmt" + "sort" + "sync" + + "go.uber.org/multierr" + "golang.org/x/xerrors" + + "github.com/ipfs/go-datastore" + ktds "github.com/ipfs/go-datastore/keytransform" + "github.com/ipfs/go-datastore/query" + blockstore "github.com/ipfs/go-ipfs-blockstore" +) + +// StoreID identifies a unique instance of a store +type StoreID uint64 + +// MultiStore is a wrapper around a datastore that provides multiple isolated +// instances of IPFS storage components -> BlockStore, FileStore, DAGService, etc +type MultiStore struct { + ds datastore.Batching + + open map[StoreID]*Store + next StoreID + + lk sync.RWMutex +} + +var dsListKey = datastore.NewKey("/list") +var dsMultiKey = datastore.NewKey("/multi") + +// NewMultiDstore returns a new instance of a MultiStore for the given datastore +// instance +func NewMultiDstore(ds datastore.Batching) (*MultiStore, error) { + listBytes, err := ds.Get(dsListKey) + if xerrors.Is(err, datastore.ErrNotFound) { + listBytes, _ = json.Marshal(StoreIDList{}) + } else if err != nil { + return nil, xerrors.Errorf("could not read multistore list: %w", err) + } + + var ids StoreIDList + if err := json.Unmarshal(listBytes, &ids); err != nil { + return nil, xerrors.Errorf("could not unmarshal multistore list: %w", err) + } + + mds := &MultiStore{ + ds: ds, + open: map[StoreID]*Store{}, + } + + for _, i := range ids { + if i > mds.next { + 
mds.next = i + } + + _, err := mds.Get(i) + if err != nil { + return nil, xerrors.Errorf("open store %d: %w", i, err) + } + } + + return mds, nil +} + +// Next returns the next available StoreID +func (mds *MultiStore) Next() StoreID { + mds.lk.Lock() + defer mds.lk.Unlock() + + mds.next++ + return mds.next +} + +func (mds *MultiStore) updateStores() error { + stores := make(StoreIDList, 0, len(mds.open)) + for k := range mds.open { + stores = append(stores, k) + } + sort.Sort(stores) + + listBytes, err := json.Marshal(stores) + if err != nil { + return xerrors.Errorf("could not marshal list: %w", err) + } + err = mds.ds.Put(dsListKey, listBytes) + if err != nil { + return xerrors.Errorf("could not save stores list: %w", err) + } + return nil +} + +// Get returns the store for the given ID +func (mds *MultiStore) Get(i StoreID) (*Store, error) { + mds.lk.Lock() + defer mds.lk.Unlock() + + store, ok := mds.open[i] + if ok { + return store, nil + } + + wds := ktds.Wrap(mds.ds, ktds.PrefixTransform{ + Prefix: dsMultiKey.ChildString(fmt.Sprintf("%d", i)), + }) + + var err error + mds.open[i], err = openStore(wds) + if err != nil { + return nil, xerrors.Errorf("could not open new store: %w", err) + } + + err = mds.updateStores() + if err != nil { + return nil, xerrors.Errorf("updating stores: %w", err) + } + + return mds.open[i], nil +} + +// List returns a list of all known store IDs +func (mds *MultiStore) List() StoreIDList { + mds.lk.RLock() + defer mds.lk.RUnlock() + + out := make(StoreIDList, 0, len(mds.open)) + for i := range mds.open { + out = append(out, i) + } + sort.Sort(out) + + return out +} + +// Delete deletes the store with the given id, including all of its data +func (mds *MultiStore) Delete(i StoreID) error { + mds.lk.Lock() + defer mds.lk.Unlock() + + store, ok := mds.open[i] + if !ok { + return nil + } + delete(mds.open, i) + err := store.Close() + if err != nil { + return xerrors.Errorf("closing store: %w", err) + } + + err = mds.updateStores() + 
if err != nil { + return xerrors.Errorf("updating stores: %w", err) + } + + qres, err := store.ds.Query(query.Query{KeysOnly: true}) + if err != nil { + return xerrors.Errorf("query error: %w", err) + } + defer qres.Close() //nolint:errcheck + + b, err := store.ds.Batch() + if err != nil { + return xerrors.Errorf("batch error: %w", err) + } + + for r := range qres.Next() { + if r.Error != nil { + _ = b.Commit() + return xerrors.Errorf("iterator error: %w", err) + } + err := b.Delete(datastore.NewKey(r.Key)) + if err != nil { + _ = b.Commit() + return xerrors.Errorf("adding to batch: %w", err) + } + } + + err = b.Commit() + if err != nil { + return xerrors.Errorf("committing: %w", err) + } + + return nil +} + +// Close closes all open datastores +func (mds *MultiStore) Close() error { + mds.lk.Lock() + defer mds.lk.Unlock() + + var err error + for _, s := range mds.open { + err = multierr.Append(err, s.Close()) + } + mds.open = make(map[StoreID]*Store) + + return err +} + +// MultiReadBlockstore returns a single Blockstore that will try to read from +// all of the blockstores tracked by this multistore +func (mds *MultiStore) MultiReadBlockstore() blockstore.Blockstore { + return &multiReadBs{mds} +} + +// StoreIDList is just a list of StoreID that implements sort.Interface +type StoreIDList []StoreID + +func (s StoreIDList) Len() int { + return len(s) +} + +func (s StoreIDList) Less(i, j int) bool { + return s[i] < s[j] +} + +func (s StoreIDList) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/filecoin-project/go-multistore/store.go b/vendor/github.com/filecoin-project/go-multistore/store.go new file mode 100644 index 0000000000..b0eaea2268 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-multistore/store.go @@ -0,0 +1,67 @@ +package multistore + +import ( + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" + "github.com/ipfs/go-filestore" + 
"github.com/ipfs/go-graphsync/storeutil" + blockstore "github.com/ipfs/go-ipfs-blockstore" + offline "github.com/ipfs/go-ipfs-exchange-offline" + ipld "github.com/ipfs/go-ipld-format" + "github.com/ipfs/go-merkledag" + ipldprime "github.com/ipld/go-ipld-prime" +) + +// Store is a single store instance returned by the MultiStore. +// it gives public access to the blockstore, filestore, dag service, +// and an ipld-prime loader/storer +type Store struct { + ds datastore.Batching + + fm *filestore.FileManager + Fstore *filestore.Filestore + + Bstore blockstore.Blockstore + + bsvc blockservice.BlockService + DAG ipld.DAGService + Loader ipldprime.Loader + Storer ipldprime.Storer +} + +func openStore(ds datastore.Batching) (*Store, error) { + blocks := namespace.Wrap(ds, datastore.NewKey("blocks")) + bs := blockstore.NewBlockstore(blocks) + + fm := filestore.NewFileManager(ds, "/") + fm.AllowFiles = true + + fstore := filestore.NewFilestore(bs, fm) + ibs := blockstore.NewIdStore(fstore) + + bsvc := blockservice.New(ibs, offline.Exchange(ibs)) + dag := merkledag.NewDAGService(bsvc) + + loader := storeutil.LoaderForBlockstore(ibs) + storer := storeutil.StorerForBlockstore(ibs) + + return &Store{ + ds: ds, + + fm: fm, + Fstore: fstore, + + Bstore: ibs, + + bsvc: bsvc, + DAG: dag, + Loader: loader, + Storer: storer, + }, nil +} + +// Close closes down the blockservice used by the DAG Service for this store +func (s *Store) Close() error { + return s.bsvc.Close() +} diff --git a/vendor/github.com/filecoin-project/go-state-types/COPYRIGHT b/vendor/github.com/filecoin-project/go-state-types/COPYRIGHT new file mode 100644 index 0000000000..6aa4b36128 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/COPYRIGHT @@ -0,0 +1,3 @@ +Copyright 2020. Protocol Labs, Inc. + +This library is dual-licensed under Apache 2.0 and MIT terms. 
diff --git a/vendor/github.com/filecoin-project/go-state-types/LICENSE-APACHE b/vendor/github.com/filecoin-project/go-state-types/LICENSE-APACHE new file mode 100644 index 0000000000..22608cf836 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/LICENSE-APACHE @@ -0,0 +1,13 @@ +Copyright 2020. Protocol Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/github.com/filecoin-project/go-state-types/LICENSE-MIT b/vendor/github.com/filecoin-project/go-state-types/LICENSE-MIT new file mode 100644 index 0000000000..c6134ad88a --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/LICENSE-MIT @@ -0,0 +1,19 @@ +Copyright 2020. Protocol Labs, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/abi/primitives.go b/vendor/github.com/filecoin-project/go-state-types/abi/actor.go similarity index 56% rename from vendor/github.com/filecoin-project/specs-actors/actors/abi/primitives.go rename to vendor/github.com/filecoin-project/go-state-types/abi/actor.go index 8b892bcd73..26534a2de5 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/abi/primitives.go +++ b/vendor/github.com/filecoin-project/go-state-types/abi/actor.go @@ -1,22 +1,6 @@ package abi -import ( - "strconv" - - "github.com/filecoin-project/specs-actors/actors/abi/big" -) - -// The abi package contains definitions of all types that cross the VM boundary and are used -// within actor code. -// -// Primitive types include numerics and opaque array types. - -// Epoch number of the chain state, which acts as a proxy for time within the VM. -type ChainEpoch int64 - -func (e ChainEpoch) String() string { - return strconv.FormatInt(int64(e), 10) -} +import "strconv" // A sequential number assigned to an actor when created by the InitActor. // This ID is embedded in ID-type addresses. @@ -46,23 +30,9 @@ func (e MethodNum) String() string { return strconv.FormatInt(int64(e), 10) } -// TokenAmount is an amount of Filecoin tokens. This type is used within -// the VM in message execution, to account movement of tokens, payment -// of VM gas, and more. -// -// BigInt types are aliases rather than new types because the latter introduce incredible amounts of noise converting to -// and from types in order to manipulate values. We give up some type safety for ergonomics. 
-type TokenAmount = big.Int - -func NewTokenAmount(t int64) TokenAmount { - return big.NewInt(t) -} - -// Randomness is a string of random bytes -type Randomness []byte - // Multiaddrs is a byte array representing a Libp2p MultiAddress type Multiaddrs = []byte // PeerID is a byte array representing a Libp2p PeerID type PeerID = []byte + diff --git a/vendor/github.com/filecoin-project/go-state-types/abi/cbor_gen.go b/vendor/github.com/filecoin-project/go-state-types/abi/cbor_gen.go new file mode 100644 index 0000000000..11bff65453 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/abi/cbor_gen.go @@ -0,0 +1,165 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package abi + +import ( + "fmt" + "io" + + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf + +var lengthBufPieceInfo = []byte{130} + +func (t *PieceInfo) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufPieceInfo); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Size (abi.PaddedPieceSize) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil { + return err + } + + // t.PieceCID (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.PieceCID); err != nil { + return xerrors.Errorf("failed to write cid field t.PieceCID: %w", err) + } + + return nil +} + +func (t *PieceInfo) UnmarshalCBOR(r io.Reader) error { + *t = PieceInfo{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Size (abi.PaddedPieceSize) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) 
+ if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Size = PaddedPieceSize(extra) + + } + // t.PieceCID (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.PieceCID: %w", err) + } + + t.PieceCID = c + + } + return nil +} + +var lengthBufSectorID = []byte{130} + +func (t *SectorID) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSectorID); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Miner (abi.ActorID) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Miner)); err != nil { + return err + } + + // t.Number (abi.SectorNumber) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Number)); err != nil { + return err + } + + return nil +} + +func (t *SectorID) UnmarshalCBOR(r io.Reader) error { + *t = SectorID{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miner (abi.ActorID) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Miner = ActorID(extra) + + } + // t.Number (abi.SectorNumber) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Number = SectorNumber(extra) + + } + return nil +} diff --git a/vendor/github.com/filecoin-project/go-state-types/abi/chain.go.go 
b/vendor/github.com/filecoin-project/go-state-types/abi/chain.go.go new file mode 100644 index 0000000000..d7cf0fa4af --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/abi/chain.go.go @@ -0,0 +1,33 @@ +package abi + +import ( + "strconv" + + "github.com/filecoin-project/go-state-types/big" +) + +// Epoch number of the chain state, which acts as a proxy for time within the VM. +type ChainEpoch int64 + +func (e ChainEpoch) String() string { + return strconv.FormatInt(int64(e), 10) +} + +// TokenAmount is an amount of Filecoin tokens. This type is used within +// the VM in message execution, to account movement of tokens, payment +// of VM gas, and more. +// +// BigInt types are aliases rather than new types because the latter introduce incredible amounts of noise converting to +// and from types in order to manipulate values. We give up some type safety for ergonomics. +type TokenAmount = big.Int + +func NewTokenAmount(t int64) TokenAmount { + return big.NewInt(t) +} + +// Randomness is a string of random bytes +type Randomness []byte + +// RandomnessLength is the length of the randomness slice. +const RandomnessLength = 32 + diff --git a/vendor/github.com/filecoin-project/go-state-types/abi/cid.go b/vendor/github.com/filecoin-project/go-state-types/abi/cid.go new file mode 100644 index 0000000000..e47ba044ae --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/abi/cid.go @@ -0,0 +1,49 @@ +package abi + +import ( + cid "github.com/ipfs/go-cid" + mh "github.com/multiformats/go-multihash" +) + +var ( + // HashFunction is the default hash function for computing CIDs. + // + // This is currently Blake2b-256. + HashFunction = uint64(mh.BLAKE2B_MIN + 31) + + // When producing a CID for an IPLD block less than or equal to CIDInlineLimit + // bytes in length, the identity hash function will be used instead of + // HashFunction. 
This will effectively "inline" the block into the CID, allowing + // it to be extracted directly from the CID with no disk/network operations. + // + // This is currently -1 for "disabled". + // + // This is exposed for testing. Do not modify unless you know what you're doing. + CIDInlineLimit = -1 +) + +type cidBuilder struct { + codec uint64 +} + +func (cidBuilder) WithCodec(c uint64) cid.Builder { + return cidBuilder{codec: c} +} + +func (b cidBuilder) GetCodec() uint64 { + return b.codec +} + +func (b cidBuilder) Sum(data []byte) (cid.Cid, error) { + hf := HashFunction + if len(data) <= CIDInlineLimit { + hf = mh.IDENTITY + } + return cid.V1Builder{Codec: b.codec, MhType: hf}.Sum(data) +} + +// CidBuilder is the default CID builder for Filecoin. +// +// - The default codec is CBOR. This can be changed with CidBuilder.WithCodec. +// - The default hash function is 256bit blake2b. +var CidBuilder cid.Builder = cidBuilder{codec: cid.DagCBOR} diff --git a/vendor/github.com/filecoin-project/go-state-types/abi/deal.go b/vendor/github.com/filecoin-project/go-state-types/abi/deal.go new file mode 100644 index 0000000000..2208ab37b3 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/abi/deal.go @@ -0,0 +1,10 @@ +package abi + +import "github.com/filecoin-project/go-state-types/big" + +type DealID uint64 + +// BigInt types are aliases rather than new types because the latter introduce incredible amounts of noise +// converting to and from types in order to manipulate values. +// We give up some type safety for ergonomics. 
+type DealWeight = big.Int // units: byte-epochs diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/empty.go b/vendor/github.com/filecoin-project/go-state-types/abi/empty.go similarity index 86% rename from vendor/github.com/filecoin-project/specs-actors/actors/util/adt/empty.go rename to vendor/github.com/filecoin-project/go-state-types/abi/empty.go index d91a029ad5..d2927f031b 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/util/adt/empty.go +++ b/vendor/github.com/filecoin-project/go-state-types/abi/empty.go @@ -1,10 +1,10 @@ -package adt +package abi import ( "fmt" "io" - runtime "github.com/filecoin-project/specs-actors/actors/runtime" + cbg "github.com/whyrusleeping/cbor-gen" ) // The empty value represents absence of a value. It is used for parameter and return types for actor methods @@ -19,8 +19,8 @@ type EmptyValue struct{} // A typed nil pointer to EmptyValue. var Empty *EmptyValue = nil -var _ runtime.CBORMarshaler = (*EmptyValue)(nil) -var _ runtime.CBORUnmarshaler = (*EmptyValue)(nil) +var _ cbg.CBORMarshaler = (*EmptyValue)(nil) +var _ cbg.CBORUnmarshaler = (*EmptyValue)(nil) func (v *EmptyValue) MarshalCBOR(_ io.Writer) error { // An attempt to serialize a non-nil value indicates a caller mis-using this type. @@ -36,3 +36,4 @@ func (v *EmptyValue) UnmarshalCBOR(_ io.Reader) error { // Read zero bytes. return nil } + diff --git a/vendor/github.com/filecoin-project/go-state-types/abi/key.go b/vendor/github.com/filecoin-project/go-state-types/abi/key.go new file mode 100644 index 0000000000..d21c22373a --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/abi/key.go @@ -0,0 +1,76 @@ +package abi + +import ( + "encoding/binary" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" +) + +// Keyer defines an interface required to put values in mapping. +type Keyer interface { + Key() string +} + +// Adapts an address as a mapping key. 
+type AddrKey address.Address + +func (k AddrKey) Key() string { + return string(address.Address(k).Bytes()) +} + +type CidKey cid.Cid + +func (k CidKey) Key() string { + return cid.Cid(k).KeyString() +} + +// Adapts an int64 as a mapping key. +type intKey struct { + int64 +} + +//noinspection GoExportedFuncWithUnexportedType +func IntKey(k int64) intKey { + return intKey{k} +} + +func (k intKey) Key() string { + buf := make([]byte, 10) + n := binary.PutVarint(buf, k.int64) + return string(buf[:n]) +} + +//noinspection GoUnusedExportedFunction +func ParseIntKey(k string) (int64, error) { + i, n := binary.Varint([]byte(k)) + if n != len(k) { + return 0, errors.New("failed to decode varint key") + } + return i, nil +} + +// Adapts a uint64 as a mapping key. +type uintKey struct { + uint64 +} + +//noinspection GoExportedFuncWithUnexportedType +func UIntKey(k uint64) uintKey { + return uintKey{k} +} + +func (k uintKey) Key() string { + buf := make([]byte, 10) + n := binary.PutUvarint(buf, k.uint64) + return string(buf[:n]) +} + +func ParseUIntKey(k string) (uint64, error) { + i, n := binary.Uvarint([]byte(k)) + if n != len(k) { + return 0, errors.New("failed to decode varint key") + } + return i, nil +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/abi/piece.go b/vendor/github.com/filecoin-project/go-state-types/abi/piece.go similarity index 73% rename from vendor/github.com/filecoin-project/specs-actors/actors/abi/piece.go rename to vendor/github.com/filecoin-project/go-state-types/abi/piece.go index f54ce568e6..615136de05 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/abi/piece.go +++ b/vendor/github.com/filecoin-project/go-state-types/abi/piece.go @@ -4,7 +4,7 @@ import ( "math/bits" cid "github.com/ipfs/go-cid" - "github.com/pkg/errors" + "golang.org/x/xerrors" ) // UnpaddedPieceSize is the size of a piece, in bytes @@ -17,12 +17,12 @@ func (s UnpaddedPieceSize) Padded() PaddedPieceSize { func (s UnpaddedPieceSize) 
Validate() error { if s < 127 { - return errors.New("minimum piece size is 127 bytes") + return xerrors.New("minimum piece size is 127 bytes") } // is 127 * 2^n if uint64(s)>>bits.TrailingZeros64(uint64(s)) != 127 { - return errors.New("unpadded piece size must be a power of 2 multiple of 127") + return xerrors.New("unpadded piece size must be a power of 2 multiple of 127") } return nil @@ -34,11 +34,11 @@ func (s PaddedPieceSize) Unpadded() UnpaddedPieceSize { func (s PaddedPieceSize) Validate() error { if s < 128 { - return errors.New("minimum padded piece size is 128 bytes") + return xerrors.New("minimum padded piece size is 128 bytes") } if bits.OnesCount64(uint64(s)) != 1 { - return errors.New("padded piece size must be a power of 2") + return xerrors.New("padded piece size must be a power of 2") } return nil diff --git a/vendor/github.com/filecoin-project/go-state-types/abi/sector.go b/vendor/github.com/filecoin-project/go-state-types/abi/sector.go new file mode 100644 index 0000000000..35ceaa5142 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/abi/sector.go @@ -0,0 +1,190 @@ +package abi + +import ( + "fmt" + "math" + "strconv" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/big" +) + +// SectorNumber is a numeric identifier for a sector. It is usually relative to a miner. +type SectorNumber uint64 + +func (s SectorNumber) String() string { + return strconv.FormatUint(uint64(s), 10) +} + +// The maximum assignable sector number. +// Raising this would require modifying our AMT implementation. +const MaxSectorNumber = math.MaxInt64 + +// SectorSize indicates one of a set of possible sizes in the network. 
+// Ideally, SectorSize would be an enum +// type SectorSize enum { +// 1KiB = 1024 +// 1MiB = 1048576 +// 1GiB = 1073741824 +// 1TiB = 1099511627776 +// 1PiB = 1125899906842624 +// 1EiB = 1152921504606846976 +// max = 18446744073709551615 +// } +type SectorSize uint64 + +// Formats the size as a decimal string. +func (s SectorSize) String() string { + return strconv.FormatUint(uint64(s), 10) +} + +// Abbreviates the size as a human-scale number. +// This approximates (truncates) the size unless it is a power of 1024. +func (s SectorSize) ShortString() string { + var biUnits = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"} + unit := 0 + for s >= 1024 && unit < len(biUnits)-1 { + s /= 1024 + unit++ + } + return fmt.Sprintf("%d%s", s, biUnits[unit]) +} + +type SectorID struct { + Miner ActorID + Number SectorNumber +} + +// The unit of storage power (measured in bytes) +type StoragePower = big.Int + +type SectorQuality = big.Int + +func NewStoragePower(n int64) StoragePower { + return big.NewInt(n) +} + +// These enumerations must match the proofs library and never change. 
+type RegisteredSealProof int64 + +const ( + RegisteredSealProof_StackedDrg2KiBV1 = RegisteredSealProof(0) + RegisteredSealProof_StackedDrg8MiBV1 = RegisteredSealProof(1) + RegisteredSealProof_StackedDrg512MiBV1 = RegisteredSealProof(2) + RegisteredSealProof_StackedDrg32GiBV1 = RegisteredSealProof(3) + RegisteredSealProof_StackedDrg64GiBV1 = RegisteredSealProof(4) +) + +type RegisteredPoStProof int64 + +const ( + RegisteredPoStProof_StackedDrgWinning2KiBV1 = RegisteredPoStProof(0) + RegisteredPoStProof_StackedDrgWinning8MiBV1 = RegisteredPoStProof(1) + RegisteredPoStProof_StackedDrgWinning512MiBV1 = RegisteredPoStProof(2) + RegisteredPoStProof_StackedDrgWinning32GiBV1 = RegisteredPoStProof(3) + RegisteredPoStProof_StackedDrgWinning64GiBV1 = RegisteredPoStProof(4) + RegisteredPoStProof_StackedDrgWindow2KiBV1 = RegisteredPoStProof(5) + RegisteredPoStProof_StackedDrgWindow8MiBV1 = RegisteredPoStProof(6) + RegisteredPoStProof_StackedDrgWindow512MiBV1 = RegisteredPoStProof(7) + RegisteredPoStProof_StackedDrgWindow32GiBV1 = RegisteredPoStProof(8) + RegisteredPoStProof_StackedDrgWindow64GiBV1 = RegisteredPoStProof(9) +) + +// Metadata about a seal proof type. 
+type SealProofInfo struct { + SectorSize SectorSize + WinningPoStProof RegisteredPoStProof + WindowPoStProof RegisteredPoStProof +} + +var SealProofInfos = map[RegisteredSealProof]*SealProofInfo{ + RegisteredSealProof_StackedDrg2KiBV1: { + SectorSize: 2 << 10, + WinningPoStProof: RegisteredPoStProof_StackedDrgWinning2KiBV1, + WindowPoStProof: RegisteredPoStProof_StackedDrgWindow2KiBV1, + }, + RegisteredSealProof_StackedDrg8MiBV1: { + SectorSize: 8 << 20, + WinningPoStProof: RegisteredPoStProof_StackedDrgWinning8MiBV1, + WindowPoStProof: RegisteredPoStProof_StackedDrgWindow8MiBV1, + }, + RegisteredSealProof_StackedDrg512MiBV1: { + SectorSize: 512 << 20, + WinningPoStProof: RegisteredPoStProof_StackedDrgWinning512MiBV1, + WindowPoStProof: RegisteredPoStProof_StackedDrgWindow512MiBV1, + }, + RegisteredSealProof_StackedDrg32GiBV1: { + SectorSize: 32 << 30, + WinningPoStProof: RegisteredPoStProof_StackedDrgWinning32GiBV1, + WindowPoStProof: RegisteredPoStProof_StackedDrgWindow32GiBV1, + }, + RegisteredSealProof_StackedDrg64GiBV1: { + SectorSize: 64 << 30, + WinningPoStProof: RegisteredPoStProof_StackedDrgWinning64GiBV1, + WindowPoStProof: RegisteredPoStProof_StackedDrgWindow64GiBV1, + }, +} + +func (p RegisteredSealProof) SectorSize() (SectorSize, error) { + info, ok := SealProofInfos[p] + if !ok { + return 0, xerrors.Errorf("unsupported proof type: %v", p) + } + return info.SectorSize, nil +} + +// RegisteredWinningPoStProof produces the PoSt-specific RegisteredProof corresponding +// to the receiving RegisteredProof. +func (p RegisteredSealProof) RegisteredWinningPoStProof() (RegisteredPoStProof, error) { + info, ok := SealProofInfos[p] + if !ok { + return 0, xerrors.Errorf("unsupported proof type: %v", p) + } + return info.WinningPoStProof, nil +} + +// RegisteredWindowPoStProof produces the PoSt-specific RegisteredProof corresponding +// to the receiving RegisteredProof. 
+func (p RegisteredSealProof) RegisteredWindowPoStProof() (RegisteredPoStProof, error) { + info, ok := SealProofInfos[p] + if !ok { + return 0, xerrors.Errorf("unsupported proof type: %v", p) + } + return info.WindowPoStProof, nil +} + +var PoStSealProofTypes = map[RegisteredPoStProof]RegisteredSealProof{ + RegisteredPoStProof_StackedDrgWinning2KiBV1: RegisteredSealProof_StackedDrg2KiBV1, + RegisteredPoStProof_StackedDrgWindow2KiBV1: RegisteredSealProof_StackedDrg2KiBV1, + RegisteredPoStProof_StackedDrgWinning8MiBV1: RegisteredSealProof_StackedDrg8MiBV1, + RegisteredPoStProof_StackedDrgWindow8MiBV1: RegisteredSealProof_StackedDrg8MiBV1, + RegisteredPoStProof_StackedDrgWinning512MiBV1: RegisteredSealProof_StackedDrg512MiBV1, + RegisteredPoStProof_StackedDrgWindow512MiBV1: RegisteredSealProof_StackedDrg512MiBV1, + RegisteredPoStProof_StackedDrgWinning32GiBV1: RegisteredSealProof_StackedDrg32GiBV1, + RegisteredPoStProof_StackedDrgWindow32GiBV1: RegisteredSealProof_StackedDrg32GiBV1, + RegisteredPoStProof_StackedDrgWinning64GiBV1: RegisteredSealProof_StackedDrg64GiBV1, + RegisteredPoStProof_StackedDrgWindow64GiBV1: RegisteredSealProof_StackedDrg64GiBV1, +} + +// Maps PoSt proof types back to seal proof types. 
+func (p RegisteredPoStProof) RegisteredSealProof() (RegisteredSealProof, error) { + sp, ok := PoStSealProofTypes[p] + if !ok { + return 0, xerrors.Errorf("unsupported PoSt proof type: %v", p) + } + return sp, nil +} + +func (p RegisteredPoStProof) SectorSize() (SectorSize, error) { + sp, err := p.RegisteredSealProof() + if err != nil { + return 0, err + } + return sp.SectorSize() +} + +type SealRandomness Randomness +type InteractiveSealRandomness Randomness +type PoStRandomness Randomness + diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/abi/big/int.go b/vendor/github.com/filecoin-project/go-state-types/big/int.go similarity index 82% rename from vendor/github.com/filecoin-project/specs-actors/actors/abi/big/int.go rename to vendor/github.com/filecoin-project/go-state-types/big/int.go index 05fbf05115..5ba153695a 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/abi/big/int.go +++ b/vendor/github.com/filecoin-project/go-state-types/big/int.go @@ -24,6 +24,10 @@ func NewIntUnsigned(i uint64) Int { return Int{big.NewInt(0).SetUint64(i)} } +func NewFromGo(i *big.Int) Int { + return Int{big.NewInt(0).Set(i)} +} + func Zero() Int { return NewInt(0) } @@ -35,6 +39,16 @@ func PositiveFromUnsignedBytes(b []byte) Int { return Int{i} } +// MustFromString convers dec string into big integer and panics if conversion +// is not sucessful. 
+func MustFromString(s string) Int { + v, err := FromString(s) + if err != nil { + panic(err) + } + return v +} + func FromString(s string) (Int, error) { v, ok := big.NewInt(0).SetString(s, 10) if !ok { @@ -45,9 +59,15 @@ func FromString(s string) (Int, error) { } func (bi Int) Copy() Int { - cpy := Int{} - cpy.Int.Set(bi.Int) - return cpy + return Int{Int: new(big.Int).Set(bi.Int)} +} + +func Product(ints ...Int) Int { + p := NewInt(1) + for _, i := range ints { + p = Mul(p, i) + } + return p } func Mul(a, b Int) Int { @@ -66,14 +86,22 @@ func Add(a, b Int) Int { return Int{big.NewInt(0).Add(a.Int, b.Int)} } -func Sum(num1 Int, ints ...Int) Int { - sum := num1 +func Sum(ints ...Int) Int { + sum := Zero() for _, i := range ints { sum = Add(sum, i) } return sum } +func Subtract(num1 Int, ints ...Int) Int { + sub := num1 + for _, i := range ints { + sub = Sub(sub, i) + } + return sub +} + func Sub(a, b Int) Int { return Int{big.NewInt(0).Sub(a.Int, b.Int)} } @@ -154,6 +182,14 @@ func (bi Int) Neg() Int { return Int{big.NewInt(0).Neg(bi.Int)} } +// Abs returns the absolute value of bi. 
+func (bi Int) Abs() Int { + if bi.GreaterThanEqual(Zero()) { + return bi.Copy() + } + return bi.Neg() +} + // Equals returns true if bi == o func (bi Int) Equals(o Int) bool { return Cmp(bi, o) == 0 @@ -250,7 +286,12 @@ func (bi *Int) MarshalCBOR(w io.Writer) error { return err } - header := cbg.CborEncodeMajorType(cbg.MajByteString, uint64(len(enc))) + encLen := len(enc) + if encLen > BigIntMaxSerializedLen { + return fmt.Errorf("big integer byte array too long (%d bytes)", encLen) + } + + header := cbg.CborEncodeMajorType(cbg.MajByteString, uint64(encLen)) if _, err := w.Write(header); err != nil { return err } @@ -278,7 +319,7 @@ func (bi *Int) UnmarshalCBOR(br io.Reader) error { } if extra > BigIntMaxSerializedLen { - return fmt.Errorf("big integer byte array too long") + return fmt.Errorf("big integer byte array too long (%d bytes)", extra) } buf := make([]byte, extra) @@ -303,3 +344,7 @@ func (bi *Int) IsZero() bool { func (bi *Int) Nil() bool { return bi.Int == nil } + +func (bi *Int) NilOrZero() bool { + return bi.Int == nil || bi.Int.Sign() == 0 +} diff --git a/vendor/github.com/filecoin-project/go-state-types/cbor/cbor.go b/vendor/github.com/filecoin-project/go-state-types/cbor/cbor.go new file mode 100644 index 0000000000..86f70df389 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/cbor/cbor.go @@ -0,0 +1,18 @@ +package cbor + +import "io" + +// These interfaces are intended to match those from whyrusleeping/cbor-gen, such that code generated from that +// system is automatically usable here (but not mandatory). 
+type Marshaler interface { + MarshalCBOR(w io.Writer) error +} + +type Unmarshaler interface { + UnmarshalCBOR(r io.Reader) error +} + +type Er interface { + Marshaler + Unmarshaler +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/crypto/randomness.go b/vendor/github.com/filecoin-project/go-state-types/crypto/randomness.go similarity index 79% rename from vendor/github.com/filecoin-project/specs-actors/actors/crypto/randomness.go rename to vendor/github.com/filecoin-project/go-state-types/crypto/randomness.go index aa04ef3536..0e0e8f5cd0 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/crypto/randomness.go +++ b/vendor/github.com/filecoin-project/go-state-types/crypto/randomness.go @@ -1,7 +1,7 @@ package crypto // Specifies a domain for randomness generation. -type DomainSeparationTag int +type DomainSeparationTag int64 const ( DomainSeparationTag_TicketProduction DomainSeparationTag = 1 + iota @@ -11,4 +11,6 @@ const ( DomainSeparationTag_SealRandomness DomainSeparationTag_InteractiveSealChallengeSeed DomainSeparationTag_WindowedPoStDeadlineAssignment + DomainSeparationTag_MarketDealCronSeed + DomainSeparationTag_PoStChainCommit ) diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/crypto/signature.go b/vendor/github.com/filecoin-project/go-state-types/crypto/signature.go similarity index 90% rename from vendor/github.com/filecoin-project/specs-actors/actors/crypto/signature.go rename to vendor/github.com/filecoin-project/go-state-types/crypto/signature.go index e2bf0be973..f856980729 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/crypto/signature.go +++ b/vendor/github.com/filecoin-project/go-state-types/crypto/signature.go @@ -71,10 +71,13 @@ func (s *Signature) UnmarshalCBOR(br io.Reader) error { } if maj != cbg.MajByteString { - return fmt.Errorf("cbor input for signature was not a byte string") + return fmt.Errorf("not a byte string") } if l > SignatureMaxLength { - return 
fmt.Errorf("cbor byte array for signature was too long") + return fmt.Errorf("string too long") + } + if l == 0 { + return fmt.Errorf("string empty") } buf := make([]byte, l) if _, err = io.ReadFull(br, buf); err != nil { @@ -100,6 +103,9 @@ func (s *Signature) MarshalBinary() ([]byte, error) { } func (s *Signature) UnmarshalBinary(bs []byte) error { + if len(bs) > SignatureMaxLength { + return fmt.Errorf("invalid signature bytes, too long (%d)", len(bs)) + } if len(bs) == 0 { return fmt.Errorf("invalid signature bytes of length 0") } diff --git a/vendor/github.com/filecoin-project/go-state-types/dline/deadline.go b/vendor/github.com/filecoin-project/go-state-types/dline/deadline.go new file mode 100644 index 0000000000..c2bafa72d3 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/dline/deadline.go @@ -0,0 +1,120 @@ +package dline + +import "github.com/filecoin-project/go-state-types/abi" + +// Deadline calculations with respect to a current epoch. +// "Deadline" refers to the window during which proofs may be submitted. +// Windows are non-overlapping ranges [Open, Close), but the challenge epoch for a window occurs before +// the window opens. +// The current epoch may not necessarily lie within the deadline or proving period represented here. +type Info struct { + // Deadline parameters + CurrentEpoch abi.ChainEpoch // Epoch at which this info was calculated. + PeriodStart abi.ChainEpoch // First epoch of the proving period (<= CurrentEpoch). + Index uint64 // A deadline index, in [0..d.WPoStProvingPeriodDeadlines) unless period elapsed. + Open abi.ChainEpoch // First epoch from which a proof may be submitted (>= CurrentEpoch). + Close abi.ChainEpoch // First epoch from which a proof may no longer be submitted (>= Open). + Challenge abi.ChainEpoch // Epoch at which to sample the chain for challenge (< Open). + FaultCutoff abi.ChainEpoch // First epoch at which a fault declaration is rejected (< Open). 
+ + // Protocol parameters + WPoStPeriodDeadlines uint64 + WPoStProvingPeriod abi.ChainEpoch // the number of epochs in a window post proving period + WPoStChallengeWindow abi.ChainEpoch + WPoStChallengeLookback abi.ChainEpoch + FaultDeclarationCutoff abi.ChainEpoch +} + +// Whether the proving period has begun. +func (d *Info) PeriodStarted() bool { + return d.CurrentEpoch >= d.PeriodStart +} + +// Whether the proving period has elapsed. +func (d *Info) PeriodElapsed() bool { + return d.CurrentEpoch >= d.NextPeriodStart() +} + +// The last epoch in the proving period. +func (d *Info) PeriodEnd() abi.ChainEpoch { + return d.PeriodStart + d.WPoStProvingPeriod - 1 +} + +// The first epoch in the next proving period. +func (d *Info) NextPeriodStart() abi.ChainEpoch { + return d.PeriodStart + d.WPoStProvingPeriod +} + +// Whether the current deadline is currently open. +func (d *Info) IsOpen() bool { + return d.CurrentEpoch >= d.Open && d.CurrentEpoch < d.Close +} + +// Whether the current deadline has already closed. +func (d *Info) HasElapsed() bool { + return d.CurrentEpoch >= d.Close +} + +// The last epoch during which a proof may be submitted. +func (d *Info) Last() abi.ChainEpoch { + return d.Close - 1 +} + +// Epoch at which the subsequent deadline opens. +func (d *Info) NextOpen() abi.ChainEpoch { + return d.Close +} + +// Whether the deadline's fault cutoff has passed. +func (d *Info) FaultCutoffPassed() bool { + return d.CurrentEpoch >= d.FaultCutoff +} + +// Returns the next instance of this deadline that has not yet elapsed. +func (d *Info) NextNotElapsed() *Info { + next := d + for next.HasElapsed() { + next = NewInfo(next.NextPeriodStart(), d.Index, d.CurrentEpoch, d.WPoStPeriodDeadlines, d.WPoStProvingPeriod, d.WPoStChallengeWindow, d.WPoStChallengeLookback, d.FaultDeclarationCutoff) + } + return next +} + +// Returns deadline-related calculations for a deadline in some proving period and the current epoch. 
+func NewInfo(periodStart abi.ChainEpoch, deadlineIdx uint64, currEpoch abi.ChainEpoch, wPoStPeriodDeadlines uint64, wPoStProvingPeriod, wPoStChallengeWindow, wPoStChallengeLookback, faultDeclarationCutoff abi.ChainEpoch) *Info { + if deadlineIdx < wPoStPeriodDeadlines { + deadlineOpen := periodStart + (abi.ChainEpoch(deadlineIdx) * wPoStChallengeWindow) + return &Info{ + CurrentEpoch: currEpoch, + PeriodStart: periodStart, + Index: deadlineIdx, + Open: deadlineOpen, + Close: deadlineOpen + wPoStChallengeWindow, + Challenge: deadlineOpen - wPoStChallengeLookback, + FaultCutoff: deadlineOpen - faultDeclarationCutoff, + // parameters + WPoStPeriodDeadlines: wPoStPeriodDeadlines, + WPoStProvingPeriod: wPoStProvingPeriod, + WPoStChallengeWindow: wPoStChallengeWindow, + WPoStChallengeLookback: wPoStChallengeLookback, + FaultDeclarationCutoff: faultDeclarationCutoff, + } + } else { + // Return deadline info for a no-duration deadline immediately after the last real one. + afterLastDeadline := periodStart + wPoStProvingPeriod + return &Info{ + CurrentEpoch: currEpoch, + PeriodStart: periodStart, + Index: deadlineIdx, + Open: afterLastDeadline, + Close: afterLastDeadline, + Challenge: afterLastDeadline, + FaultCutoff: 0, + // parameters + WPoStPeriodDeadlines: wPoStPeriodDeadlines, + WPoStProvingPeriod: wPoStProvingPeriod, + WPoStChallengeWindow: wPoStChallengeWindow, + WPoStChallengeLookback: wPoStChallengeLookback, + FaultDeclarationCutoff: faultDeclarationCutoff, + } + } +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/exitcode/common.go b/vendor/github.com/filecoin-project/go-state-types/exitcode/common.go similarity index 77% rename from vendor/github.com/filecoin-project/specs-actors/actors/runtime/exitcode/common.go rename to vendor/github.com/filecoin-project/go-state-types/exitcode/common.go index 841929bda0..f3172c2366 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/exitcode/common.go +++ 
b/vendor/github.com/filecoin-project/go-state-types/exitcode/common.go @@ -16,6 +16,8 @@ const ( ErrIllegalState // Indicates de/serialization failure within actor code. ErrSerialization - // An error code intended to be replaced by different code structure or a more descriptive error. - ErrPlaceholder = ExitCode(1000) + + // Common error codes stop here. If you define a common error code above + // this value it will have conflicting interpretations + FirstActorSpecificExitCode = ExitCode(32) ) diff --git a/vendor/github.com/filecoin-project/go-state-types/exitcode/exitcode.go b/vendor/github.com/filecoin-project/go-state-types/exitcode/exitcode.go new file mode 100644 index 0000000000..d604909517 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/exitcode/exitcode.go @@ -0,0 +1,96 @@ +package exitcode + +import ( + "errors" + "fmt" + "strconv" + + "golang.org/x/xerrors" +) + +type ExitCode int64 + +func (x ExitCode) IsSuccess() bool { + return x == Ok +} + +func (x ExitCode) IsError() bool { + return !x.IsSuccess() +} + +// Whether an exit code indicates a message send failure. +// A send failure means that the caller's CallSeqNum is not incremented and the caller has not paid +// gas fees for the message (because the caller doesn't exist or can't afford it). +// A receipt with send failure does not indicate that the message (or another one carrying the same CallSeqNum) +// could not apply in the future, against a different state. +func (x ExitCode) IsSendFailure() bool { + return x == SysErrSenderInvalid || x == SysErrSenderStateInvalid +} + +// A non-canonical string representation for human inspection. +func (x ExitCode) String() string { + name, ok := names[x] + if ok { + return fmt.Sprintf("%s(%d)", name, x) + } + return strconv.FormatInt(int64(x), 10) +} + +// Implement error to trigger Go compiler checking of exit code return values. 
+func (x ExitCode) Error() string { + return x.String() +} + +// Wrapf attaches an error message, and possibly an error, to the exit +// code. +// +// err := ErrIllegalArgument.Wrapf("my description: %w", err) +// exitcode.Unwrap(exitcode.ErrIllegalState, err) == exitcode.ErrIllegalArgument +func (x ExitCode) Wrapf(msg string, args ...interface{}) error { + return &wrapped{ + exitCode: x, + cause: xerrors.Errorf(msg, args...), + } +} + +type wrapped struct { + exitCode ExitCode + cause error +} + +func (w *wrapped) String() string { + return w.Error() +} + +func (w *wrapped) Error() string { + // Don't include the exit code. That will be handled by the runtime and + // this error has likely been wrapped multiple times. + return w.cause.Error() +} + +// implements the interface required by errors.As +func (w *wrapped) As(target interface{}) bool { + return errors.As(w.exitCode, target) || errors.As(w.cause, target) +} + +// implements the interface required by errors.Is +func (w *wrapped) Is(target error) bool { + if _, ok := target.(ExitCode); ok { + // If the target is an exit code, make sure we shadow lower exit + // codes. + return w.exitCode == target + } + return errors.Is(w.cause, target) +} + +// Unwrap extracts an exit code from an error, defaulting to the passed default +// exit code. 
+// +// err := ErrIllegalState.WithContext("my description: %w", err) +// exitcode.Unwrap(exitcode.ErrIllegalState, err) == exitcode.ErrIllegalArgument +func Unwrap(err error, defaultExitCode ExitCode) (code ExitCode) { + if errors.As(err, &code) { + return code + } + return defaultExitCode +} diff --git a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/exitcode/reserved.go b/vendor/github.com/filecoin-project/go-state-types/exitcode/reserved.go similarity index 61% rename from vendor/github.com/filecoin-project/specs-actors/actors/runtime/exitcode/reserved.go rename to vendor/github.com/filecoin-project/go-state-types/exitcode/reserved.go index acf6197f37..a8c11225ce 100644 --- a/vendor/github.com/filecoin-project/specs-actors/actors/runtime/exitcode/reserved.go +++ b/vendor/github.com/filecoin-project/go-state-types/exitcode/reserved.go @@ -1,43 +1,5 @@ package exitcode -import ( - "fmt" - "strconv" -) - -type ExitCode int64 - -func (x ExitCode) IsSuccess() bool { - return x == Ok -} - -func (x ExitCode) IsError() bool { - return !x.IsSuccess() -} - -// Whether an exit code indicates a message send failure. -// A send failure means that the caller's CallSeqNum is not incremented and the caller has not paid -// gas fees for the message (because the caller doesn't exist or can't afford it). -// A receipt with send failure does not indicate that the message (or another one carrying the same CallSeqNum) -// could not apply in the future, against a different state. -func (x ExitCode) IsSendFailure() bool { - return x == SysErrSenderInvalid || x == SysErrSenderStateInvalid -} - -// A non-canonical string representation for human inspection. -func (x ExitCode) String() string { - name, ok := names[x] - if ok { - return fmt.Sprintf("%s(%d)", name, x) - } - return strconv.FormatInt(int64(x), 10) -} - -// Implement error to trigger Go compiler checking of exit code return values. 
-func (x ExitCode) Error() string { - return x.String() -} - // The system error codes are reserved for use by the runtime. // No actor may use one explicitly. Correspondingly, no runtime invocation should abort with an exit // code outside this list. @@ -60,8 +22,8 @@ const ( // Indicates failure to find a method in an actor. SysErrInvalidMethod = ExitCode(3) - // Indicates non-decodeable or syntactically invalid parameters for a method. - SysErrInvalidParameters = ExitCode(4) + // Unused. + SysErrReserved1 = ExitCode(4) // Indicates that the receiver of a message is not valid (and cannot be implicitly created). SysErrInvalidReceiver = ExitCode(5) @@ -86,13 +48,12 @@ const ( // Indicates an invalid argument passed to a runtime method. SysErrorIllegalArgument = ExitCode(10) - // Indicates an object failed to de/serialize for storage. - SysErrSerialization = ExitCode(11) - - SysErrorReserved3 = ExitCode(12) - SysErrorReserved4 = ExitCode(13) - SysErrorReserved5 = ExitCode(14) - SysErrorReserved6 = ExitCode(15) + // Unused + SysErrReserved2 = ExitCode(11) + SysErrReserved3 = ExitCode(12) + SysErrReserved4 = ExitCode(13) + SysErrReserved5 = ExitCode(14) + SysErrReserved6 = ExitCode(15) ) // The initial range of exit codes is reserved for system errors. 
@@ -104,16 +65,16 @@ var names = map[ExitCode]string{ SysErrSenderInvalid: "SysErrSenderInvalid", SysErrSenderStateInvalid: "SysErrSenderStateInvalid", SysErrInvalidMethod: "SysErrInvalidMethod", - SysErrInvalidParameters: "SysErrInvalidParameters", + SysErrReserved1: "SysErrReserved1", SysErrInvalidReceiver: "SysErrInvalidReceiver", SysErrInsufficientFunds: "SysErrInsufficientFunds", SysErrOutOfGas: "SysErrOutOfGas", SysErrForbidden: "SysErrForbidden", SysErrorIllegalActor: "SysErrorIllegalActor", SysErrorIllegalArgument: "SysErrorIllegalArgument", - SysErrSerialization: "SysErrSerialization", - SysErrorReserved3: "SysErrorReserved3", - SysErrorReserved4: "SysErrorReserved4", - SysErrorReserved5: "SysErrorReserved5", - SysErrorReserved6: "SysErrorReserved6", + SysErrReserved2: "SysErrReserved2", + SysErrReserved3: "SysErrReserved3", + SysErrReserved4: "SysErrReserved4", + SysErrReserved5: "SysErrReserved5", + SysErrReserved6: "SysErrReserved6", } diff --git a/vendor/github.com/filecoin-project/go-state-types/network/version.go b/vendor/github.com/filecoin-project/go-state-types/network/version.go new file mode 100644 index 0000000000..1505f68727 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/network/version.go @@ -0,0 +1,20 @@ +package network + +import "math" + +// Enumeration of network upgrades where actor behaviour can change (without necessarily +// vendoring and versioning the whole actor codebase). 
+type Version uint + +const ( + Version0 = Version(iota) // genesis (specs-actors v0.9.3) + Version1 // breeze (specs-actors v0.9.7) + Version2 // smoke (specs-actors v0.9.8) + Version3 // ignition (specs-actors v0.9.11) + Version4 // actors v2 (specs-actors v2.0.3) + Version5 // tape (specs-actors v2.1.0) + Version6 // upcoming + + // VersionMax is the maximum version number + VersionMax = Version(math.MaxUint32) +) diff --git a/vendor/github.com/filecoin-project/go-state-types/rt/actor.go b/vendor/github.com/filecoin-project/go-state-types/rt/actor.go new file mode 100644 index 0000000000..26d6c130e5 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/rt/actor.go @@ -0,0 +1,33 @@ +package rt + +import ( + "github.com/filecoin-project/go-state-types/cbor" + "github.com/ipfs/go-cid" +) + +// VMActor is a concrete implementation of an actor, to be used by a Filecoin +// VM. +type VMActor interface { + // Exports returns a slice of methods exported by this actor, indexed by + // method number. Skipped/deprecated method numbers will be nil. + Exports() []interface{} + + // Code returns the code ID for this actor. + Code() cid.Cid + + // State returns a new State object for this actor. This can be used to + // decode the actor's state. + State() cbor.Er + + // NOTE: methods like "IsSingleton" are intentionally excluded from this + // interface. That way, we can add additional attributes actors in newer + // specs-actors versions, without having to update previous specs-actors + // versions. +} + +// IsSingletonActor returns true if the actor is a singleton actor (i.e., cannot +// be constructed). 
+func IsSingletonActor(a VMActor) bool { + s, ok := a.(interface{ IsSingleton() bool }) + return ok && s.IsSingleton() +} diff --git a/vendor/github.com/filecoin-project/go-state-types/rt/log.go b/vendor/github.com/filecoin-project/go-state-types/rt/log.go new file mode 100644 index 0000000000..d2dfff85d8 --- /dev/null +++ b/vendor/github.com/filecoin-project/go-state-types/rt/log.go @@ -0,0 +1,18 @@ +package rt + +// Specifies importance of message, LogLevel numbering is consistent with the uber-go/zap package. +type LogLevel int + +const ( + // DebugLevel logs are typically voluminous, and are usually disabled in + // production. + DEBUG LogLevel = iota - 1 + // InfoLevel is the default logging priority. + INFO + // WarnLevel logs are more important than Info, but don't need individual + // human review. + WARN + // ErrorLevel logs are high-priority. If an application is running smoothly, + // it shouldn't generate any error-level logs. + ERROR +) diff --git a/vendor/github.com/filecoin-project/go-statestore/.circleci/config.yml b/vendor/github.com/filecoin-project/go-statestore/.circleci/config.yml deleted file mode 100644 index 958082979c..0000000000 --- a/vendor/github.com/filecoin-project/go-statestore/.circleci/config.yml +++ /dev/null @@ -1,159 +0,0 @@ -version: 2.1 -orbs: - go: gotest/tools@0.0.9 - -executors: - golang: - docker: - - image: circleci/golang:1.13 - -commands: - install-deps: - steps: - - go/install-ssh - - go/install: {package: git} - -jobs: - mod-tidy-check: - executor: golang - steps: - - install-deps - - checkout - - go/mod-download - - go/mod-tidy-check - - build-all: - executor: golang - steps: - - install-deps - - checkout - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - run: - command: make build - - test: &test - description: | - Run tests with gotestsum. 
- parameters: - executor: - type: executor - default: golang - go-test-flags: - type: string - default: "-timeout 30m" - description: Flags passed to go test. - packages: - type: string - default: "./..." - description: Import paths of packages to be tested. - test-suite-name: - type: string - default: unit - description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: short - description: gotestsum format. https://github.com/gotestyourself/gotestsum#format - coverage: - type: string - default: -coverprofile=coverage.txt - description: Coverage flag. Set to the empty string to disable. - codecov-upload: - type: boolean - default: false - description: | - Upload coverage report to https://codecov.io/. Requires the codecov API token to be - set as an environment variable for private projects. - executor: << parameters.executor >> - steps: - - install-deps - - checkout - - go/mod-download - - restore_cache: - name: restore go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - - go/install-gotestsum: - gobin: $HOME/.local/bin - - run: - name: make test - environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - GOTESTSUM_FORMAT: << parameters.gotestsum-format >> - command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - gotestsum -- \ - << parameters.coverage >> \ - << parameters.go-test-flags >> \ - << parameters.packages >> - no_output_timeout: 30m - - store_test_results: - path: /tmp/test-reports - - when: - condition: << parameters.codecov-upload >> - steps: - - go/install: {package: bash} - - go/install: {package: curl} - - run: - shell: /bin/bash -eo pipefail - command: | - bash <(curl -s https://codecov.io/bash) - - save_cache: - name: save go mod cache - key: v1-go-deps-{{ arch }}-{{ checksum "/home/circleci/project/go.mod" }} - paths: - - "~/go/pkg" - - "~/go/src/github.com" - - "~/go/src/golang.org" - - lint: &lint - 
description: | - Run golangci-lint. - parameters: - executor: - type: executor - default: golang - golangci-lint-version: - type: string - default: 1.17.1 - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. - args: - type: string - default: '' - description: | - Arguments to pass to golangci-lint - executor: << parameters.executor >> - steps: - - install-deps - - checkout - - go/mod-download - - go/install-golangci-lint: - gobin: $HOME/.local/bin - version: << parameters.golangci-lint-version >> - - run: - name: Lint - command: | - $HOME/.local/bin/golangci-lint run -v \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - - lint-all: - <<: *lint - -workflows: - version: 2.1 - ci: - jobs: - - lint-changes: - args: "--new-from-rev origin/master" - - test - - mod-tidy-check - - build-all diff --git a/vendor/github.com/filecoin-project/go-statestore/.gitignore b/vendor/github.com/filecoin-project/go-statestore/.gitignore new file mode 100644 index 0000000000..485dee64bc --- /dev/null +++ b/vendor/github.com/filecoin-project/go-statestore/.gitignore @@ -0,0 +1 @@ +.idea diff --git a/vendor/github.com/filecoin-project/go-statestore/store_test.go b/vendor/github.com/filecoin-project/go-statestore/store_test.go deleted file mode 100644 index 68dd8f5868..0000000000 --- a/vendor/github.com/filecoin-project/go-statestore/store_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package statestore - -import ( - "fmt" - "io" - "testing" - - "github.com/filecoin-project/go-cbor-util" - "github.com/ipfs/go-datastore" -) - -type Flarp struct { - x byte -} - -func (f *Flarp) UnmarshalCBOR(r io.Reader) error { - p := make([]byte, 1) - n, err := r.Read(p) - if n != 1 { - panic("somebody messed up") - } - f.x = p[0] - return err -} - -func (f *Flarp) MarshalCBOR(w io.Writer) error { - xs := []byte{f.x} - _, err := w.Write(xs) - return err 
-} - -func (f *Flarp) Blarg() string { - return fmt.Sprintf("%d", f.x) -} - -func TestList(t *testing.T) { - x1 := byte(64) - x2 := byte(42) - - ds := datastore.NewMapDatastore() - - e1, err := cborutil.Dump(&Flarp{x: x1}) - if err != nil { - t.Fatal(err) - } - - if err := ds.Put(datastore.NewKey("/2"), e1); err != nil { - t.Fatal(err) - } - - e2, err := cborutil.Dump(&Flarp{x: x2}) - if err != nil { - t.Fatal(err) - } - - if err := ds.Put(datastore.NewKey("/3"), e2); err != nil { - t.Fatal(err) - } - - st := &StateStore{ds: ds} - - var out []Flarp - if err := st.List(&out); err != nil { - t.Fatal(err) - } - - if len(out) != 2 { - t.Fatalf("wrong len (expected %d, got %d)", 2, len(out)) - } - - blargs := make(map[string]bool) - for _, v := range out { - blargs[v.Blarg()] = true - } - - if !blargs[fmt.Sprintf("%d", x1)] { - t.Fatalf("wrong data (missing Flarp#Blarg() == %d)", x1) - } - - if !blargs[fmt.Sprintf("%d", x2)] { - t.Fatalf("wrong data (missing Flarp#Blarg() == %d)", x2) - } -} diff --git a/vendor/github.com/filecoin-project/lotus/.circleci/config.yml b/vendor/github.com/filecoin-project/lotus/.circleci/config.yml deleted file mode 100644 index 0c4f29c872..0000000000 --- a/vendor/github.com/filecoin-project/lotus/.circleci/config.yml +++ /dev/null @@ -1,359 +0,0 @@ -version: 2.1 -orbs: - go: gotest/tools@0.0.13 - -executors: - golang: - docker: - - image: circleci/golang:1.14.2 - resource_class: 2xlarge - ubuntu: - docker: - - image: ubuntu:19.10 - -commands: - install-deps: - steps: - - go/install-ssh - - go/install: {package: git} - prepare: - parameters: - linux: - default: true - description: is a linux build environment? - type: boolean - darwin: - default: false - description: is a darwin build environment? 
- type: boolean - steps: - - checkout - - git_fetch_all_tags - - checkout - - when: - condition: << parameters.linux >> - steps: - - run: sudo apt-get update - - run: sudo apt-get install ocl-icd-opencl-dev - - run: git submodule sync - - run: git submodule update --init - download-params: - steps: - - restore_cache: - name: Restore parameters cache - keys: - - 'v25-2k-lotus-params' - paths: - - /var/tmp/filecoin-proof-parameters/ - - run: ./lotus fetch-params 2048 - - save_cache: - name: Save parameters cache - key: 'v25-2k-lotus-params' - paths: - - /var/tmp/filecoin-proof-parameters/ - install_ipfs: - steps: - - run: | - apt update - apt install -y wget - wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz - wget https://github.com/ipfs/go-ipfs/releases/download/v0.4.22/go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512 - if [ "$(sha512sum go-ipfs_v0.4.22_linux-amd64.tar.gz)" != "$(cat go-ipfs_v0.4.22_linux-amd64.tar.gz.sha512)" ] - then - echo "ipfs failed checksum check" - exit 1 - fi - tar -xf go-ipfs_v0.4.22_linux-amd64.tar.gz - mv go-ipfs/ipfs /usr/local/bin/ipfs - chmod +x /usr/local/bin/ipfs - git_fetch_all_tags: - steps: - - run: - name: fetch all tags - command: | - git fetch --all - -jobs: - mod-tidy-check: - executor: golang - steps: - - install-deps - - prepare - - go/mod-tidy-check - - build-all: - executor: golang - steps: - - install-deps - - prepare - - run: sudo apt-get update - - run: sudo apt-get install npm - - run: - command: make buildall - - store_artifacts: - path: lotus - - store_artifacts: - path: lotus-storage-miner - - store_artifacts: - path: lotus-seal-worker - - run: mkdir linux && mv lotus lotus-storage-miner lotus-seal-worker linux/ - - persist_to_workspace: - root: "." - paths: - - linux - - build-debug: - executor: golang - steps: - - install-deps - - prepare - - run: - command: make debug - - test: &test - description: | - Run tests with gotestsum. 
- parameters: - executor: - type: executor - default: golang - go-test-flags: - type: string - default: "-timeout 30m" - description: Flags passed to go test. - packages: - type: string - default: "./..." - description: Import paths of packages to be tested. - winpost-test: - type: string - default: "0" - test-suite-name: - type: string - default: unit - description: Test suite name to report to CircleCI. - gotestsum-format: - type: string - default: short - description: gotestsum format. https://github.com/gotestyourself/gotestsum#format - coverage: - type: string - default: -coverprofile=coverage.txt -coverpkg=github.com/filecoin-project/lotus/... - description: Coverage flag. Set to the empty string to disable. - codecov-upload: - type: boolean - default: false - description: | - Upload coverage report to https://codecov.io/. Requires the codecov API token to be - set as an environment variable for private projects. - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - run: - command: make deps lotus - no_output_timeout: 30m - - download-params - - go/install-gotestsum: - gobin: $HOME/.local/bin - - run: - name: go test - environment: - GOTESTSUM_JUNITFILE: /tmp/test-reports/<< parameters.test-suite-name >>/junit.xml - GOTESTSUM_FORMAT: << parameters.gotestsum-format >> - LOTUS_TEST_WINDOW_POST: << parameters.winpost-test >> - command: | - mkdir -p /tmp/test-reports/<< parameters.test-suite-name >> - gotestsum -- \ - << parameters.coverage >> \ - << parameters.go-test-flags >> \ - << parameters.packages >> - no_output_timeout: 30m - - store_test_results: - path: /tmp/test-reports - - when: - condition: << parameters.codecov-upload >> - steps: - - go/install: {package: bash} - - go/install: {package: curl} - - run: - shell: /bin/bash -eo pipefail - command: | - bash <(curl -s https://codecov.io/bash) - - test-short: - <<: *test - test-window-post: - <<: *test - - build-macos: - description: build darwin lotus binary - macos: - xcode: 
"10.0.0" - working_directory: ~/go/src/github.com/filecoin-project/lotus - steps: - - prepare: - linux: false - darwin: true - - run: - name: Install go - command: | - curl -O https://dl.google.com/go/go1.14.2.darwin-amd64.pkg && \ - sudo installer -pkg go1.14.2.darwin-amd64.pkg -target / - - run: - name: Install pkg-config - command: HOMEBREW_NO_AUTO_UPDATE=1 brew install pkg-config - - run: go version - - run: - name: Install Rust - command: | - curl https://sh.rustup.rs -sSf | sh -s -- -y - - run: - name: Install jq - command: | - curl --location https://github.com/stedolan/jq/releases/download/jq-1.6/jq-osx-amd64 --output /usr/local/bin/jq - chmod +x /usr/local/bin/jq - - restore_cache: - name: restore cargo cache - key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} - - install-deps - - run: - command: make build - no_output_timeout: 30m - - store_artifacts: - path: lotus - - store_artifacts: - path: lotus-storage-miner - - store_artifacts: - path: lotus-seal-worker - - run: mkdir darwin && mv lotus lotus-storage-miner lotus-seal-worker darwin/ - - persist_to_workspace: - root: "." - paths: - - darwin - - save_cache: - name: save cargo cache - key: v3-go-deps-{{ arch }}-{{ checksum "~/go/src/github.com/filecoin-project/lotus/go.sum" }} - paths: - - "~/.rustup" - - "~/.cargo" - - gofmt: - executor: golang - steps: - - install-deps - - prepare - - run: - command: "! go fmt ./... 2>&1 | read" - - lint: &lint - description: | - Run golangci-lint. - parameters: - executor: - type: executor - default: golang - golangci-lint-version: - type: string - default: 1.27.0 - concurrency: - type: string - default: '2' - description: | - Concurrency used to run linters. Defaults to 2 because NumCPU is not - aware of container CPU limits. 
- args: - type: string - default: '' - description: | - Arguments to pass to golangci-lint - executor: << parameters.executor >> - steps: - - install-deps - - prepare - - run: - command: make deps - no_output_timeout: 30m - - go/install-golangci-lint: - gobin: $HOME/.local/bin - version: << parameters.golangci-lint-version >> - - run: - name: Lint - command: | - $HOME/.local/bin/golangci-lint run -v --timeout 2m \ - --concurrency << parameters.concurrency >> << parameters.args >> - lint-changes: - <<: *lint - - lint-all: - <<: *lint - - publish: - description: publish binary artifacts - executor: ubuntu - steps: - - run: - name: Install git jq curl - command: apt update && apt install -y git jq curl - - checkout - - git_fetch_all_tags - - checkout - - install_ipfs - - attach_workspace: - at: "." - - run: - name: Create bundles - command: ./scripts/build-bundle.sh - - run: - name: Publish release - command: ./scripts/publish-release.sh - - -workflows: - version: 2.1 - ci: - jobs: - - lint-changes: - args: "--new-from-rev origin/master" - - mod-tidy-check - - gofmt - - test: - codecov-upload: true - - test-window-post: - go-test-flags: "-run=TestWindowedPost" - winpost-test: "1" - - test-short: - go-test-flags: "--timeout 10m --short" - filters: - tags: - only: - - /^v\d+\.\d+\.\d+$/ - - build-debug - - build-all: - requires: - - test-short - filters: - tags: - only: - - /^v\d+\.\d+\.\d+$/ - - build-macos: - requires: - - test-short - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+$/ - - publish: - requires: - - build-all - - build-macos - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+$/ diff --git a/vendor/github.com/filecoin-project/lotus/.codecov.yml b/vendor/github.com/filecoin-project/lotus/.codecov.yml deleted file mode 100644 index cf409a6b69..0000000000 --- a/vendor/github.com/filecoin-project/lotus/.codecov.yml +++ /dev/null @@ -1,3 +0,0 @@ -comment: off -ignore: - - "cbor_gen.go" diff --git 
a/vendor/github.com/filecoin-project/lotus/.dockerignore b/vendor/github.com/filecoin-project/lotus/.dockerignore deleted file mode 120000 index 3e4e48b0b5..0000000000 --- a/vendor/github.com/filecoin-project/lotus/.dockerignore +++ /dev/null @@ -1 +0,0 @@ -.gitignore \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/lotus/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/github.com/filecoin-project/lotus/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 1ded8c36b7..0000000000 --- a/vendor/github.com/filecoin-project/lotus/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -**Describe the bug** -A clear and concise description of what the bug is. - -**To Reproduce** -Steps to reproduce the behavior: -1. Run '...' -2. See error - -**Expected behavior** -A clear and concise description of what you expected to happen. - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Version (run `lotus version`):** - -**Additional context** -Add any other context about the problem here. diff --git a/vendor/github.com/filecoin-project/lotus/.github/ISSUE_TEMPLATE/sealingfailed.md b/vendor/github.com/filecoin-project/lotus/.github/ISSUE_TEMPLATE/sealingfailed.md deleted file mode 100644 index d58664415e..0000000000 --- a/vendor/github.com/filecoin-project/lotus/.github/ISSUE_TEMPLATE/sealingfailed.md +++ /dev/null @@ -1,34 +0,0 @@ ---- -name: Sealing Issues -about: Create a report for help with sealing (commit) failures. -title: '' -labels: 'sealing' -assignees: '' - ---- - -Please provide all the information requested here to help us troubleshoot "commit failed" issues. -If the information requested is missing, we will probably have to just ask you to provide it anyway, -before we can help debug. 
- -**Describe the problem** - -A brief description of the problem you encountered while proving (sealing) a sector. - -Including what commands you ran, and a description of your setup, is very helpful. - -**Sectors list** - -The output of `./lotus-storage-miner sectors list`. - -**Sectors status** - -The output of `./lotus-storage-miner sectors status --log ` for the failed sector(s). - -**Lotus storage miner logs** - -Please go through the logs of your storage miner, and include screenshots of any error-like messages you find. - -**Version** - -The output of `./lotus --version`. diff --git a/vendor/github.com/filecoin-project/lotus/.golangci.yml b/vendor/github.com/filecoin-project/lotus/.golangci.yml deleted file mode 100644 index 76bbc19496..0000000000 --- a/vendor/github.com/filecoin-project/lotus/.golangci.yml +++ /dev/null @@ -1,51 +0,0 @@ -linters: - disable-all: true - enable: - - gofmt - - govet - - goimports - - misspell - - goconst - - golint - - errcheck - - gosec - - unconvert - - staticcheck - - varcheck - - structcheck - - deadcode - - scopelint - - -issues: - exclude: - - "func name will be used as test\\.Test.* by other packages, and that stutters; consider calling this" - - "Potential file inclusion via variable" - - "should have( a package)? 
comment" - - "Error return value of `logging.SetLogLevel` is not checked" - - exclude-use-default: false - exclude-rules: - - path: lotuspond - linters: - - errcheck - - - path: node/modules/lp2p - linters: - - golint - - - path: build/params_.*\.go - linters: - - golint - - - path: api/apistruct/struct.go - linters: - - golint - - - path: .*_test.go - linters: - - gosec - -linters-settings: - goconst: - min-occurrences: 6 diff --git a/vendor/github.com/filecoin-project/lotus/CHANGELOG.md b/vendor/github.com/filecoin-project/lotus/CHANGELOG.md deleted file mode 100644 index 6ba83a258b..0000000000 --- a/vendor/github.com/filecoin-project/lotus/CHANGELOG.md +++ /dev/null @@ -1,7 +0,0 @@ -# lotus changelog - -## 0.1.0 / 2019-12-11 - -We are very excited to release **lotus** 0.1.0. This is our testnet release. To install lotus and join the testnet, please visit [docs.lotu.sh](docs.lotu.sh). Please file bug reports as [issues](https://github.com/filecoin-project/lotus/issues). - -A huge thank you to all contributors for this testnet release! \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/lotus/Makefile b/vendor/github.com/filecoin-project/lotus/Makefile deleted file mode 100644 index 4533cc4dc3..0000000000 --- a/vendor/github.com/filecoin-project/lotus/Makefile +++ /dev/null @@ -1,235 +0,0 @@ -SHELL=/usr/bin/env bash - -all: build -.PHONY: all - -unexport GOFLAGS - -GOVERSION:=$(shell go version | cut -d' ' -f 3 | cut -d. 
-f 2) -ifeq ($(shell expr $(GOVERSION) \< 14), 1) -$(warning Your Golang version is go 1.$(GOVERSION)) -$(error Update Golang to version $(shell grep '^go' go.mod)) -endif - -# git modules that need to be loaded -MODULES:= - -CLEAN:= -BINS:= - -ldflags=-X=github.com/filecoin-project/lotus/build.CurrentCommit=+git.$(subst -,.,$(shell git describe --always --match=NeVeRmAtCh --dirty 2>/dev/null || git rev-parse --short HEAD 2>/dev/null)) -ifneq ($(strip $(LDFLAGS)),) - ldflags+=-extldflags=$(LDFLAGS) -endif - -GOFLAGS+=-ldflags="$(ldflags)" - - -## FFI - -FFI_PATH:=extern/filecoin-ffi/ -FFI_DEPS:=.install-filcrypto -FFI_DEPS:=$(addprefix $(FFI_PATH),$(FFI_DEPS)) - -$(FFI_DEPS): build/.filecoin-install ; - -build/.filecoin-install: $(FFI_PATH) - $(MAKE) -C $(FFI_PATH) $(FFI_DEPS:$(FFI_PATH)%=%) - @touch $@ - -MODULES+=$(FFI_PATH) -BUILD_DEPS+=build/.filecoin-install -CLEAN+=build/.filecoin-install - -$(MODULES): build/.update-modules ; - -# dummy file that marks the last time modules were updated -build/.update-modules: - git submodule update --init --recursive - touch $@ - -# end git modules - -## MAIN BINARIES - -CLEAN+=build/.update-modules - -deps: $(BUILD_DEPS) -.PHONY: deps - -debug: GOFLAGS+=-tags=debug -debug: lotus lotus-storage-miner lotus-seal-worker lotus-seed - -2k: GOFLAGS+=-tags=2k -2k: lotus lotus-storage-miner lotus-seal-worker lotus-seed - -lotus: $(BUILD_DEPS) - rm -f lotus - go build $(GOFLAGS) -o lotus ./cmd/lotus - go run github.com/GeertJohan/go.rice/rice append --exec lotus -i ./build - -.PHONY: lotus -BINS+=lotus - -lotus-storage-miner: $(BUILD_DEPS) - rm -f lotus-storage-miner - go build $(GOFLAGS) -o lotus-storage-miner ./cmd/lotus-storage-miner - go run github.com/GeertJohan/go.rice/rice append --exec lotus-storage-miner -i ./build -.PHONY: lotus-storage-miner -BINS+=lotus-storage-miner - -lotus-seal-worker: $(BUILD_DEPS) - rm -f lotus-seal-worker - go build $(GOFLAGS) -o lotus-seal-worker ./cmd/lotus-seal-worker - go run 
github.com/GeertJohan/go.rice/rice append --exec lotus-seal-worker -i ./build -.PHONY: lotus-seal-worker -BINS+=lotus-seal-worker - -lotus-shed: $(BUILD_DEPS) - rm -f lotus-shed - go build $(GOFLAGS) -o lotus-shed ./cmd/lotus-shed - go run github.com/GeertJohan/go.rice/rice append --exec lotus-shed -i ./build -.PHONY: lotus-shed -BINS+=lotus-shed - -build: lotus lotus-storage-miner lotus-seal-worker - @[[ $$(type -P "lotus") ]] && echo "Caution: you have \ -an existing lotus binary in your PATH. This may cause problems if you don't run 'sudo make install'" || true - -.PHONY: build - -install: - install -C ./lotus /usr/local/bin/lotus - install -C ./lotus-storage-miner /usr/local/bin/lotus-storage-miner - install -C ./lotus-seal-worker /usr/local/bin/lotus-seal-worker - -install-services: install - mkdir -p /usr/local/lib/systemd/system - mkdir -p /var/log/lotus - install -C -m 0644 ./scripts/lotus-daemon.service /usr/local/lib/systemd/system/lotus-daemon.service - install -C -m 0644 ./scripts/lotus-miner.service /usr/local/lib/systemd/system/lotus-miner.service - systemctl daemon-reload - @echo - @echo "lotus-daemon and lotus-miner services installed. Don't forget to 'systemctl enable lotus-daemon|lotus-miner' for it to be enabled on startup." - -clean-services: - rm -f /usr/local/lib/systemd/system/lotus-daemon.service - rm -f /usr/local/lib/systemd/system/lotus-miner.service - rm -f /usr/local/lib/systemd/system/chainwatch.service - systemctl daemon-reload - -# TOOLS - -lotus-seed: $(BUILD_DEPS) - rm -f lotus-seed - go build $(GOFLAGS) -o lotus-seed ./cmd/lotus-seed - go run github.com/GeertJohan/go.rice/rice append --exec lotus-seed -i ./build - -.PHONY: lotus-seed -BINS+=lotus-seed - -benchmarks: - go run github.com/whyrusleeping/bencher ./... 
> bench.json - @echo Submitting results - @curl -X POST 'http://benchmark.kittyhawk.wtf/benchmark' -d '@bench.json' -u "${benchmark_http_cred}" -.PHONY: benchmarks - -pond: 2k - go build -o pond ./lotuspond - (cd lotuspond/front && npm i && CI=false npm run build) -.PHONY: pond -BINS+=pond - -townhall: - rm -f townhall - go build -o townhall ./cmd/lotus-townhall - (cd ./cmd/lotus-townhall/townhall && npm i && npm run build) - go run github.com/GeertJohan/go.rice/rice append --exec townhall -i ./cmd/lotus-townhall -i ./build -.PHONY: townhall -BINS+=townhall - -fountain: - rm -f fountain - go build -o fountain ./cmd/lotus-fountain - go run github.com/GeertJohan/go.rice/rice append --exec fountain -i ./cmd/lotus-fountain -i ./build -.PHONY: fountain -BINS+=fountain - -chainwatch: - rm -f chainwatch - go build -o chainwatch ./cmd/lotus-chainwatch - go run github.com/GeertJohan/go.rice/rice append --exec chainwatch -i ./cmd/lotus-chainwatch -i ./build -.PHONY: chainwatch -BINS+=chainwatch - -install-chainwatch-service: chainwatch - install -C ./chainwatch /usr/local/bin/chainwatch - install -C -m 0644 ./scripts/chainwatch.service /usr/local/lib/systemd/system/chainwatch.service - systemctl daemon-reload - @echo - @echo "chainwatch installed. Don't forget to 'systemctl enable chainwatch' for it to be enabled on startup." 
- -bench: - rm -f bench - go build -o bench ./cmd/lotus-bench - go run github.com/GeertJohan/go.rice/rice append --exec bench -i ./build -.PHONY: bench -BINS+=bench - -stats: - rm -f stats - go build -o stats ./tools/stats - go run github.com/GeertJohan/go.rice/rice append --exec stats -i ./build -.PHONY: stats -BINS+=stats - -health: - rm -f lotus-health - go build -o lotus-health ./cmd/lotus-health - go run github.com/GeertJohan/go.rice/rice append --exec lotus-health -i ./build - -.PHONY: health -BINS+=health - -testground: - go build -tags testground -o /dev/null ./cmd/lotus - -.PHONY: testground -BINS+=testground - -# MISC - -buildall: $(BINS) - -completions: - ./scripts/make-completions.sh lotus - ./scripts/make-completions.sh lotus-storage-miner -.PHONY: completions - -install-completions: - mkdir -p /usr/share/bash-completion/completions /usr/local/share/zsh/site-functions/ - install -C ./scripts/bash-completion/lotus /usr/share/bash-completion/completions/lotus - install -C ./scripts/bash-completion/lotus-storage-miner /usr/share/bash-completion/completions/lotus-storage-miner - install -C ./scripts/zsh-completion/lotus /usr/local/share/zsh/site-functions/_lotus - install -C ./scripts/zsh-completion/lotus-storage-miner /usr/local/share/zsh/site-functions/_lotus-storage-miner - -clean: - rm -rf $(CLEAN) $(BINS) - -$(MAKE) -C $(FFI_PATH) clean -.PHONY: clean - -dist-clean: - git clean -xdff - git submodule deinit --all -f -.PHONY: dist-clean - -type-gen: - go run ./gen/main.go - -method-gen: - (cd ./lotuspond/front/src/chain && go run ./methodgen.go) - -gen: type-gen method-gen - -print-%: - @echo $*=$($*) diff --git a/vendor/github.com/filecoin-project/lotus/README.md b/vendor/github.com/filecoin-project/lotus/README.md deleted file mode 100644 index a15276ee29..0000000000 --- a/vendor/github.com/filecoin-project/lotus/README.md +++ /dev/null @@ -1,26 +0,0 @@ -![Lotus](documentation/images/lotus_logo_h.png) - -# Project Lotus - 莲 - -Lotus is an 
implementation of the Filecoin Distributed Storage Network. For more details about Filecoin, check out the [Filecoin Spec](https://github.com/filecoin-project/specs). - -## Building & Documentation - -For instructions on how to build lotus from source, please visit [https://docs.lotu.sh](https://docs.lotu.sh) or read the source [here](https://github.com/filecoin-project/lotus/tree/master/documentation). - -## Reporting a Vulnerability - -Please send an email to security@filecoin.org. See our [security policy](SECURITY.md) for more details. - -## Development - -All work is tracked via issues. An attempt at keeping an up-to-date view on remaining work is in the [lotus testnet github project board](https://github.com/filecoin-project/lotus/projects/1). - -The main branches under development at the moment are: -* [`master`](https://github.com/filecoin-project/lotus): current testnet. -* [`next`](https://github.com/filecoin-project/lotus/tree/next): working branch with chain-breaking changes. -* [`interopnet`](https://github.com/filecoin-project/lotus/tree/interopnet): devnet running one of `next` commits. - -## License - -Dual-licensed under [MIT](https://github.com/filecoin-project/lotus/blob/master/LICENSE-MIT) + [Apache 2.0](https://github.com/filecoin-project/lotus/blob/master/LICENSE-APACHE) diff --git a/vendor/github.com/filecoin-project/lotus/SECURITY.md b/vendor/github.com/filecoin-project/lotus/SECURITY.md deleted file mode 100644 index ecb600deb6..0000000000 --- a/vendor/github.com/filecoin-project/lotus/SECURITY.md +++ /dev/null @@ -1,29 +0,0 @@ -# Security Policy - -## Reporting a Vulnerability - -For *critical* bugs, please send an email to security@filecoin.org. - -The bug reporting process differs between bugs that are critical and may crash the network, and others that are unlikely to cause problems if malicious parties know about it. 
For non-critical bugs, please simply file a GitHub [issue](https://github.com/filecoin-project/lotus/issues/new?template=bug_report.md). - -Please try to provide a clear description of any bugs reported, along with how to reproduce the bug if possible. More detailed bug reports (especially those with a PoC included) will help us move forward much faster. Additionally, please avoid reporting bugs that already have open issues. Take a moment to search the issue list of the related GitHub repositories before writing up a new report. - -Here are some examples of bugs we would consider 'critical': - -* If you can spend from a `multisig` wallet you do not control the keys for. -* If you can cause a miner to be slashed without them actually misbehaving. -* If you can maintain power without submitting windowed posts regularly. -* If you can craft a message that causes lotus nodes to panic. -* If you can cause your miner to win significantly more blocks than it should. -* If you can craft a message that causes a persistent fork in the network. -* If you can cause the total amount of Filecoin in the network to no longer be 2 billion. - -This is not an exhaustive list, but should provide some idea of what we consider 'critical'. - -## Supported Versions - -* TODO: This should be defined and set up by Mainnet launch. 
- -| Version | Supported | -| ------- | ------------------ | -| Testnet | :white_check_mark: | diff --git a/vendor/github.com/filecoin-project/lotus/api/api_common.go b/vendor/github.com/filecoin-project/lotus/api/api_common.go deleted file mode 100644 index aa63e9815d..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/api_common.go +++ /dev/null @@ -1,67 +0,0 @@ -package api - -import ( - "context" - "fmt" - - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/build" -) - -type Common interface { - - // MethodGroup: Auth - - AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) - AuthNew(ctx context.Context, perms []auth.Permission) ([]byte, error) - - // MethodGroup: Net - - NetConnectedness(context.Context, peer.ID) (network.Connectedness, error) - NetPeers(context.Context) ([]peer.AddrInfo, error) - NetConnect(context.Context, peer.AddrInfo) error - NetAddrsListen(context.Context) (peer.AddrInfo, error) - NetDisconnect(context.Context, peer.ID) error - NetFindPeer(context.Context, peer.ID) (peer.AddrInfo, error) - NetPubsubScores(context.Context) ([]PubsubScore, error) - - // MethodGroup: Common - - // ID returns peerID of libp2p node backing this API - ID(context.Context) (peer.ID, error) - - // Version provides information about API provider - Version(context.Context) (Version, error) - - LogList(context.Context) ([]string, error) - LogSetLevel(context.Context, string, string) error - - // trigger graceful shutdown - Shutdown(context.Context) error - - Closing(context.Context) (<-chan struct{}, error) -} - -// Version provides various build-time information -type Version struct { - Version string - - // APIVersion is a binary encoded semver version of the remote implementing - // this api - // - // See APIVersion in build/version.go - APIVersion build.Version - - // TODO: git commit / os / genesis cid? 
- - // Seconds - BlockDelay uint64 -} - -func (v Version) String() string { - return fmt.Sprintf("%s+api%s", v.Version, v.APIVersion.String()) -} diff --git a/vendor/github.com/filecoin-project/lotus/api/api_full.go b/vendor/github.com/filecoin-project/lotus/api/api_full.go deleted file mode 100644 index 09c69a1ba1..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/api_full.go +++ /dev/null @@ -1,609 +0,0 @@ -package api - -import ( - "context" - "time" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-filestore" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/crypto" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -// FullNode API is a low-level interface to the Filecoin network full node -type FullNode interface { - Common - - // TODO: TipSetKeys - - // MethodGroup: Chain - // The Chain method group contains methods for interacting with the - // blockchain, but that do not require any form of state computation. - - // ChainNotify returns channel with chain head updates. - // First message is guaranteed to be of len == 1, and type == 'current'. - ChainNotify(context.Context) (<-chan []*HeadChange, error) - - // ChainHead returns the current head of the chain. - ChainHead(context.Context) (*types.TipSet, error) - - // ChainGetRandomness is used to sample the chain for randomness. 
- ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) - - // ChainGetBlock returns the block specified by the given CID. - ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) - // ChainGetTipSet returns the tipset specified by the given TipSetKey. - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - - // ChainGetBlockMessages returns messages stored in the specified block. - ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*BlockMessages, error) - - // ChainGetParentReceipts returns receipts for messages in parent tipset of - // the specified block. - ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) - - // ChainGetParentReceipts returns messages stored in parent tipset of the - // specified block. - ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]Message, error) - - // ChainGetTipSetByHeight looks back for a tipset at the specified epoch. - // If there are no blocks at the specified epoch, a tipset at higher epoch - // will be returned. - ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) - - // ChainReadObj reads ipld nodes referenced by the specified CID from chain - // blockstore and returns raw bytes. - ChainReadObj(context.Context, cid.Cid) ([]byte, error) - - // ChainHasObj checks if a given CID exists in the chain blockstore. - ChainHasObj(context.Context, cid.Cid) (bool, error) - ChainStatObj(context.Context, cid.Cid, cid.Cid) (ObjStat, error) - - // ChainSetHead forcefully sets current chain head. Use with caution. - ChainSetHead(context.Context, types.TipSetKey) error - - // ChainGetGenesis returns the genesis tipset. - ChainGetGenesis(context.Context) (*types.TipSet, error) - - // ChainTipSetWeight computes weight for the specified tipset. 
- ChainTipSetWeight(context.Context, types.TipSetKey) (types.BigInt, error) - ChainGetNode(ctx context.Context, p string) (*IpldObject, error) - - // ChainGetMessage reads a message referenced by the specified CID from the - // chain blockstore. - ChainGetMessage(context.Context, cid.Cid) (*types.Message, error) - - // ChainGetPath returns a set of revert/apply operations needed to get from - // one tipset to another, for example: - //``` - // to - // ^ - // from tAA - // ^ ^ - // tBA tAB - // ^---*--^ - // ^ - // tRR - //``` - // Would return `[revert(tBA), apply(tAB), apply(tAA)]` - ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*HeadChange, error) - - // ChainExport returns a stream of bytes with CAR dump of chain data. - ChainExport(context.Context, types.TipSetKey) (<-chan []byte, error) - - // MethodGroup: Sync - // The Sync method group contains methods for interacting with and - // observing the lotus sync service. - - // SyncState returns the current status of the lotus sync system. - SyncState(context.Context) (*SyncState, error) - - // SyncSubmitBlock can be used to submit a newly created block to the. - // network through this node - SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error - - // SyncIncomingBlocks returns a channel streaming incoming, potentially not - // yet synced block headers. - SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) - - // SyncMarkBad marks a blocks as bad, meaning that it won't ever by synced. - // Use with extreme caution. - SyncMarkBad(ctx context.Context, bcid cid.Cid) error - - // SyncCheckBad checks if a block was marked as bad, and if it was, returns - // the reason. - SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) - - // MethodGroup: Mpool - // The Mpool methods are for interacting with the message pool. The message pool - // manages all incoming and outgoing 'messages' going over the network. 
- - // MpoolPending returns pending mempool messages. - MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) - - // MpoolPush pushes a signed message to mempool. - MpoolPush(context.Context, *types.SignedMessage) (cid.Cid, error) - - // MpoolPushMessage atomically assigns a nonce, signs, and pushes a message - // to mempool. - MpoolPushMessage(context.Context, *types.Message) (*types.SignedMessage, error) - - // MpoolGetNonce gets next nonce for the specified sender. - // Note that this method may not be atomic. Use MpoolPushMessage instead. - MpoolGetNonce(context.Context, address.Address) (uint64, error) - MpoolSub(context.Context) (<-chan MpoolUpdate, error) - - // MpoolEstimateGasPrice estimates what gas price should be used for a - // message to have high likelihood of inclusion in `nblocksincl` epochs. - MpoolEstimateGasPrice(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) - - // MethodGroup: Miner - - MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) - MinerCreateBlock(context.Context, *BlockTemplate) (*types.BlockMsg, error) - - // // UX ? - - // MethodGroup: Wallet - - // WalletNew creates a new address in the wallet with the given sigType. - WalletNew(context.Context, crypto.SigType) (address.Address, error) - // WalletHas indicates whether the given address is in the wallet. - WalletHas(context.Context, address.Address) (bool, error) - // WalletHas indicates whether the given address is in the wallet. - WalletList(context.Context) ([]address.Address, error) - // WalletBalance returns the balance of the given address at the current head of the chain. - WalletBalance(context.Context, address.Address) (types.BigInt, error) - // WalletSign signs the given bytes using the given address. 
- WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) - // WalletSignMessage signs the given message using the given address. - WalletSignMessage(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) - // WalletVerify takes an address, a signature, and some bytes, and indicates whether the signature is valid. - // The address does not have to be in the wallet. - WalletVerify(context.Context, address.Address, []byte, *crypto.Signature) bool - // WalletDefaultAddress returns the address marked as default in the wallet. - WalletDefaultAddress(context.Context) (address.Address, error) - // WalletSetDefault marks the given address as as the default one. - WalletSetDefault(context.Context, address.Address) error - // WalletExport returns the private key of an address in the wallet. - WalletExport(context.Context, address.Address) (*types.KeyInfo, error) - // WalletImport receives a KeyInfo, which includes a private key, and imports it into the wallet. - WalletImport(context.Context, *types.KeyInfo) (address.Address, error) - // WalletDelete deletes an address from the wallet. - WalletDelete(context.Context, address.Address) error - - // Other - - // MethodGroup: Client - // The Client methods all have to do with interacting with the storage and - // retrieval markets as a client - - // ClientImport imports file under the specified path into filestore. - ClientImport(ctx context.Context, ref FileRef) (cid.Cid, error) - // ClientStartDeal proposes a deal with a miner. - ClientStartDeal(ctx context.Context, params *StartDealParams) (*cid.Cid, error) - // ClientGetDealInfo returns the latest information about a given deal. - ClientGetDealInfo(context.Context, cid.Cid) (*DealInfo, error) - // ClientListDeals returns information about the deals made by the local client. - ClientListDeals(ctx context.Context) ([]DealInfo, error) - // ClientHasLocal indicates whether a certain CID is locally stored. 
- ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) - // ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). - ClientFindData(ctx context.Context, root cid.Cid) ([]QueryOffer, error) - // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. - ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (QueryOffer, error) - // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error - // ClientQueryAsk returns a signed StorageAsk from the specified miner. - ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) - // ClientCalcCommP calculates the CommP for a specified file, based on the sector size of the provided miner. - ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*CommPRet, error) - // ClientGenCar generates a CAR file for the specified file. - ClientGenCar(ctx context.Context, ref FileRef, outpath string) error - - // ClientUnimport removes references to the specified file from filestore - //ClientUnimport(path string) - - // ClientListImports lists imported files and their root CIDs - ClientListImports(ctx context.Context) ([]Import, error) - - //ClientListAsks() []Ask - - // MethodGroup: State - // The State methods are used to query, inspect, and interact with chain state. - // All methods take a TipSetKey as a parameter. The state looked up is the state at that tipset. - // A nil TipSetKey can be provided as a param, this will cause the heaviest tipset in the chain to be used. - - // StateCall runs the given message and returns its result without any persisted changes. - StateCall(context.Context, *types.Message, types.TipSetKey) (*InvocResult, error) - // StateReplay returns the result of executing the indicated message, assuming it was executed in the indicated tipset. 
- StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) - // StateGetActor returns the indicated actor's nonce and balance. - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) - // StateReadState returns the indicated actor's state. - StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*ActorState, error) - // StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. - StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) - - // StateNetworkName returns the name of the network the node is synced to - StateNetworkName(context.Context) (dtypes.NetworkName, error) - // StateMinerSectors returns info about the given miner's sectors. If the filter bitfield is nil, all sectors are included. - // If the filterOut boolean is set to true, any sectors in the filter are excluded. - // If false, only those sectors in the filter are included. - StateMinerSectors(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*ChainSectorInfo, error) - // StateMinerProvingSet returns info about those sectors that a given miner is actively proving. - StateMinerProvingSet(context.Context, address.Address, types.TipSetKey) ([]*ChainSectorInfo, error) - // StateMinerProvingDeadline calculates the deadline at some epoch for a proving period - // and returns the deadline-related calculations. 
- StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error) - // StateMinerPower returns the power of the indicated miner - StateMinerPower(context.Context, address.Address, types.TipSetKey) (*MinerPower, error) - // StateMinerInfo returns info about the indicated miner - StateMinerInfo(context.Context, address.Address, types.TipSetKey) (MinerInfo, error) - // StateMinerDeadlines returns all the proving deadlines for the given miner - StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) (*miner.Deadlines, error) - // StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner - StateMinerFaults(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) - // StateAllMinerFaults returns all non-expired Faults that occur within lookback epochs of the given tipset - StateAllMinerFaults(ctx context.Context, lookback abi.ChainEpoch, ts types.TipSetKey) ([]*Fault, error) - // StateMinerRecoveries returns a bitfield indicating the recovering sectors of the given miner - StateMinerRecoveries(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) - // StateMinerInitialPledgeCollateral returns the initial pledge collateral for the specified miner's sector - StateMinerInitialPledgeCollateral(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (types.BigInt, error) - // StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent - StateMinerAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) - // StateSectorPreCommitInfo returns the PreCommit info for the specified miner's sector - StateSectorPreCommitInfo(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) - // StateSectorGetInfo returns the on-chain info for the specified miner's sector - StateSectorGetInfo(context.Context, address.Address, 
abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) - StatePledgeCollateral(context.Context, types.TipSetKey) (types.BigInt, error) - // StateSearchMsg searches for a message in the chain, and returns its receipt and the tipset where it was executed - StateSearchMsg(context.Context, cid.Cid) (*MsgLookup, error) - // StateWaitMsg looks back in the chain for a message. If not found, it blocks until the - // message arrives on chain, and gets to the indicated confidence depth. - StateWaitMsg(ctx context.Context, cid cid.Cid, confidence uint64) (*MsgLookup, error) - // StateListMiners returns the addresses of every miner that has claimed power in the Power Actor - StateListMiners(context.Context, types.TipSetKey) ([]address.Address, error) - // StateListActors returns the addresses of every actor in the state - StateListActors(context.Context, types.TipSetKey) ([]address.Address, error) - // StateMarketBalance looks up the Escrow and Locked balances of the given address in the Storage Market - StateMarketBalance(context.Context, address.Address, types.TipSetKey) (MarketBalance, error) - // StateMarketParticipants returns the Escrow and Locked balances of every participant in the Storage Market - StateMarketParticipants(context.Context, types.TipSetKey) (map[string]MarketBalance, error) - // StateMarketDeals returns information about every deal in the Storage Market - StateMarketDeals(context.Context, types.TipSetKey) (map[string]MarketDeal, error) - // StateMarketStorageDeal returns information about the indicated deal - StateMarketStorageDeal(context.Context, abi.DealID, types.TipSetKey) (*MarketDeal, error) - // StateLookupID retrieves the ID address of the given address - StateLookupID(context.Context, address.Address, types.TipSetKey) (address.Address, error) - // StateAccountKey returns the public key address of the given ID address - StateAccountKey(context.Context, address.Address, types.TipSetKey) (address.Address, error) - // 
StateChangedActors returns all the actors whose states change between the two given state CIDs - // TODO: Should this take tipset keys instead? - StateChangedActors(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) - // StateGetReceipt returns the message receipt for the given message - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) - // StateMinerSectorCount returns the number of sectors in a miner's sector set and proving set - StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) - // StateCompute is a flexible command that applies the given messages on the given tipset. - // The messages are run as though the VM were at the provided height. - StateCompute(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*ComputeStateOutput, error) - - // MethodGroup: Msig - // The Msig methods are used to interact with multisig wallets on the - // filecoin network - - // MsigGetAvailableBalance returns the portion of a multisig's balance that can be withdrawn or spent - MsigGetAvailableBalance(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) - // MsigGetAvailableBalance creates a multisig wallet - // It takes the following params: , , , - // , - MsigCreate(context.Context, int64, []address.Address, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) - // MsigPropose proposes a multisig message - // It takes the following params: , , , - // , , - MsigPropose(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) - // MsigApprove approves a previously-proposed multisig message - // It takes the following params: , , , , , - // , , - MsigApprove(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) - // MsigCancel cancels a previously-proposed multisig message - // It takes the 
following params: , , , , , - // , , - // TODO: You can't cancel someone else's proposed message, so "src" and "proposer" here are redundant - MsigCancel(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) - - MarketEnsureAvailable(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) - // MarketFreeBalance - - // MethodGroup: Paych - // The Paych methods are for interacting with and managing payment channels - - PaychGet(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*ChannelInfo, error) - PaychList(context.Context) ([]address.Address, error) - PaychStatus(context.Context, address.Address) (*PaychStatus, error) - PaychClose(context.Context, address.Address) (cid.Cid, error) - PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) - PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []VoucherSpec) (*PaymentInfo, error) - PaychVoucherCheckValid(context.Context, address.Address, *paych.SignedVoucher) error - PaychVoucherCheckSpendable(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) - PaychVoucherCreate(context.Context, address.Address, types.BigInt, uint64) (*paych.SignedVoucher, error) - PaychVoucherAdd(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) - PaychVoucherList(context.Context, address.Address) ([]*paych.SignedVoucher, error) - PaychVoucherSubmit(context.Context, address.Address, *paych.SignedVoucher) (cid.Cid, error) -} - -type FileRef struct { - Path string - IsCAR bool -} - -type MinerSectors struct { - Sset uint64 - Pset uint64 -} - -type Import struct { - Status filestore.Status - Key cid.Cid - FilePath string - Size uint64 -} - -type DealInfo struct { - ProposalCid cid.Cid - State storagemarket.StorageDealStatus - Message string // more information about deal state, particularly 
errors - Provider address.Address - - PieceCID cid.Cid - Size uint64 - - PricePerEpoch types.BigInt - Duration uint64 - - DealID abi.DealID -} - -type MsgLookup struct { - Receipt types.MessageReceipt - ReturnDec interface{} - // TODO: This should probably a tipsetkey? - TipSet *types.TipSet -} - -type BlockMessages struct { - BlsMessages []*types.Message - SecpkMessages []*types.SignedMessage - - Cids []cid.Cid -} - -type Message struct { - Cid cid.Cid - Message *types.Message -} - -type ChainSectorInfo struct { - Info miner.SectorOnChainInfo - ID abi.SectorNumber -} - -type ActorState struct { - Balance types.BigInt - State interface{} -} - -type PCHDir int - -const ( - PCHUndef PCHDir = iota - PCHInbound - PCHOutbound -) - -type PaychStatus struct { - ControlAddr address.Address - Direction PCHDir -} - -type ChannelInfo struct { - Channel address.Address - ChannelMessage cid.Cid -} - -type PaymentInfo struct { - Channel address.Address - ChannelMessage *cid.Cid - Vouchers []*paych.SignedVoucher -} - -type VoucherSpec struct { - Amount types.BigInt - TimeLockMin abi.ChainEpoch - TimeLockMax abi.ChainEpoch - MinSettle abi.ChainEpoch - - Extra *paych.ModVerifyParams -} - -type MinerPower struct { - MinerPower power.Claim - TotalPower power.Claim -} - -type QueryOffer struct { - Err string - - Root cid.Cid - - Size uint64 - MinPrice types.BigInt - PaymentInterval uint64 - PaymentIntervalIncrease uint64 - Miner address.Address - MinerPeerID peer.ID -} - -func (o *QueryOffer) Order(client address.Address) RetrievalOrder { - return RetrievalOrder{ - Root: o.Root, - Size: o.Size, - Total: o.MinPrice, - PaymentInterval: o.PaymentInterval, - PaymentIntervalIncrease: o.PaymentIntervalIncrease, - Client: client, - - Miner: o.Miner, - MinerPeerID: o.MinerPeerID, - } -} - -type MarketBalance struct { - Escrow big.Int - Locked big.Int -} - -type MarketDeal struct { - Proposal market.DealProposal - State market.DealState -} - -type RetrievalOrder struct { - // TODO: make this 
less unixfs specific - Root cid.Cid - Size uint64 - // TODO: support offset - Total types.BigInt - PaymentInterval uint64 - PaymentIntervalIncrease uint64 - Client address.Address - Miner address.Address - MinerPeerID peer.ID -} - -type InvocResult struct { - Msg *types.Message - MsgRct *types.MessageReceipt - ExecutionTrace types.ExecutionTrace - Error string - Duration time.Duration -} - -type MethodCall struct { - types.MessageReceipt - Error string -} - -type StartDealParams struct { - Data *storagemarket.DataRef - Wallet address.Address - Miner address.Address - EpochPrice types.BigInt - MinBlocksDuration uint64 - DealStartEpoch abi.ChainEpoch -} - -type IpldObject struct { - Cid cid.Cid - Obj interface{} -} - -type ActiveSync struct { - Base *types.TipSet - Target *types.TipSet - - Stage SyncStateStage - Height abi.ChainEpoch - - Start time.Time - End time.Time - Message string -} - -type SyncState struct { - ActiveSyncs []ActiveSync -} - -type SyncStateStage int - -const ( - StageIdle = SyncStateStage(iota) - StageHeaders - StagePersistHeaders - StageMessages - StageSyncComplete - StageSyncErrored -) - -type MpoolChange int - -const ( - MpoolAdd MpoolChange = iota - MpoolRemove -) - -type MpoolUpdate struct { - Type MpoolChange - Message *types.SignedMessage -} - -type ComputeStateOutput struct { - Root cid.Cid - Trace []*InvocResult -} - -type MiningBaseInfo struct { - MinerPower types.BigInt - NetworkPower types.BigInt - Sectors []abi.SectorInfo - WorkerKey address.Address - SectorSize abi.SectorSize - PrevBeaconEntry types.BeaconEntry - BeaconEntries []types.BeaconEntry -} - -type BlockTemplate struct { - Miner address.Address - Parents types.TipSetKey - Ticket *types.Ticket - Eproof *types.ElectionProof - BeaconValues []types.BeaconEntry - Messages []*types.SignedMessage - Epoch abi.ChainEpoch - Timestamp uint64 - WinningPoStProof []abi.PoStProof -} - -type CommPRet struct { - Root cid.Cid - Size abi.UnpaddedPieceSize -} -type HeadChange struct { - Type 
string - Val *types.TipSet -} - -type MsigProposeResponse int - -const ( - MsigApprove MsigProposeResponse = iota - MsigCancel -) - -type Fault struct { - Miner address.Address - Epoch abi.ChainEpoch -} diff --git a/vendor/github.com/filecoin-project/lotus/api/api_storage.go b/vendor/github.com/filecoin-project/lotus/api/api_storage.go deleted file mode 100644 index dfb5365808..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/api_storage.go +++ /dev/null @@ -1,133 +0,0 @@ -package api - -import ( - "bytes" - "context" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/sector-storage/stores" - "github.com/filecoin-project/sector-storage/storiface" - "github.com/filecoin-project/specs-actors/actors/abi" -) - -// StorageMiner is a low-level interface to the Filecoin network storage miner node -type StorageMiner interface { - Common - - ActorAddress(context.Context) (address.Address, error) - - ActorSectorSize(context.Context, address.Address) (abi.SectorSize, error) - - MiningBase(context.Context) (*types.TipSet, error) - - // Temp api for testing - PledgeSector(context.Context) error - - // Get the status of a given sector by ID - SectorsStatus(context.Context, abi.SectorNumber) (SectorInfo, error) - - // List all staged sectors - SectorsList(context.Context) ([]abi.SectorNumber, error) - - SectorsRefs(context.Context) (map[string][]SealedRef, error) - - SectorsUpdate(context.Context, abi.SectorNumber, SectorState) error - SectorRemove(context.Context, abi.SectorNumber) error - - StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) - StorageLocal(ctx context.Context) (map[stores.ID]string, error) - StorageStat(ctx context.Context, id stores.ID) (stores.FsStat, error) - - // WorkerConnect tells the node to connect to workers RPC - WorkerConnect(context.Context, string) error - 
WorkerStats(context.Context) (map[uint64]storiface.WorkerStats, error) - - stores.SectorIndex - - MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error - MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error) - MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) - MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error - MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) - - DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error - DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) - DealsConsiderOnlineStorageDeals(context.Context) (bool, error) - DealsSetConsiderOnlineStorageDeals(context.Context, bool) error - DealsConsiderOnlineRetrievalDeals(context.Context) (bool, error) - DealsSetConsiderOnlineRetrievalDeals(context.Context, bool) error - DealsPieceCidBlocklist(context.Context) ([]cid.Cid, error) - DealsSetPieceCidBlocklist(context.Context, []cid.Cid) error - DealsConsiderOfflineStorageDeals(context.Context) (bool, error) - DealsSetConsiderOfflineStorageDeals(context.Context, bool) error - DealsConsiderOfflineRetrievalDeals(context.Context) (bool, error) - DealsSetConsiderOfflineRetrievalDeals(context.Context, bool) error - - StorageAddLocal(ctx context.Context, path string) error -} - -type SealRes struct { - Err string - GoErr error `json:"-"` - - Proof []byte -} - -type SectorLog struct { - Kind string - Timestamp uint64 - - Trace string - - Message string -} - -type SectorInfo struct { - SectorID abi.SectorNumber - State SectorState - CommD *cid.Cid - CommR *cid.Cid - Proof []byte - Deals []abi.DealID - Ticket SealTicket - Seed SealSeed - Retries uint64 - - LastErr string - - Log []SectorLog -} - -type SealedRef struct { - SectorID abi.SectorNumber - Offset uint64 - Size abi.UnpaddedPieceSize -} - -type SealedRefs struct { - 
Refs []SealedRef -} - -type SealTicket struct { - Value abi.SealRandomness - Epoch abi.ChainEpoch -} - -type SealSeed struct { - Value abi.InteractiveSealRandomness - Epoch abi.ChainEpoch -} - -func (st *SealTicket) Equals(ost *SealTicket) bool { - return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch -} - -func (st *SealSeed) Equals(ost *SealSeed) bool { - return bytes.Equal(st.Value, ost.Value) && st.Epoch == ost.Epoch -} - -type SectorState string diff --git a/vendor/github.com/filecoin-project/lotus/api/api_test.go b/vendor/github.com/filecoin-project/lotus/api/api_test.go deleted file mode 100644 index 1b438258ac..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/api_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package api - -import ( - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" -) - -func goCmd() string { - var exeSuffix string - if runtime.GOOS == "windows" { - exeSuffix = ".exe" - } - path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix) - if _, err := os.Stat(path); err == nil { - return path - } - return "go" -} - -func TestDoesntDependOnFFI(t *testing.T) { - deps, err := exec.Command(goCmd(), "list", "-deps", "github.com/filecoin-project/lotus/api").Output() - if err != nil { - t.Fatal(err) - } - for _, pkg := range strings.Fields(string(deps)) { - if pkg == "github.com/filecoin-project/filecoin-ffi" { - t.Fatal("api depends on filecoin-ffi") - } - } -} diff --git a/vendor/github.com/filecoin-project/lotus/api/api_worker.go b/vendor/github.com/filecoin-project/lotus/api/api_worker.go deleted file mode 100644 index 69a5aed5f7..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/api_worker.go +++ /dev/null @@ -1,36 +0,0 @@ -package api - -import ( - "context" - "io" - - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/sector-storage/sealtasks" - "github.com/filecoin-project/sector-storage/stores" - "github.com/filecoin-project/sector-storage/storiface" - 
"github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/build" -) - -type WorkerAPI interface { - Version(context.Context) (build.Version, error) - // TODO: Info() (name, ...) ? - - TaskTypes(context.Context) (map[sealtasks.TaskType]struct{}, error) // TaskType -> Weight - Paths(context.Context) ([]stores.StoragePath, error) - Info(context.Context) (storiface.WorkerInfo, error) - - storage.Sealer - - MoveStorage(ctx context.Context, sector abi.SectorID) error - - UnsealPiece(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error - ReadPiece(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error - - Fetch(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error - - Closing(context.Context) (<-chan struct{}, error) -} diff --git a/vendor/github.com/filecoin-project/lotus/api/apibstore/apibstore.go b/vendor/github.com/filecoin-project/lotus/api/apibstore/apibstore.go deleted file mode 100644 index 5bd0f0ad7b..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/apibstore/apibstore.go +++ /dev/null @@ -1,67 +0,0 @@ -package apibstore - -import ( - "context" - - blocks "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "golang.org/x/xerrors" -) - -type ChainIO interface { - ChainReadObj(context.Context, cid.Cid) ([]byte, error) - ChainHasObj(context.Context, cid.Cid) (bool, error) -} - -type apiBStore struct { - api ChainIO -} - -func NewAPIBlockstore(cio ChainIO) blockstore.Blockstore { - return &apiBStore{ - api: cio, - } -} - -func (a *apiBStore) DeleteBlock(cid.Cid) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) Has(c cid.Cid) (bool, error) { - return a.api.ChainHasObj(context.TODO(), c) -} - -func (a *apiBStore) Get(c cid.Cid) 
(blocks.Block, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) - if err != nil { - return nil, err - } - return blocks.NewBlockWithCid(bb, c) -} - -func (a *apiBStore) GetSize(c cid.Cid) (int, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) - if err != nil { - return 0, err - } - return len(bb), nil -} - -func (a *apiBStore) Put(blocks.Block) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) PutMany([]blocks.Block) error { - return xerrors.New("not supported") -} - -func (a *apiBStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { - return nil, xerrors.New("not supported") -} - -func (a *apiBStore) HashOnRead(enabled bool) { - return -} - -var _ blockstore.Blockstore = &apiBStore{} diff --git a/vendor/github.com/filecoin-project/lotus/api/apistruct/permissioned.go b/vendor/github.com/filecoin-project/lotus/api/apistruct/permissioned.go deleted file mode 100644 index c936627334..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/apistruct/permissioned.go +++ /dev/null @@ -1,38 +0,0 @@ -package apistruct - -import ( - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api" -) - -const ( - // When changing these, update docs/API.md too - - PermRead auth.Permission = "read" // default - PermWrite auth.Permission = "write" - PermSign auth.Permission = "sign" // Use wallet keys for signing - PermAdmin auth.Permission = "admin" // Manage permissions -) - -var AllPermissions = []auth.Permission{PermRead, PermWrite, PermSign, PermAdmin} -var DefaultPerms = []auth.Permission{PermRead} - -func PermissionedStorMinerAPI(a api.StorageMiner) api.StorageMiner { - var out StorageMinerStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) - return &out -} - -func PermissionedFullAPI(a api.FullNode) api.FullNode { - var out FullNodeStruct - 
auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.CommonStruct.Internal) - return &out -} - -func PermissionedWorkerAPI(a api.WorkerAPI) api.WorkerAPI { - var out WorkerStruct - auth.PermissionedProxy(AllPermissions, DefaultPerms, a, &out.Internal) - return &out -} diff --git a/vendor/github.com/filecoin-project/lotus/api/apistruct/struct.go b/vendor/github.com/filecoin-project/lotus/api/apistruct/struct.go deleted file mode 100644 index db6e0b27b5..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/apistruct/struct.go +++ /dev/null @@ -1,1000 +0,0 @@ -package apistruct - -import ( - "context" - "io" - - "github.com/ipfs/go-cid" - "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/sector-storage/sealtasks" - "github.com/filecoin-project/sector-storage/stores" - "github.com/filecoin-project/sector-storage/storiface" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-storage/storage" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -// All permissions are listed in permissioned.go -var _ = AllPermissions - -type CommonStruct struct { - Internal struct { - AuthVerify func(ctx context.Context, token string) ([]auth.Permission, error) `perm:"read"` - AuthNew func(ctx context.Context, perms []auth.Permission) ([]byte, error) 
`perm:"admin"` - - NetConnectedness func(context.Context, peer.ID) (network.Connectedness, error) `perm:"read"` - NetPeers func(context.Context) ([]peer.AddrInfo, error) `perm:"read"` - NetConnect func(context.Context, peer.AddrInfo) error `perm:"write"` - NetAddrsListen func(context.Context) (peer.AddrInfo, error) `perm:"read"` - NetDisconnect func(context.Context, peer.ID) error `perm:"write"` - NetFindPeer func(context.Context, peer.ID) (peer.AddrInfo, error) `perm:"read"` - NetPubsubScores func(context.Context) ([]api.PubsubScore, error) `perm:"read"` - - ID func(context.Context) (peer.ID, error) `perm:"read"` - Version func(context.Context) (api.Version, error) `perm:"read"` - - LogList func(context.Context) ([]string, error) `perm:"write"` - LogSetLevel func(context.Context, string, string) error `perm:"write"` - - Shutdown func(context.Context) error `perm:"admin"` - Closing func(context.Context) (<-chan struct{}, error) `perm:"read"` - } -} - -// FullNodeStruct implements API passing calls to user-provided function values. 
-type FullNodeStruct struct { - CommonStruct - - Internal struct { - ChainNotify func(context.Context) (<-chan []*api.HeadChange, error) `perm:"read"` - ChainHead func(context.Context) (*types.TipSet, error) `perm:"read"` - ChainGetRandomness func(context.Context, types.TipSetKey, crypto.DomainSeparationTag, abi.ChainEpoch, []byte) (abi.Randomness, error) `perm:"read"` - ChainGetBlock func(context.Context, cid.Cid) (*types.BlockHeader, error) `perm:"read"` - ChainGetTipSet func(context.Context, types.TipSetKey) (*types.TipSet, error) `perm:"read"` - ChainGetBlockMessages func(context.Context, cid.Cid) (*api.BlockMessages, error) `perm:"read"` - ChainGetParentReceipts func(context.Context, cid.Cid) ([]*types.MessageReceipt, error) `perm:"read"` - ChainGetParentMessages func(context.Context, cid.Cid) ([]api.Message, error) `perm:"read"` - ChainGetTipSetByHeight func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) `perm:"read"` - ChainReadObj func(context.Context, cid.Cid) ([]byte, error) `perm:"read"` - ChainHasObj func(context.Context, cid.Cid) (bool, error) `perm:"read"` - ChainStatObj func(context.Context, cid.Cid, cid.Cid) (api.ObjStat, error) `perm:"read"` - ChainSetHead func(context.Context, types.TipSetKey) error `perm:"admin"` - ChainGetGenesis func(context.Context) (*types.TipSet, error) `perm:"read"` - ChainTipSetWeight func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"` - ChainGetNode func(ctx context.Context, p string) (*api.IpldObject, error) `perm:"read"` - ChainGetMessage func(context.Context, cid.Cid) (*types.Message, error) `perm:"read"` - ChainGetPath func(context.Context, types.TipSetKey, types.TipSetKey) ([]*api.HeadChange, error) `perm:"read"` - ChainExport func(context.Context, types.TipSetKey) (<-chan []byte, error) `perm:"read"` - - SyncState func(context.Context) (*api.SyncState, error) `perm:"read"` - SyncSubmitBlock func(ctx context.Context, blk *types.BlockMsg) error `perm:"write"` - 
SyncIncomingBlocks func(ctx context.Context) (<-chan *types.BlockHeader, error) `perm:"read"` - SyncMarkBad func(ctx context.Context, bcid cid.Cid) error `perm:"admin"` - SyncCheckBad func(ctx context.Context, bcid cid.Cid) (string, error) `perm:"read"` - - MpoolPending func(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) `perm:"read"` - MpoolPush func(context.Context, *types.SignedMessage) (cid.Cid, error) `perm:"write"` - MpoolPushMessage func(context.Context, *types.Message) (*types.SignedMessage, error) `perm:"sign"` - MpoolGetNonce func(context.Context, address.Address) (uint64, error) `perm:"read"` - MpoolSub func(context.Context) (<-chan api.MpoolUpdate, error) `perm:"read"` - MpoolEstimateGasPrice func(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) `perm:"read"` - - MinerGetBaseInfo func(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) `perm:"read"` - MinerCreateBlock func(context.Context, *api.BlockTemplate) (*types.BlockMsg, error) `perm:"write"` - - WalletNew func(context.Context, crypto.SigType) (address.Address, error) `perm:"write"` - WalletHas func(context.Context, address.Address) (bool, error) `perm:"write"` - WalletList func(context.Context) ([]address.Address, error) `perm:"write"` - WalletBalance func(context.Context, address.Address) (types.BigInt, error) `perm:"read"` - WalletSign func(context.Context, address.Address, []byte) (*crypto.Signature, error) `perm:"sign"` - WalletSignMessage func(context.Context, address.Address, *types.Message) (*types.SignedMessage, error) `perm:"sign"` - WalletVerify func(context.Context, address.Address, []byte, *crypto.Signature) bool `perm:"read"` - WalletDefaultAddress func(context.Context) (address.Address, error) `perm:"write"` - WalletSetDefault func(context.Context, address.Address) error `perm:"admin"` - WalletExport func(context.Context, address.Address) (*types.KeyInfo, error) `perm:"admin"` - 
WalletImport func(context.Context, *types.KeyInfo) (address.Address, error) `perm:"admin"` - WalletDelete func(context.Context, address.Address) error `perm:"write"` - - ClientImport func(ctx context.Context, ref api.FileRef) (cid.Cid, error) `perm:"admin"` - ClientListImports func(ctx context.Context) ([]api.Import, error) `perm:"write"` - ClientHasLocal func(ctx context.Context, root cid.Cid) (bool, error) `perm:"write"` - ClientFindData func(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) `perm:"read"` - ClientMinerQueryOffer func(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error) `perm:"read"` - ClientStartDeal func(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` - ClientGetDealInfo func(context.Context, cid.Cid) (*api.DealInfo, error) `perm:"read"` - ClientListDeals func(ctx context.Context) ([]api.DealInfo, error) `perm:"write"` - ClientRetrieve func(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error `perm:"admin"` - ClientQueryAsk func(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) `perm:"read"` - ClientCalcCommP func(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) `perm:"read"` - ClientGenCar func(ctx context.Context, ref api.FileRef, outpath string) error `perm:"write"` - - StateNetworkName func(context.Context) (dtypes.NetworkName, error) `perm:"read"` - StateMinerSectors func(context.Context, address.Address, *abi.BitField, bool, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"` - StateMinerProvingSet func(context.Context, address.Address, types.TipSetKey) ([]*api.ChainSectorInfo, error) `perm:"read"` - StateMinerProvingDeadline func(context.Context, address.Address, types.TipSetKey) (*miner.DeadlineInfo, error) `perm:"read"` - StateMinerPower func(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) `perm:"read"` - StateMinerInfo 
func(context.Context, address.Address, types.TipSetKey) (api.MinerInfo, error) `perm:"read"` - StateMinerDeadlines func(context.Context, address.Address, types.TipSetKey) (*miner.Deadlines, error) `perm:"read"` - StateMinerFaults func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"` - StateAllMinerFaults func(context.Context, abi.ChainEpoch, types.TipSetKey) ([]*api.Fault, error) `perm:"read"` - StateMinerRecoveries func(context.Context, address.Address, types.TipSetKey) (*abi.BitField, error) `perm:"read"` - StateMinerInitialPledgeCollateral func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (types.BigInt, error) `perm:"read"` - StateMinerAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` - StateSectorPreCommitInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) `perm:"read"` - StateSectorGetInfo func(context.Context, address.Address, abi.SectorNumber, types.TipSetKey) (*miner.SectorOnChainInfo, error) `perm:"read"` - StateCall func(context.Context, *types.Message, types.TipSetKey) (*api.InvocResult, error) `perm:"read"` - StateReplay func(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) `perm:"read"` - StateGetActor func(context.Context, address.Address, types.TipSetKey) (*types.Actor, error) `perm:"read"` - StateReadState func(context.Context, address.Address, types.TipSetKey) (*api.ActorState, error) `perm:"read"` - StatePledgeCollateral func(context.Context, types.TipSetKey) (types.BigInt, error) `perm:"read"` - StateWaitMsg func(ctx context.Context, cid cid.Cid, confidence uint64) (*api.MsgLookup, error) `perm:"read"` - StateSearchMsg func(context.Context, cid.Cid) (*api.MsgLookup, error) `perm:"read"` - StateListMiners func(context.Context, types.TipSetKey) ([]address.Address, error) `perm:"read"` - StateListActors func(context.Context, types.TipSetKey) 
([]address.Address, error) `perm:"read"` - StateMarketBalance func(context.Context, address.Address, types.TipSetKey) (api.MarketBalance, error) `perm:"read"` - StateMarketParticipants func(context.Context, types.TipSetKey) (map[string]api.MarketBalance, error) `perm:"read"` - StateMarketDeals func(context.Context, types.TipSetKey) (map[string]api.MarketDeal, error) `perm:"read"` - StateMarketStorageDeal func(context.Context, abi.DealID, types.TipSetKey) (*api.MarketDeal, error) `perm:"read"` - StateLookupID func(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) `perm:"read"` - StateAccountKey func(context.Context, address.Address, types.TipSetKey) (address.Address, error) `perm:"read"` - StateChangedActors func(context.Context, cid.Cid, cid.Cid) (map[string]types.Actor, error) `perm:"read"` - StateGetReceipt func(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) `perm:"read"` - StateMinerSectorCount func(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) `perm:"read"` - StateListMessages func(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) `perm:"read"` - StateCompute func(context.Context, abi.ChainEpoch, []*types.Message, types.TipSetKey) (*api.ComputeStateOutput, error) `perm:"read"` - - MsigGetAvailableBalance func(context.Context, address.Address, types.TipSetKey) (types.BigInt, error) `perm:"read"` - MsigCreate func(context.Context, int64, []address.Address, types.BigInt, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"` - MsigPropose func(context.Context, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` - MsigApprove func(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` - MsigCancel func(context.Context, address.Address, uint64, 
address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (cid.Cid, error) `perm:"sign"` - - MarketEnsureAvailable func(context.Context, address.Address, address.Address, types.BigInt) (cid.Cid, error) `perm:"sign"` - - PaychGet func(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*api.ChannelInfo, error) `perm:"sign"` - PaychList func(context.Context) ([]address.Address, error) `perm:"read"` - PaychStatus func(context.Context, address.Address) (*api.PaychStatus, error) `perm:"read"` - PaychClose func(context.Context, address.Address) (cid.Cid, error) `perm:"sign"` - PaychAllocateLane func(context.Context, address.Address) (uint64, error) `perm:"sign"` - PaychNewPayment func(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) `perm:"sign"` - PaychVoucherCheck func(context.Context, *paych.SignedVoucher) error `perm:"read"` - PaychVoucherCheckValid func(context.Context, address.Address, *paych.SignedVoucher) error `perm:"read"` - PaychVoucherCheckSpendable func(context.Context, address.Address, *paych.SignedVoucher, []byte, []byte) (bool, error) `perm:"read"` - PaychVoucherAdd func(context.Context, address.Address, *paych.SignedVoucher, []byte, types.BigInt) (types.BigInt, error) `perm:"write"` - PaychVoucherCreate func(context.Context, address.Address, big.Int, uint64) (*paych.SignedVoucher, error) `perm:"sign"` - PaychVoucherList func(context.Context, address.Address) ([]*paych.SignedVoucher, error) `perm:"write"` - PaychVoucherSubmit func(context.Context, address.Address, *paych.SignedVoucher) (cid.Cid, error) `perm:"sign"` - } -} - -func (c *FullNodeStruct) StateMinerSectorCount(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MinerSectors, error) { - return c.Internal.StateMinerSectorCount(ctx, addr, tsk) -} - -type StorageMinerStruct struct { - CommonStruct - - Internal struct { - ActorAddress func(context.Context) (address.Address, error) 
`perm:"read"` - ActorSectorSize func(context.Context, address.Address) (abi.SectorSize, error) `perm:"read"` - - MiningBase func(context.Context) (*types.TipSet, error) `perm:"read"` - - MarketImportDealData func(context.Context, cid.Cid, string) error `perm:"write"` - MarketListDeals func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"` - MarketListIncompleteDeals func(ctx context.Context) ([]storagemarket.MinerDeal, error) `perm:"read"` - MarketSetAsk func(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error `perm:"admin"` - MarketGetAsk func(ctx context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` - - PledgeSector func(context.Context) error `perm:"write"` - - SectorsStatus func(context.Context, abi.SectorNumber) (api.SectorInfo, error) `perm:"read"` - SectorsList func(context.Context) ([]abi.SectorNumber, error) `perm:"read"` - SectorsRefs func(context.Context) (map[string][]api.SealedRef, error) `perm:"read"` - SectorsUpdate func(context.Context, abi.SectorNumber, api.SectorState) error `perm:"write"` - SectorRemove func(context.Context, abi.SectorNumber) error `perm:"admin"` - - WorkerConnect func(context.Context, string) error `perm:"admin"` // TODO: worker perm - WorkerStats func(context.Context) (map[uint64]storiface.WorkerStats, error) `perm:"admin"` - - StorageList func(context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` - StorageLocal func(context.Context) (map[stores.ID]string, error) `perm:"admin"` - StorageStat func(context.Context, stores.ID) (stores.FsStat, error) `perm:"admin"` - StorageAttach func(context.Context, stores.StorageInfo, stores.FsStat) error `perm:"admin"` - StorageDeclareSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType, bool) error `perm:"admin"` - StorageDropSector func(context.Context, stores.ID, abi.SectorID, stores.SectorFileType) error `perm:"admin"` - 
StorageFindSector func(context.Context, abi.SectorID, stores.SectorFileType, bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` - StorageInfo func(context.Context, stores.ID) (stores.StorageInfo, error) `perm:"admin"` - StorageBestAlloc func(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, sealing stores.PathType) ([]stores.StorageInfo, error) `perm:"admin"` - StorageReportHealth func(ctx context.Context, id stores.ID, report stores.HealthReport) error `perm:"admin"` - StorageLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error `perm:"admin"` - StorageTryLock func(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) `perm:"admin"` - - DealsImportData func(ctx context.Context, dealPropCid cid.Cid, file string) error `perm:"write"` - DealsList func(ctx context.Context) ([]storagemarket.StorageDeal, error) `perm:"read"` - DealsConsiderOnlineStorageDeals func(context.Context) (bool, error) `perm:"read"` - DealsSetConsiderOnlineStorageDeals func(context.Context, bool) error `perm:"admin"` - DealsConsiderOnlineRetrievalDeals func(context.Context) (bool, error) `perm:"read"` - DealsSetConsiderOnlineRetrievalDeals func(context.Context, bool) error `perm:"admin"` - DealsConsiderOfflineStorageDeals func(context.Context) (bool, error) `perm:"read"` - DealsSetConsiderOfflineStorageDeals func(context.Context, bool) error `perm:"admin"` - DealsConsiderOfflineRetrievalDeals func(context.Context) (bool, error) `perm:"read"` - DealsSetConsiderOfflineRetrievalDeals func(context.Context, bool) error `perm:"admin"` - DealsPieceCidBlocklist func(context.Context) ([]cid.Cid, error) `perm:"read"` - DealsSetPieceCidBlocklist func(context.Context, []cid.Cid) error `perm:"admin"` - - StorageAddLocal func(ctx context.Context, path string) error `perm:"admin"` - } -} - -type WorkerStruct struct { - Internal struct { - // TODO: lower 
perms - - Version func(context.Context) (build.Version, error) `perm:"admin"` - - TaskTypes func(context.Context) (map[sealtasks.TaskType]struct{}, error) `perm:"admin"` - Paths func(context.Context) ([]stores.StoragePath, error) `perm:"admin"` - Info func(context.Context) (storiface.WorkerInfo, error) `perm:"admin"` - - SealPreCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) `perm:"admin"` - SealPreCommit2 func(context.Context, abi.SectorID, storage.PreCommit1Out) (cids storage.SectorCids, err error) `perm:"admin"` - SealCommit1 func(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) `perm:"admin"` - SealCommit2 func(context.Context, abi.SectorID, storage.Commit1Out) (storage.Proof, error) `perm:"admin"` - FinalizeSector func(context.Context, abi.SectorID, []storage.Range) error `perm:"admin"` - ReleaseUnsealed func(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error `perm:"admin"` - Remove func(ctx context.Context, sector abi.SectorID) error `perm:"admin"` - MoveStorage func(ctx context.Context, sector abi.SectorID) error `perm:"admin"` - - UnsealPiece func(context.Context, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) error `perm:"admin"` - ReadPiece func(context.Context, io.Writer, abi.SectorID, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize) error `perm:"admin"` - - Fetch func(context.Context, abi.SectorID, stores.SectorFileType, stores.PathType, stores.AcquireMode) error `perm:"admin"` - - Closing func(context.Context) (<-chan struct{}, error) `perm:"admin"` - } -} - -// CommonStruct - -func (c *CommonStruct) AuthVerify(ctx context.Context, token string) ([]auth.Permission, error) { - return c.Internal.AuthVerify(ctx, token) -} - -func (c *CommonStruct) AuthNew(ctx 
context.Context, perms []auth.Permission) ([]byte, error) { - return c.Internal.AuthNew(ctx, perms) -} - -func (c *CommonStruct) NetPubsubScores(ctx context.Context) ([]api.PubsubScore, error) { - return c.Internal.NetPubsubScores(ctx) -} -func (c *CommonStruct) NetConnectedness(ctx context.Context, pid peer.ID) (network.Connectedness, error) { - return c.Internal.NetConnectedness(ctx, pid) -} - -func (c *CommonStruct) NetPeers(ctx context.Context) ([]peer.AddrInfo, error) { - return c.Internal.NetPeers(ctx) -} - -func (c *CommonStruct) NetConnect(ctx context.Context, p peer.AddrInfo) error { - return c.Internal.NetConnect(ctx, p) -} - -func (c *CommonStruct) NetAddrsListen(ctx context.Context) (peer.AddrInfo, error) { - return c.Internal.NetAddrsListen(ctx) -} - -func (c *CommonStruct) NetDisconnect(ctx context.Context, p peer.ID) error { - return c.Internal.NetDisconnect(ctx, p) -} - -func (c *CommonStruct) NetFindPeer(ctx context.Context, p peer.ID) (peer.AddrInfo, error) { - return c.Internal.NetFindPeer(ctx, p) -} - -// ID implements API.ID -func (c *CommonStruct) ID(ctx context.Context) (peer.ID, error) { - return c.Internal.ID(ctx) -} - -// Version implements API.Version -func (c *CommonStruct) Version(ctx context.Context) (api.Version, error) { - return c.Internal.Version(ctx) -} - -func (c *CommonStruct) LogList(ctx context.Context) ([]string, error) { - return c.Internal.LogList(ctx) -} - -func (c *CommonStruct) LogSetLevel(ctx context.Context, group, level string) error { - return c.Internal.LogSetLevel(ctx, group, level) -} - -func (c *CommonStruct) Shutdown(ctx context.Context) error { - return c.Internal.Shutdown(ctx) -} - -func (c *CommonStruct) Closing(ctx context.Context) (<-chan struct{}, error) { - return c.Internal.Closing(ctx) -} - -// FullNodeStruct - -func (c *FullNodeStruct) ClientListImports(ctx context.Context) ([]api.Import, error) { - return c.Internal.ClientListImports(ctx) -} - -func (c *FullNodeStruct) ClientImport(ctx 
context.Context, ref api.FileRef) (cid.Cid, error) { - return c.Internal.ClientImport(ctx, ref) -} - -func (c *FullNodeStruct) ClientHasLocal(ctx context.Context, root cid.Cid) (bool, error) { - return c.Internal.ClientHasLocal(ctx, root) -} - -func (c *FullNodeStruct) ClientFindData(ctx context.Context, root cid.Cid) ([]api.QueryOffer, error) { - return c.Internal.ClientFindData(ctx, root) -} - -func (c *FullNodeStruct) ClientMinerQueryOffer(ctx context.Context, root cid.Cid, miner address.Address) (api.QueryOffer, error) { - return c.Internal.ClientMinerQueryOffer(ctx, root, miner) -} - -func (c *FullNodeStruct) ClientStartDeal(ctx context.Context, params *api.StartDealParams) (*cid.Cid, error) { - return c.Internal.ClientStartDeal(ctx, params) -} -func (c *FullNodeStruct) ClientGetDealInfo(ctx context.Context, deal cid.Cid) (*api.DealInfo, error) { - return c.Internal.ClientGetDealInfo(ctx, deal) -} - -func (c *FullNodeStruct) ClientListDeals(ctx context.Context) ([]api.DealInfo, error) { - return c.Internal.ClientListDeals(ctx) -} - -func (c *FullNodeStruct) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error { - return c.Internal.ClientRetrieve(ctx, order, ref) -} - -func (c *FullNodeStruct) ClientQueryAsk(ctx context.Context, p peer.ID, miner address.Address) (*storagemarket.SignedStorageAsk, error) { - return c.Internal.ClientQueryAsk(ctx, p, miner) -} -func (c *FullNodeStruct) ClientCalcCommP(ctx context.Context, inpath string, miner address.Address) (*api.CommPRet, error) { - return c.Internal.ClientCalcCommP(ctx, inpath, miner) -} - -func (c *FullNodeStruct) ClientGenCar(ctx context.Context, ref api.FileRef, outpath string) error { - return c.Internal.ClientGenCar(ctx, ref, outpath) -} - -func (c *FullNodeStruct) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { - return c.Internal.MpoolPending(ctx, tsk) -} - -func (c *FullNodeStruct) MpoolPush(ctx context.Context, smsg 
*types.SignedMessage) (cid.Cid, error) { - return c.Internal.MpoolPush(ctx, smsg) -} - -func (c *FullNodeStruct) MpoolPushMessage(ctx context.Context, msg *types.Message) (*types.SignedMessage, error) { - return c.Internal.MpoolPushMessage(ctx, msg) -} - -func (c *FullNodeStruct) MpoolSub(ctx context.Context) (<-chan api.MpoolUpdate, error) { - return c.Internal.MpoolSub(ctx) -} - -func (c *FullNodeStruct) MpoolEstimateGasPrice(ctx context.Context, nblocksincl uint64, sender address.Address, limit int64, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.MpoolEstimateGasPrice(ctx, nblocksincl, sender, limit, tsk) -} - -func (c *FullNodeStruct) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { - return c.Internal.MinerGetBaseInfo(ctx, maddr, epoch, tsk) -} - -func (c *FullNodeStruct) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) (*types.BlockMsg, error) { - return c.Internal.MinerCreateBlock(ctx, bt) -} - -func (c *FullNodeStruct) ChainHead(ctx context.Context) (*types.TipSet, error) { - return c.Internal.ChainHead(ctx) -} - -func (c *FullNodeStruct) ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - return c.Internal.ChainGetRandomness(ctx, tsk, personalization, randEpoch, entropy) -} - -func (c *FullNodeStruct) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - return c.Internal.ChainGetTipSetByHeight(ctx, h, tsk) -} - -func (c *FullNodeStruct) WalletNew(ctx context.Context, typ crypto.SigType) (address.Address, error) { - return c.Internal.WalletNew(ctx, typ) -} - -func (c *FullNodeStruct) WalletHas(ctx context.Context, addr address.Address) (bool, error) { - return c.Internal.WalletHas(ctx, addr) -} - -func (c *FullNodeStruct) WalletList(ctx context.Context) 
([]address.Address, error) { - return c.Internal.WalletList(ctx) -} - -func (c *FullNodeStruct) WalletBalance(ctx context.Context, a address.Address) (types.BigInt, error) { - return c.Internal.WalletBalance(ctx, a) -} - -func (c *FullNodeStruct) WalletSign(ctx context.Context, k address.Address, msg []byte) (*crypto.Signature, error) { - return c.Internal.WalletSign(ctx, k, msg) -} - -func (c *FullNodeStruct) WalletSignMessage(ctx context.Context, k address.Address, msg *types.Message) (*types.SignedMessage, error) { - return c.Internal.WalletSignMessage(ctx, k, msg) -} - -func (c *FullNodeStruct) WalletVerify(ctx context.Context, k address.Address, msg []byte, sig *crypto.Signature) bool { - return c.Internal.WalletVerify(ctx, k, msg, sig) -} - -func (c *FullNodeStruct) WalletDefaultAddress(ctx context.Context) (address.Address, error) { - return c.Internal.WalletDefaultAddress(ctx) -} - -func (c *FullNodeStruct) WalletSetDefault(ctx context.Context, a address.Address) error { - return c.Internal.WalletSetDefault(ctx, a) -} - -func (c *FullNodeStruct) WalletExport(ctx context.Context, a address.Address) (*types.KeyInfo, error) { - return c.Internal.WalletExport(ctx, a) -} - -func (c *FullNodeStruct) WalletImport(ctx context.Context, ki *types.KeyInfo) (address.Address, error) { - return c.Internal.WalletImport(ctx, ki) -} - -func (c *FullNodeStruct) WalletDelete(ctx context.Context, addr address.Address) error { - return c.Internal.WalletDelete(ctx, addr) -} - -func (c *FullNodeStruct) MpoolGetNonce(ctx context.Context, addr address.Address) (uint64, error) { - return c.Internal.MpoolGetNonce(ctx, addr) -} - -func (c *FullNodeStruct) ChainGetBlock(ctx context.Context, b cid.Cid) (*types.BlockHeader, error) { - return c.Internal.ChainGetBlock(ctx, b) -} - -func (c *FullNodeStruct) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { - return c.Internal.ChainGetTipSet(ctx, key) -} - -func (c *FullNodeStruct) ChainGetBlockMessages(ctx 
context.Context, b cid.Cid) (*api.BlockMessages, error) { - return c.Internal.ChainGetBlockMessages(ctx, b) -} - -func (c *FullNodeStruct) ChainGetParentReceipts(ctx context.Context, b cid.Cid) ([]*types.MessageReceipt, error) { - return c.Internal.ChainGetParentReceipts(ctx, b) -} - -func (c *FullNodeStruct) ChainGetParentMessages(ctx context.Context, b cid.Cid) ([]api.Message, error) { - return c.Internal.ChainGetParentMessages(ctx, b) -} - -func (c *FullNodeStruct) ChainNotify(ctx context.Context) (<-chan []*api.HeadChange, error) { - return c.Internal.ChainNotify(ctx) -} - -func (c *FullNodeStruct) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - return c.Internal.ChainReadObj(ctx, obj) -} - -func (c *FullNodeStruct) ChainHasObj(ctx context.Context, o cid.Cid) (bool, error) { - return c.Internal.ChainHasObj(ctx, o) -} - -func (c *FullNodeStruct) ChainStatObj(ctx context.Context, obj, base cid.Cid) (api.ObjStat, error) { - return c.Internal.ChainStatObj(ctx, obj, base) -} - -func (c *FullNodeStruct) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error { - return c.Internal.ChainSetHead(ctx, tsk) -} - -func (c *FullNodeStruct) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { - return c.Internal.ChainGetGenesis(ctx) -} - -func (c *FullNodeStruct) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.ChainTipSetWeight(ctx, tsk) -} - -func (c *FullNodeStruct) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) { - return c.Internal.ChainGetNode(ctx, p) -} - -func (c *FullNodeStruct) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - return c.Internal.ChainGetMessage(ctx, mc) -} - -func (c *FullNodeStruct) ChainGetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { - return c.Internal.ChainGetPath(ctx, from, to) -} - -func (c *FullNodeStruct) ChainExport(ctx context.Context, tsk 
types.TipSetKey) (<-chan []byte, error) { - return c.Internal.ChainExport(ctx, tsk) -} - -func (c *FullNodeStruct) SyncState(ctx context.Context) (*api.SyncState, error) { - return c.Internal.SyncState(ctx) -} - -func (c *FullNodeStruct) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error { - return c.Internal.SyncSubmitBlock(ctx, blk) -} - -func (c *FullNodeStruct) SyncIncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { - return c.Internal.SyncIncomingBlocks(ctx) -} - -func (c *FullNodeStruct) SyncMarkBad(ctx context.Context, bcid cid.Cid) error { - return c.Internal.SyncMarkBad(ctx, bcid) -} - -func (c *FullNodeStruct) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error) { - return c.Internal.SyncCheckBad(ctx, bcid) -} - -func (c *FullNodeStruct) StateNetworkName(ctx context.Context) (dtypes.NetworkName, error) { - return c.Internal.StateNetworkName(ctx) -} - -func (c *FullNodeStruct) StateMinerSectors(ctx context.Context, addr address.Address, filter *abi.BitField, filterOut bool, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) { - return c.Internal.StateMinerSectors(ctx, addr, filter, filterOut, tsk) -} - -func (c *FullNodeStruct) StateMinerProvingSet(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.ChainSectorInfo, error) { - return c.Internal.StateMinerProvingSet(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*miner.DeadlineInfo, error) { - return c.Internal.StateMinerProvingDeadline(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateMinerPower(ctx context.Context, a address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - return c.Internal.StateMinerPower(ctx, a, tsk) -} - -func (c *FullNodeStruct) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (api.MinerInfo, error) { - return c.Internal.StateMinerInfo(ctx, actor, tsk) -} - -func (c *FullNodeStruct) 
StateMinerDeadlines(ctx context.Context, m address.Address, tsk types.TipSetKey) (*miner.Deadlines, error) { - return c.Internal.StateMinerDeadlines(ctx, m, tsk) -} - -func (c *FullNodeStruct) StateMinerFaults(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*abi.BitField, error) { - return c.Internal.StateMinerFaults(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateAllMinerFaults(ctx context.Context, cutoff abi.ChainEpoch, endTsk types.TipSetKey) ([]*api.Fault, error) { - return c.Internal.StateAllMinerFaults(ctx, cutoff, endTsk) -} - -func (c *FullNodeStruct) StateMinerRecoveries(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*abi.BitField, error) { - return c.Internal.StateMinerRecoveries(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, snum abi.SectorNumber, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.StateMinerInitialPledgeCollateral(ctx, maddr, snum, tsk) -} - -func (c *FullNodeStruct) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.StateMinerAvailableBalance(ctx, maddr, tsk) -} - -func (c *FullNodeStruct) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { - return c.Internal.StateSectorPreCommitInfo(ctx, maddr, n, tsk) -} - -func (c *FullNodeStruct) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { - return c.Internal.StateSectorGetInfo(ctx, maddr, n, tsk) -} - -func (c *FullNodeStruct) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (*api.InvocResult, error) { - return c.Internal.StateCall(ctx, msg, tsk) -} - -func (c *FullNodeStruct) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid.Cid) (*api.InvocResult, 
error) { - return c.Internal.StateReplay(ctx, tsk, mc) -} - -func (c *FullNodeStruct) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - return c.Internal.StateGetActor(ctx, actor, tsk) -} - -func (c *FullNodeStruct) StateReadState(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.ActorState, error) { - return c.Internal.StateReadState(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StatePledgeCollateral(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.StatePledgeCollateral(ctx, tsk) -} - -func (c *FullNodeStruct) StateWaitMsg(ctx context.Context, msgc cid.Cid, confidence uint64) (*api.MsgLookup, error) { - return c.Internal.StateWaitMsg(ctx, msgc, confidence) -} - -func (c *FullNodeStruct) StateSearchMsg(ctx context.Context, msgc cid.Cid) (*api.MsgLookup, error) { - return c.Internal.StateSearchMsg(ctx, msgc) -} - -func (c *FullNodeStruct) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - return c.Internal.StateListMiners(ctx, tsk) -} - -func (c *FullNodeStruct) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - return c.Internal.StateListActors(ctx, tsk) -} - -func (c *FullNodeStruct) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MarketBalance, error) { - return c.Internal.StateMarketBalance(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) { - return c.Internal.StateMarketParticipants(ctx, tsk) -} - -func (c *FullNodeStruct) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) { - return c.Internal.StateMarketDeals(ctx, tsk) -} - -func (c *FullNodeStruct) StateMarketStorageDeal(ctx context.Context, dealid abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - return 
c.Internal.StateMarketStorageDeal(ctx, dealid, tsk) -} - -func (c *FullNodeStruct) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - return c.Internal.StateLookupID(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - return c.Internal.StateAccountKey(ctx, addr, tsk) -} - -func (c *FullNodeStruct) StateChangedActors(ctx context.Context, olnstate cid.Cid, newstate cid.Cid) (map[string]types.Actor, error) { - return c.Internal.StateChangedActors(ctx, olnstate, newstate) -} - -func (c *FullNodeStruct) StateGetReceipt(ctx context.Context, msg cid.Cid, tsk types.TipSetKey) (*types.MessageReceipt, error) { - return c.Internal.StateGetReceipt(ctx, msg, tsk) -} - -func (c *FullNodeStruct) StateListMessages(ctx context.Context, match *types.Message, tsk types.TipSetKey, toht abi.ChainEpoch) ([]cid.Cid, error) { - return c.Internal.StateListMessages(ctx, match, tsk, toht) -} - -func (c *FullNodeStruct) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) { - return c.Internal.StateCompute(ctx, height, msgs, tsk) -} - -func (c *FullNodeStruct) MsigGetAvailableBalance(ctx context.Context, a address.Address, tsk types.TipSetKey) (types.BigInt, error) { - return c.Internal.MsigGetAvailableBalance(ctx, a, tsk) -} - -func (c *FullNodeStruct) MsigCreate(ctx context.Context, req int64, addrs []address.Address, val types.BigInt, src address.Address, gp types.BigInt) (cid.Cid, error) { - return c.Internal.MsigCreate(ctx, req, addrs, val, src, gp) -} - -func (c *FullNodeStruct) MsigPropose(ctx context.Context, msig address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - return c.Internal.MsigPropose(ctx, msig, to, amt, src, method, params) -} - -func (c *FullNodeStruct) 
MsigApprove(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - return c.Internal.MsigApprove(ctx, msig, txID, proposer, to, amt, src, method, params) -} - -func (c *FullNodeStruct) MsigCancel(ctx context.Context, msig address.Address, txID uint64, proposer address.Address, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - return c.Internal.MsigCancel(ctx, msig, txID, proposer, to, amt, src, method, params) -} - -func (c *FullNodeStruct) MarketEnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { - return c.Internal.MarketEnsureAvailable(ctx, addr, wallet, amt) -} - -func (c *FullNodeStruct) PaychGet(ctx context.Context, from, to address.Address, ensureFunds types.BigInt) (*api.ChannelInfo, error) { - return c.Internal.PaychGet(ctx, from, to, ensureFunds) -} - -func (c *FullNodeStruct) PaychList(ctx context.Context) ([]address.Address, error) { - return c.Internal.PaychList(ctx) -} - -func (c *FullNodeStruct) PaychStatus(ctx context.Context, pch address.Address) (*api.PaychStatus, error) { - return c.Internal.PaychStatus(ctx, pch) -} - -func (c *FullNodeStruct) PaychVoucherCheckValid(ctx context.Context, addr address.Address, sv *paych.SignedVoucher) error { - return c.Internal.PaychVoucherCheckValid(ctx, addr, sv) -} - -func (c *FullNodeStruct) PaychVoucherCheckSpendable(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, secret []byte, proof []byte) (bool, error) { - return c.Internal.PaychVoucherCheckSpendable(ctx, addr, sv, secret, proof) -} - -func (c *FullNodeStruct) PaychVoucherAdd(ctx context.Context, addr address.Address, sv *paych.SignedVoucher, proof []byte, minDelta types.BigInt) (types.BigInt, error) { - return c.Internal.PaychVoucherAdd(ctx, addr, sv, proof, minDelta) -} - -func (c 
*FullNodeStruct) PaychVoucherCreate(ctx context.Context, pch address.Address, amt types.BigInt, lane uint64) (*paych.SignedVoucher, error) { - return c.Internal.PaychVoucherCreate(ctx, pch, amt, lane) -} - -func (c *FullNodeStruct) PaychVoucherList(ctx context.Context, pch address.Address) ([]*paych.SignedVoucher, error) { - return c.Internal.PaychVoucherList(ctx, pch) -} - -func (c *FullNodeStruct) PaychClose(ctx context.Context, a address.Address) (cid.Cid, error) { - return c.Internal.PaychClose(ctx, a) -} - -func (c *FullNodeStruct) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) { - return c.Internal.PaychAllocateLane(ctx, ch) -} - -func (c *FullNodeStruct) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) { - return c.Internal.PaychNewPayment(ctx, from, to, vouchers) -} - -func (c *FullNodeStruct) PaychVoucherSubmit(ctx context.Context, ch address.Address, sv *paych.SignedVoucher) (cid.Cid, error) { - return c.Internal.PaychVoucherSubmit(ctx, ch, sv) -} - -// StorageMinerStruct - -func (c *StorageMinerStruct) ActorAddress(ctx context.Context) (address.Address, error) { - return c.Internal.ActorAddress(ctx) -} - -func (c *StorageMinerStruct) MiningBase(ctx context.Context) (*types.TipSet, error) { - return c.Internal.MiningBase(ctx) -} - -func (c *StorageMinerStruct) ActorSectorSize(ctx context.Context, addr address.Address) (abi.SectorSize, error) { - return c.Internal.ActorSectorSize(ctx, addr) -} - -func (c *StorageMinerStruct) PledgeSector(ctx context.Context) error { - return c.Internal.PledgeSector(ctx) -} - -// Get the status of a given sector by ID -func (c *StorageMinerStruct) SectorsStatus(ctx context.Context, sid abi.SectorNumber) (api.SectorInfo, error) { - return c.Internal.SectorsStatus(ctx, sid) -} - -// List all staged sectors -func (c *StorageMinerStruct) SectorsList(ctx context.Context) ([]abi.SectorNumber, error) { - return 
c.Internal.SectorsList(ctx) -} - -func (c *StorageMinerStruct) SectorsRefs(ctx context.Context) (map[string][]api.SealedRef, error) { - return c.Internal.SectorsRefs(ctx) -} - -func (c *StorageMinerStruct) SectorsUpdate(ctx context.Context, id abi.SectorNumber, state api.SectorState) error { - return c.Internal.SectorsUpdate(ctx, id, state) -} - -func (c *StorageMinerStruct) SectorRemove(ctx context.Context, number abi.SectorNumber) error { - return c.Internal.SectorRemove(ctx, number) -} - -func (c *StorageMinerStruct) WorkerConnect(ctx context.Context, url string) error { - return c.Internal.WorkerConnect(ctx, url) -} - -func (c *StorageMinerStruct) WorkerStats(ctx context.Context) (map[uint64]storiface.WorkerStats, error) { - return c.Internal.WorkerStats(ctx) -} - -func (c *StorageMinerStruct) StorageAttach(ctx context.Context, si stores.StorageInfo, st stores.FsStat) error { - return c.Internal.StorageAttach(ctx, si, st) -} - -func (c *StorageMinerStruct) StorageDeclareSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType, primary bool) error { - return c.Internal.StorageDeclareSector(ctx, storageId, s, ft, primary) -} - -func (c *StorageMinerStruct) StorageDropSector(ctx context.Context, storageId stores.ID, s abi.SectorID, ft stores.SectorFileType) error { - return c.Internal.StorageDropSector(ctx, storageId, s, ft) -} - -func (c *StorageMinerStruct) StorageFindSector(ctx context.Context, si abi.SectorID, types stores.SectorFileType, allowFetch bool) ([]stores.SectorStorageInfo, error) { - return c.Internal.StorageFindSector(ctx, si, types, allowFetch) -} - -func (c *StorageMinerStruct) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { - return c.Internal.StorageList(ctx) -} - -func (c *StorageMinerStruct) StorageLocal(ctx context.Context) (map[stores.ID]string, error) { - return c.Internal.StorageLocal(ctx) -} - -func (c *StorageMinerStruct) StorageStat(ctx context.Context, id stores.ID) 
(stores.FsStat, error) { - return c.Internal.StorageStat(ctx, id) -} - -func (c *StorageMinerStruct) StorageInfo(ctx context.Context, id stores.ID) (stores.StorageInfo, error) { - return c.Internal.StorageInfo(ctx, id) -} - -func (c *StorageMinerStruct) StorageBestAlloc(ctx context.Context, allocate stores.SectorFileType, spt abi.RegisteredSealProof, pt stores.PathType) ([]stores.StorageInfo, error) { - return c.Internal.StorageBestAlloc(ctx, allocate, spt, pt) -} - -func (c *StorageMinerStruct) StorageReportHealth(ctx context.Context, id stores.ID, report stores.HealthReport) error { - return c.Internal.StorageReportHealth(ctx, id, report) -} - -func (c *StorageMinerStruct) StorageLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) error { - return c.Internal.StorageLock(ctx, sector, read, write) -} - -func (c *StorageMinerStruct) StorageTryLock(ctx context.Context, sector abi.SectorID, read stores.SectorFileType, write stores.SectorFileType) (bool, error) { - return c.Internal.StorageTryLock(ctx, sector, read, write) -} - -func (c *StorageMinerStruct) MarketImportDealData(ctx context.Context, propcid cid.Cid, path string) error { - return c.Internal.MarketImportDealData(ctx, propcid, path) -} - -func (c *StorageMinerStruct) MarketListDeals(ctx context.Context) ([]storagemarket.StorageDeal, error) { - return c.Internal.MarketListDeals(ctx) -} - -func (c *StorageMinerStruct) MarketListIncompleteDeals(ctx context.Context) ([]storagemarket.MinerDeal, error) { - return c.Internal.MarketListIncompleteDeals(ctx) -} - -func (c *StorageMinerStruct) MarketSetAsk(ctx context.Context, price types.BigInt, duration abi.ChainEpoch, minPieceSize abi.PaddedPieceSize, maxPieceSize abi.PaddedPieceSize) error { - return c.Internal.MarketSetAsk(ctx, price, duration, minPieceSize, maxPieceSize) -} - -func (c *StorageMinerStruct) MarketGetAsk(ctx context.Context) (*storagemarket.SignedStorageAsk, error) { - return 
c.Internal.MarketGetAsk(ctx) -} - -func (c *StorageMinerStruct) DealsImportData(ctx context.Context, dealPropCid cid.Cid, file string) error { - return c.Internal.DealsImportData(ctx, dealPropCid, file) -} - -func (c *StorageMinerStruct) DealsList(ctx context.Context) ([]storagemarket.StorageDeal, error) { - return c.Internal.DealsList(ctx) -} - -func (c *StorageMinerStruct) DealsConsiderOnlineStorageDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOnlineStorageDeals(ctx) -} - -func (c *StorageMinerStruct) DealsSetConsiderOnlineStorageDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOnlineStorageDeals(ctx, b) -} - -func (c *StorageMinerStruct) DealsConsiderOnlineRetrievalDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOnlineRetrievalDeals(ctx) -} - -func (c *StorageMinerStruct) DealsSetConsiderOnlineRetrievalDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOnlineRetrievalDeals(ctx, b) -} - -func (c *StorageMinerStruct) DealsPieceCidBlocklist(ctx context.Context) ([]cid.Cid, error) { - return c.Internal.DealsPieceCidBlocklist(ctx) -} - -func (c *StorageMinerStruct) DealsSetPieceCidBlocklist(ctx context.Context, cids []cid.Cid) error { - return c.Internal.DealsSetPieceCidBlocklist(ctx, cids) -} - -func (c *StorageMinerStruct) DealsConsiderOfflineStorageDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOfflineStorageDeals(ctx) -} - -func (c *StorageMinerStruct) DealsSetConsiderOfflineStorageDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOfflineStorageDeals(ctx, b) -} - -func (c *StorageMinerStruct) DealsConsiderOfflineRetrievalDeals(ctx context.Context) (bool, error) { - return c.Internal.DealsConsiderOfflineRetrievalDeals(ctx) -} - -func (c *StorageMinerStruct) DealsSetConsiderOfflineRetrievalDeals(ctx context.Context, b bool) error { - return c.Internal.DealsSetConsiderOfflineRetrievalDeals(ctx, 
b) -} - -func (c *StorageMinerStruct) StorageAddLocal(ctx context.Context, path string) error { - return c.Internal.StorageAddLocal(ctx, path) -} - -// WorkerStruct - -func (w *WorkerStruct) Version(ctx context.Context) (build.Version, error) { - return w.Internal.Version(ctx) -} - -func (w *WorkerStruct) TaskTypes(ctx context.Context) (map[sealtasks.TaskType]struct{}, error) { - return w.Internal.TaskTypes(ctx) -} - -func (w *WorkerStruct) Paths(ctx context.Context) ([]stores.StoragePath, error) { - return w.Internal.Paths(ctx) -} - -func (w *WorkerStruct) Info(ctx context.Context) (storiface.WorkerInfo, error) { - return w.Internal.Info(ctx) -} - -func (w *WorkerStruct) SealPreCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storage.PreCommit1Out, error) { - return w.Internal.SealPreCommit1(ctx, sector, ticket, pieces) -} - -func (w *WorkerStruct) SealPreCommit2(ctx context.Context, sector abi.SectorID, p1o storage.PreCommit1Out) (storage.SectorCids, error) { - return w.Internal.SealPreCommit2(ctx, sector, p1o) -} - -func (w *WorkerStruct) SealCommit1(ctx context.Context, sector abi.SectorID, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storage.Commit1Out, error) { - return w.Internal.SealCommit1(ctx, sector, ticket, seed, pieces, cids) -} - -func (w *WorkerStruct) SealCommit2(ctx context.Context, sector abi.SectorID, c1o storage.Commit1Out) (storage.Proof, error) { - return w.Internal.SealCommit2(ctx, sector, c1o) -} - -func (w *WorkerStruct) FinalizeSector(ctx context.Context, sector abi.SectorID, keepUnsealed []storage.Range) error { - return w.Internal.FinalizeSector(ctx, sector, keepUnsealed) -} - -func (w *WorkerStruct) ReleaseUnsealed(ctx context.Context, sector abi.SectorID, safeToFree []storage.Range) error { - return w.Internal.ReleaseUnsealed(ctx, sector, safeToFree) -} - -func (w *WorkerStruct) Remove(ctx context.Context, sector 
abi.SectorID) error { - return w.Internal.Remove(ctx, sector) -} - -func (w *WorkerStruct) MoveStorage(ctx context.Context, sector abi.SectorID) error { - return w.Internal.MoveStorage(ctx, sector) -} - -func (w *WorkerStruct) UnsealPiece(ctx context.Context, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, c cid.Cid) error { - return w.Internal.UnsealPiece(ctx, id, index, size, randomness, c) -} - -func (w *WorkerStruct) ReadPiece(ctx context.Context, writer io.Writer, id abi.SectorID, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) error { - return w.Internal.ReadPiece(ctx, writer, id, index, size) -} - -func (w *WorkerStruct) Fetch(ctx context.Context, id abi.SectorID, fileType stores.SectorFileType, ptype stores.PathType, am stores.AcquireMode) error { - return w.Internal.Fetch(ctx, id, fileType, ptype, am) -} - -func (w *WorkerStruct) Closing(ctx context.Context) (<-chan struct{}, error) { - return w.Internal.Closing(ctx) -} - -var _ api.Common = &CommonStruct{} -var _ api.FullNode = &FullNodeStruct{} -var _ api.StorageMiner = &StorageMinerStruct{} -var _ api.WorkerAPI = &WorkerStruct{} diff --git a/vendor/github.com/filecoin-project/lotus/api/apistruct/struct_test.go b/vendor/github.com/filecoin-project/lotus/api/apistruct/struct_test.go deleted file mode 100644 index 9f5f583601..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/apistruct/struct_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package apistruct - -import "testing" - -func TestPermTags(t *testing.T) { - _ = PermissionedFullAPI(&FullNodeStruct{}) - _ = PermissionedStorMinerAPI(&StorageMinerStruct{}) - _ = PermissionedWorkerAPI(&WorkerStruct{}) -} diff --git a/vendor/github.com/filecoin-project/lotus/api/cbor_gen.go b/vendor/github.com/filecoin-project/lotus/api/cbor_gen.go deleted file mode 100644 index 85079d39a5..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/cbor_gen.go +++ /dev/null @@ -1,731 +0,0 
@@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. - -package api - -import ( - "fmt" - "io" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -func (t *PaymentInfo) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{163}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Channel (address.Address) (struct) - if len("Channel") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Channel\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Channel"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Channel"); err != nil { - return err - } - - if err := t.Channel.MarshalCBOR(w); err != nil { - return err - } - - // t.ChannelMessage (cid.Cid) (struct) - if len("ChannelMessage") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"ChannelMessage\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ChannelMessage"))); err != nil { - return err - } - if _, err := io.WriteString(w, "ChannelMessage"); err != nil { - return err - } - - if t.ChannelMessage == nil { - if _, err := w.Write(cbg.CborNull); err != nil { - return err - } - } else { - if err := cbg.WriteCidBuf(scratch, w, *t.ChannelMessage); err != nil { - return xerrors.Errorf("failed to write cid field t.ChannelMessage: %w", err) - } - } - - // t.Vouchers ([]*paych.SignedVoucher) (slice) - if len("Vouchers") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Vouchers\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Vouchers"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Vouchers"); err != nil { - 
return err - } - - if len(t.Vouchers) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Vouchers was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Vouchers))); err != nil { - return err - } - for _, v := range t.Vouchers { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *PaymentInfo) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("PaymentInfo: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.Channel (address.Address) (struct) - case "Channel": - - { - - if err := t.Channel.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Channel: %w", err) - } - - } - // t.ChannelMessage (cid.Cid) (struct) - case "ChannelMessage": - - { - - pb, err := br.PeekByte() - if err != nil { - return err - } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { - return err - } - } else { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ChannelMessage: %w", err) - } - - t.ChannelMessage = &c - } - - } - // t.Vouchers ([]*paych.SignedVoucher) (slice) - case "Vouchers": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Vouchers: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Vouchers = make([]*paych.SignedVoucher, extra) - } - - for 
i := 0; i < int(extra); i++ { - - var v paych.SignedVoucher - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Vouchers[i] = &v - } - - default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) - } - } - - return nil -} -func (t *SealedRef) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{163}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.SectorID (abi.SectorNumber) (uint64) - if len("SectorID") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"SectorID\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("SectorID"))); err != nil { - return err - } - if _, err := io.WriteString(w, "SectorID"); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.SectorID)); err != nil { - return err - } - - // t.Offset (uint64) (uint64) - if len("Offset") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Offset\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Offset"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Offset"); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Offset)); err != nil { - return err - } - - // t.Size (abi.UnpaddedPieceSize) (uint64) - if len("Size") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Size\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Size"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Size"); err != nil { - return err - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Size)); err != nil { - return err - } - - return nil -} - -func (t *SealedRef) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - 
scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("SealedRef: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.SectorID (abi.SectorNumber) (uint64) - case "SectorID": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.SectorID = abi.SectorNumber(extra) - - } - // t.Offset (uint64) (uint64) - case "Offset": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Offset = uint64(extra) - - } - // t.Size (abi.UnpaddedPieceSize) (uint64) - case "Size": - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Size = abi.UnpaddedPieceSize(extra) - - } - - default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) - } - } - - return nil -} -func (t *SealedRefs) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{161}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Refs ([]api.SealedRef) (slice) - if len("Refs") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Refs\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Refs"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Refs"); err 
!= nil { - return err - } - - if len(t.Refs) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Refs was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Refs))); err != nil { - return err - } - for _, v := range t.Refs { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - return nil -} - -func (t *SealedRefs) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("SealedRefs: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.Refs ([]api.SealedRef) (slice) - case "Refs": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Refs: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Refs = make([]SealedRef, extra) - } - - for i := 0; i < int(extra); i++ { - - var v SealedRef - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Refs[i] = v - } - - default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) - } - } - - return nil -} -func (t *SealTicket) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{162}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Value (abi.SealRandomness) (slice) - if len("Value") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Value\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, 
w, cbg.MajTextString, uint64(len("Value"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Value"); err != nil { - return err - } - - if len(t.Value) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Value was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil { - return err - } - - if _, err := w.Write(t.Value); err != nil { - return err - } - - // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Epoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Epoch"); err != nil { - return err - } - - if t.Epoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *SealTicket) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("SealTicket: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.Value (abi.SealRandomness) (slice) - case "Value": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Value: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return 
fmt.Errorf("expected byte array") - } - t.Value = make([]byte, extra) - if _, err := io.ReadFull(br, t.Value); err != nil { - return err - } - // t.Epoch (abi.ChainEpoch) (int64) - case "Epoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Epoch = abi.ChainEpoch(extraI) - } - - default: - return fmt.Errorf("unknown struct field %d: '%s'", i, name) - } - } - - return nil -} -func (t *SealSeed) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write([]byte{162}); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Value (abi.InteractiveSealRandomness) (slice) - if len("Value") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Value\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Value"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Value"); err != nil { - return err - } - - if len(t.Value) > cbg.ByteArrayMaxLen { - return xerrors.Errorf("Byte array in field t.Value was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Value))); err != nil { - return err - } - - if _, err := w.Write(t.Value); err != nil { - return err - } - - // t.Epoch (abi.ChainEpoch) (int64) - if len("Epoch") > cbg.MaxLength { - return xerrors.Errorf("Value in field \"Epoch\" was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("Epoch"))); err != nil { - return err - } - if _, err := io.WriteString(w, "Epoch"); 
err != nil { - return err - } - - if t.Epoch >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { - return err - } - } - return nil -} - -func (t *SealSeed) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajMap { - return fmt.Errorf("cbor input should be of type map") - } - - if extra > cbg.MaxLength { - return fmt.Errorf("SealSeed: map struct too large (%d)", extra) - } - - var name string - n := extra - - for i := uint64(0); i < n; i++ { - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - name = string(sval) - } - - switch name { - // t.Value (abi.InteractiveSealRandomness) (slice) - case "Value": - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.ByteArrayMaxLen { - return fmt.Errorf("t.Value: byte array too large (%d)", extra) - } - if maj != cbg.MajByteString { - return fmt.Errorf("expected byte array") - } - t.Value = make([]byte, extra) - if _, err := io.ReadFull(br, t.Value); err != nil { - return err - } - // t.Epoch (abi.ChainEpoch) (int64) - case "Epoch": - { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 - if err != nil { - return err - } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) - } - - t.Epoch = abi.ChainEpoch(extraI) - } - - default: - return fmt.Errorf("unknown struct field %d: '%s'", i, 
name) - } - } - - return nil -} diff --git a/vendor/github.com/filecoin-project/lotus/api/client/client.go b/vendor/github.com/filecoin-project/lotus/api/client/client.go deleted file mode 100644 index 20bad20483..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/client/client.go +++ /dev/null @@ -1,61 +0,0 @@ -package client - -import ( - "net/http" - - "github.com/filecoin-project/go-jsonrpc" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" -) - -// NewCommonRPC creates a new http jsonrpc client. -func NewCommonRPC(addr string, requestHeader http.Header) (api.Common, jsonrpc.ClientCloser, error) { - var res apistruct.CommonStruct - closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", - []interface{}{ - &res.Internal, - }, - requestHeader, - ) - - return &res, closer, err -} - -// NewFullNodeRPC creates a new http jsonrpc client. -func NewFullNodeRPC(addr string, requestHeader http.Header) (api.FullNode, jsonrpc.ClientCloser, error) { - var res apistruct.FullNodeStruct - closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", - []interface{}{ - &res.CommonStruct.Internal, - &res.Internal, - }, requestHeader) - - return &res, closer, err -} - -// NewStorageMinerRPC creates a new http jsonrpc client for storage miner -func NewStorageMinerRPC(addr string, requestHeader http.Header) (api.StorageMiner, jsonrpc.ClientCloser, error) { - var res apistruct.StorageMinerStruct - closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", - []interface{}{ - &res.CommonStruct.Internal, - &res.Internal, - }, - requestHeader, - ) - - return &res, closer, err -} - -func NewWorkerRPC(addr string, requestHeader http.Header) (api.WorkerAPI, jsonrpc.ClientCloser, error) { - var res apistruct.WorkerStruct - closer, err := jsonrpc.NewMergeClient(addr, "Filecoin", - []interface{}{ - &res.Internal, - }, - requestHeader, - ) - - return &res, closer, err -} diff --git a/vendor/github.com/filecoin-project/lotus/api/docgen/docgen.go 
b/vendor/github.com/filecoin-project/lotus/api/docgen/docgen.go deleted file mode 100644 index c9d8e8aa41..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/docgen/docgen.go +++ /dev/null @@ -1,379 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "go/ast" - "go/parser" - "go/token" - "reflect" - "sort" - "strings" - "time" - "unicode" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/go-jsonrpc/auth" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/apistruct" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-filestore" - "github.com/libp2p/go-libp2p-core/network" - peer "github.com/libp2p/go-libp2p-peer" - "github.com/multiformats/go-multiaddr" -) - -var ExampleValues = map[reflect.Type]interface{}{ - reflect.TypeOf(auth.Permission("")): auth.Permission("write"), - reflect.TypeOf(""): "string value", - reflect.TypeOf(uint64(42)): uint64(42), - reflect.TypeOf(byte(7)): byte(7), - reflect.TypeOf([]byte{}): []byte("byte array"), -} - -func addExample(v interface{}) { - ExampleValues[reflect.TypeOf(v)] = v -} - -func init() { - c, err := cid.Decode("bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4") - if err != nil { - panic(err) - } - - ExampleValues[reflect.TypeOf(c)] = c - - c2, err := cid.Decode("bafy2bzacebp3shtrn43k7g3unredz7fxn4gj533d3o43tqn2p2ipxxhrvchve") - if err != nil { - panic(err) - } - - tsk := types.NewTipSetKey(c, c2) - - ExampleValues[reflect.TypeOf(tsk)] = tsk - - addr, err := address.NewIDAddress(1234) - if err != nil { - panic(err) - } - - ExampleValues[reflect.TypeOf(addr)] = 
addr - - pid, err := peer.IDB58Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf") - if err != nil { - panic(err) - } - addExample(pid) - - addExample(bitfield.NewFromSet([]uint64{5})) - addExample(abi.RegisteredSealProof_StackedDrg32GiBV1) - addExample(abi.RegisteredPoStProof_StackedDrgWindow32GiBV1) - addExample(abi.ChainEpoch(10101)) - addExample(crypto.SigTypeBLS) - addExample(int64(9)) - addExample(12.3) - addExample(123) - addExample(uintptr(0)) - addExample(abi.MethodNum(1)) - addExample(exitcode.ExitCode(0)) - addExample(crypto.DomainSeparationTag_ElectionProofProduction) - addExample(true) - addExample(abi.UnpaddedPieceSize(1024)) - addExample(abi.UnpaddedPieceSize(1024).Padded()) - addExample(abi.DealID(5432)) - addExample(filestore.StatusFileChanged) - addExample(abi.SectorNumber(9)) - addExample(abi.SectorSize(32 * 1024 * 1024 * 1024)) - addExample(api.MpoolChange(0)) - addExample(network.Connected) - addExample(dtypes.NetworkName("lotus")) - addExample(api.SyncStateStage(1)) - addExample(build.APIVersion) - addExample(api.PCHInbound) - addExample(time.Minute) - addExample(&types.ExecutionTrace{ - Msg: exampleValue(reflect.TypeOf(&types.Message{}), nil).(*types.Message), - MsgRct: exampleValue(reflect.TypeOf(&types.MessageReceipt{}), nil).(*types.MessageReceipt), - }) - addExample(map[string]types.Actor{ - "t01236": exampleValue(reflect.TypeOf(types.Actor{}), nil).(types.Actor), - }) - addExample(map[string]api.MarketDeal{ - "t026363": exampleValue(reflect.TypeOf(api.MarketDeal{}), nil).(api.MarketDeal), - }) - addExample(map[string]api.MarketBalance{ - "t026363": exampleValue(reflect.TypeOf(api.MarketBalance{}), nil).(api.MarketBalance), - }) - - maddr, err := multiaddr.NewMultiaddr("/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior") - if err != nil { - panic(err) - } - - // because reflect.TypeOf(maddr) returns the concrete type... 
- ExampleValues[reflect.TypeOf(struct{ A multiaddr.Multiaddr }{}).Field(0).Type] = maddr - -} - -func exampleValue(t, parent reflect.Type) interface{} { - v, ok := ExampleValues[t] - if ok { - return v - } - - switch t.Kind() { - case reflect.Slice: - out := reflect.New(t).Elem() - reflect.Append(out, reflect.ValueOf(exampleValue(t.Elem(), t))) - return out.Interface() - case reflect.Chan: - return exampleValue(t.Elem(), nil) - case reflect.Struct: - es := exampleStruct(t, parent) - v := reflect.ValueOf(es).Elem().Interface() - ExampleValues[t] = v - return v - case reflect.Array: - out := reflect.New(t).Elem() - for i := 0; i < t.Len(); i++ { - out.Index(i).Set(reflect.ValueOf(exampleValue(t.Elem(), t))) - } - return out.Interface() - - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct { - es := exampleStruct(t.Elem(), t) - //ExampleValues[t] = es - return es - } - case reflect.Interface: - return struct{}{} - } - - panic(fmt.Sprintf("No example value for type: %s", t)) -} - -func exampleStruct(t, parent reflect.Type) interface{} { - ns := reflect.New(t) - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type == parent { - continue - } - if strings.Title(f.Name) == f.Name { - ns.Elem().Field(i).Set(reflect.ValueOf(exampleValue(f.Type, t))) - } - } - - return ns.Interface() -} - -type Visitor struct { - Methods map[string]ast.Node -} - -func (v *Visitor) Visit(node ast.Node) ast.Visitor { - st, ok := node.(*ast.TypeSpec) - if !ok { - return v - } - - if st.Name.Name != "FullNode" { - return nil - } - - iface := st.Type.(*ast.InterfaceType) - for _, m := range iface.Methods.List { - if len(m.Names) > 0 { - v.Methods[m.Names[0].Name] = m - } - } - - return v -} - -const noComment = "There are not yet any comments for this method." 
- -func parseApiASTInfo() (map[string]string, map[string]string) { //nolint:golint - fset := token.NewFileSet() - pkgs, err := parser.ParseDir(fset, "./api", nil, parser.AllErrors|parser.ParseComments) - if err != nil { - fmt.Println("parse error: ", err) - } - - ap := pkgs["api"] - - f := ap.Files["api/api_full.go"] - - cmap := ast.NewCommentMap(fset, f, f.Comments) - - v := &Visitor{make(map[string]ast.Node)} - ast.Walk(v, pkgs["api"]) - - groupDocs := make(map[string]string) - out := make(map[string]string) - for mn, node := range v.Methods { - cs := cmap.Filter(node).Comments() - if len(cs) == 0 { - out[mn] = noComment - } else { - for _, c := range cs { - if strings.HasPrefix(c.Text(), "MethodGroup:") { - parts := strings.Split(c.Text(), "\n") - groupName := strings.TrimSpace(parts[0][12:]) - comment := strings.Join(parts[1:], "\n") - groupDocs[groupName] = comment - - break - } - } - - last := cs[len(cs)-1].Text() - if !strings.HasPrefix(last, "MethodGroup:") { - out[mn] = last - } else { - out[mn] = noComment - } - } - } - return out, groupDocs -} - -type MethodGroup struct { - GroupName string - Header string - Methods []*Method -} - -type Method struct { - Comment string - Name string - InputExample string - ResponseExample string -} - -func methodGroupFromName(mn string) string { - i := strings.IndexFunc(mn[1:], func(r rune) bool { - return unicode.IsUpper(r) - }) - if i < 0 { - return "" - } - return mn[:i+1] -} - -func main() { - - comments, groupComments := parseApiASTInfo() - - groups := make(map[string]*MethodGroup) - - var api struct{ api.FullNode } - t := reflect.TypeOf(api) - for i := 0; i < t.NumMethod(); i++ { - m := t.Method(i) - - groupName := methodGroupFromName(m.Name) - - g, ok := groups[groupName] - if !ok { - g = new(MethodGroup) - g.Header = groupComments[groupName] - g.GroupName = groupName - groups[groupName] = g - } - - var args []interface{} - ft := m.Func.Type() - for j := 2; j < ft.NumIn(); j++ { - inp := ft.In(j) - args = 
append(args, exampleValue(inp, nil)) - } - - v, err := json.MarshalIndent(args, "", " ") - if err != nil { - panic(err) - } - - outv := exampleValue(ft.Out(0), nil) - - ov, err := json.MarshalIndent(outv, "", " ") - if err != nil { - panic(err) - } - - g.Methods = append(g.Methods, &Method{ - Name: m.Name, - Comment: comments[m.Name], - InputExample: string(v), - ResponseExample: string(ov), - }) - } - - var groupslice []*MethodGroup - for _, g := range groups { - groupslice = append(groupslice, g) - } - - sort.Slice(groupslice, func(i, j int) bool { - return groupslice[i].GroupName < groupslice[j].GroupName - }) - - fmt.Printf("# Groups\n") - - for _, g := range groupslice { - fmt.Printf("* [%s](#%s)\n", g.GroupName, g.GroupName) - for _, method := range g.Methods { - fmt.Printf(" * [%s](#%s)\n", method.Name, method.Name) - } - } - - permStruct := reflect.TypeOf(apistruct.FullNodeStruct{}.Internal) - commonPermStruct := reflect.TypeOf(apistruct.CommonStruct{}.Internal) - - for _, g := range groupslice { - g := g - fmt.Printf("## %s\n", g.GroupName) - fmt.Printf("%s\n\n", g.Header) - - sort.Slice(g.Methods, func(i, j int) bool { - return g.Methods[i].Name < g.Methods[j].Name - }) - - for _, m := range g.Methods { - fmt.Printf("### %s\n", m.Name) - fmt.Printf("%s\n\n", m.Comment) - - meth, ok := permStruct.FieldByName(m.Name) - if !ok { - meth, ok = commonPermStruct.FieldByName(m.Name) - if !ok { - panic("no perms for method: " + m.Name) - } - } - - perms := meth.Tag.Get("perm") - - fmt.Printf("Perms: %s\n\n", perms) - - if strings.Count(m.InputExample, "\n") > 0 { - fmt.Printf("Inputs:\n```json\n%s\n```\n\n", m.InputExample) - } else { - fmt.Printf("Inputs: `%s`\n\n", m.InputExample) - } - - if strings.Count(m.ResponseExample, "\n") > 0 { - fmt.Printf("Response:\n```json\n%s\n```\n\n", m.ResponseExample) - } else { - fmt.Printf("Response: `%s`\n\n", m.ResponseExample) - } - } - } -} diff --git a/vendor/github.com/filecoin-project/lotus/api/test/deals.go 
b/vendor/github.com/filecoin-project/lotus/api/test/deals.go deleted file mode 100644 index 65429cbcf3..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/test/deals.go +++ /dev/null @@ -1,255 +0,0 @@ -package test - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "sync/atomic" - "testing" - "time" - - "github.com/ipfs/go-cid" - - files "github.com/ipfs/go-ipfs-files" - logging "github.com/ipfs/go-log/v2" - "github.com/ipld/go-car" - - "github.com/filecoin-project/go-fil-markets/storagemarket" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - dag "github.com/ipfs/go-merkledag" - dstest "github.com/ipfs/go-merkledag/test" - unixfile "github.com/ipfs/go-unixfs/file" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl" - ipld "github.com/ipfs/go-ipld-format" -) - -func init() { - logging.SetAllLoggers(logging.LevelInfo) - build.InsecurePoStValidation = true -} - -func TestDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - - ctx := context.Background() - n, sn := b(t, 1, oneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil { - t.Error(err) - } - } - }() - - makeDeal(t, ctx, 6, client, miner, carExport) - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done -} - -func TestDoubleDealFlow(t *testing.T, b APIBuilder, blocktime time.Duration) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - - ctx := context.Background() - n, sn := b(t, 
1, oneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := int64(1) - done := make(chan struct{}) - - go func() { - defer close(done) - for atomic.LoadInt64(&mine) == 1 { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil { - t.Error(err) - } - } - }() - - makeDeal(t, ctx, 6, client, miner, false) - makeDeal(t, ctx, 7, client, miner, false) - - atomic.AddInt64(&mine, -1) - fmt.Println("shutting down mining") - <-done -} - -func makeDeal(t *testing.T, ctx context.Context, rseed int, client *impl.FullNodeAPI, miner TestStorageNode, carExport bool) { - data := make([]byte, 1600) - rand.New(rand.NewSource(int64(rseed))).Read(data) - - r := bytes.NewReader(data) - fcid, err := client.ClientImportLocal(ctx, r) - if err != nil { - t.Fatal(err) - } - - fmt.Println("FILE CID: ", fcid) - - deal := startDeal(t, ctx, miner, client, fcid) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - waitDealSealed(t, ctx, client, deal) - - // Retrieval - - testRetrieval(t, ctx, err, client, fcid, carExport, data) -} - -func startDeal(t *testing.T, ctx context.Context, miner TestStorageNode, client *impl.FullNodeAPI, fcid cid.Cid) *cid.Cid { - maddr, err := miner.ActorAddress(ctx) - if err != nil { - t.Fatal(err) - } - - addr, err := client.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - deal, err := client.ClientStartDeal(ctx, &api.StartDealParams{ - Data: &storagemarket.DataRef{Root: fcid}, - Wallet: addr, - Miner: maddr, - EpochPrice: types.NewInt(1000000), - MinBlocksDuration: 100, - }) - if err != nil { - t.Fatalf("%+v", err) - } - return deal -} - -func waitDealSealed(t *testing.T, ctx context.Context, client 
*impl.FullNodeAPI, deal *cid.Cid) { -loop: - for { - di, err := client.ClientGetDealInfo(ctx, *deal) - if err != nil { - t.Fatal(err) - } - switch di.State { - case storagemarket.StorageDealProposalRejected: - t.Fatal("deal rejected") - case storagemarket.StorageDealFailing: - t.Fatal("deal failed") - case storagemarket.StorageDealError: - t.Fatal("deal errored", di.Message) - case storagemarket.StorageDealActive: - fmt.Println("COMPLETE", di) - break loop - } - fmt.Println("Deal state: ", storagemarket.DealStates[di.State]) - time.Sleep(time.Second / 2) - } -} - -func testRetrieval(t *testing.T, ctx context.Context, err error, client *impl.FullNodeAPI, fcid cid.Cid, carExport bool, data []byte) { - offers, err := client.ClientFindData(ctx, fcid) - if err != nil { - t.Fatal(err) - } - - if len(offers) < 1 { - t.Fatal("no offers") - } - - rpath, err := ioutil.TempDir("", "lotus-retrieve-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rpath) //nolint:errcheck - - caddr, err := client.WalletDefaultAddress(ctx) - if err != nil { - t.Fatal(err) - } - - ref := &api.FileRef{ - Path: filepath.Join(rpath, "ret"), - IsCAR: carExport, - } - err = client.ClientRetrieve(ctx, offers[0].Order(caddr), ref) - if err != nil { - t.Fatalf("%+v", err) - } - - rdata, err := ioutil.ReadFile(filepath.Join(rpath, "ret")) - if err != nil { - t.Fatal(err) - } - - if carExport { - rdata = extractCarData(t, ctx, rdata, rpath) - } - - if !bytes.Equal(rdata, data) { - t.Fatal("wrong data retrieved") - } -} - -func extractCarData(t *testing.T, ctx context.Context, rdata []byte, rpath string) []byte { - bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) - if err != nil { - t.Fatal(err) - } - b, err := bserv.GetBlock(ctx, ch.Roots[0]) - if err != nil { - t.Fatal(err) - } - nd, err := ipld.Decode(b) - if err != nil { - t.Fatal(err) - } - dserv := dag.NewDAGService(bserv) - fil, err := unixfile.NewUnixfsFile(ctx, dserv, nd) - if err != nil { 
- t.Fatal(err) - } - outPath := filepath.Join(rpath, "retLoadedCAR") - if err := files.WriteTo(fil, outPath); err != nil { - t.Fatal(err) - } - rdata, err = ioutil.ReadFile(outPath) - if err != nil { - t.Fatal(err) - } - return rdata -} diff --git a/vendor/github.com/filecoin-project/lotus/api/test/mining.go b/vendor/github.com/filecoin-project/lotus/api/test/mining.go deleted file mode 100644 index 5cb140df67..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/test/mining.go +++ /dev/null @@ -1,200 +0,0 @@ -package test - -import ( - "bytes" - "context" - "fmt" - "math/rand" - "os" - "sync/atomic" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/node/impl" -) - -var log = logging.Logger("apitest") - -func (ts *testSuite) testMining(t *testing.T) { - ctx := context.Background() - apis, sn := ts.makeNodes(t, 1, oneMiner) - api := apis[0] - - h1, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(0), h1.Height()) - - newHeads, err := api.ChainNotify(ctx) - require.NoError(t, err) - <-newHeads - - err = sn[0].MineOne(ctx, func(bool, error) {}) - require.NoError(t, err) - - <-newHeads - - h2, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(1), h2.Height()) -} - -func (ts *testSuite) testMiningReal(t *testing.T) { - build.InsecurePoStValidation = false - defer func() { - build.InsecurePoStValidation = true - }() - - ctx := context.Background() - apis, sn := ts.makeNodes(t, 1, oneMiner) - api := apis[0] - - h1, err := api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(0), h1.Height()) - - newHeads, err := api.ChainNotify(ctx) - require.NoError(t, err) - <-newHeads - - err = sn[0].MineOne(ctx, func(bool, error) {}) - require.NoError(t, err) - - <-newHeads - - h2, err := 
api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(1), h2.Height()) - - err = sn[0].MineOne(ctx, func(bool, error) {}) - require.NoError(t, err) - - <-newHeads - - h2, err = api.ChainHead(ctx) - require.NoError(t, err) - require.Equal(t, abi.ChainEpoch(2), h2.Height()) -} - -func TestDealMining(t *testing.T, b APIBuilder, blocktime time.Duration, carExport bool) { - _ = os.Setenv("BELLMAN_NO_GPU", "1") - - // test making a deal with a fresh miner, and see if it starts to mine - - ctx := context.Background() - n, sn := b(t, 1, []StorageMiner{ - {Full: 0, Preseal: PresealGenesis}, - {Full: 0, Preseal: 0}, // TODO: Add support for storage miners on non-first full node - }) - client := n[0].FullNode.(*impl.FullNodeAPI) - provider := sn[1] - genesisMiner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := provider.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - - if err := genesisMiner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - - time.Sleep(time.Second) - - data := make([]byte, 600) - rand.New(rand.NewSource(5)).Read(data) - - r := bytes.NewReader(data) - fcid, err := client.ClientImportLocal(ctx, r) - if err != nil { - t.Fatal(err) - } - - fmt.Println("FILE CID: ", fcid) - - var mine int32 = 1 - done := make(chan struct{}) - minedTwo := make(chan struct{}) - - go func() { - doneMinedTwo := false - defer close(done) - - prevExpect := 0 - for atomic.LoadInt32(&mine) != 0 { - wait := make(chan int, 2) - mdone := func(mined bool, err error) { - go func() { - n := 0 - if mined { - n = 1 - } - wait <- n - }() - } - - if err := sn[0].MineOne(ctx, mdone); err != nil { - t.Error(err) - } - - if err := sn[1].MineOne(ctx, mdone); err != nil { - t.Error(err) - } - - expect := <-wait - expect += <-wait - - time.Sleep(blocktime) - - for { - n := 0 - for i, node := range sn { - mb, err := node.MiningBase(ctx) - if err != nil { - t.Error(err) - return - } - - if 
len(mb.Cids()) != expect { - log.Warnf("node %d mining base not complete (%d, want %d)", i, len(mb.Cids()), expect) - continue - } - n++ - } - if n == len(sn) { - break - } - time.Sleep(blocktime) - } - - if prevExpect == 2 && expect == 2 && !doneMinedTwo { - close(minedTwo) - doneMinedTwo = true - } - - prevExpect = expect - } - }() - - deal := startDeal(t, ctx, provider, client, fcid) - - // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this - time.Sleep(time.Second) - - waitDealSealed(t, ctx, client, deal) - - <-minedTwo - - atomic.StoreInt32(&mine, 0) - fmt.Println("shutting down mining") - <-done -} diff --git a/vendor/github.com/filecoin-project/lotus/api/test/test.go b/vendor/github.com/filecoin-project/lotus/api/test/test.go deleted file mode 100644 index a19d13e301..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/test/test.go +++ /dev/null @@ -1,126 +0,0 @@ -package test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" -) - -type TestNode struct { - api.FullNode -} - -type TestStorageNode struct { - api.StorageMiner - - MineOne func(context.Context, func(bool, error)) error -} - -var PresealGenesis = -1 - -const GenesisPreseals = 2 - -type StorageMiner struct { - Full int - Preseal int -} - -// APIBuilder is a function which is invoked in test suite to provide -// test nodes and networks -// -// storage array defines storage nodes, numbers in the array specify full node -// index the storage node 'belongs' to -type APIBuilder func(t *testing.T, nFull int, storage []StorageMiner) ([]TestNode, []TestStorageNode) -type testSuite struct { - makeNodes APIBuilder -} - -// TestApis is the entry point to API test suite -func TestApis(t *testing.T, b APIBuilder) { - ts := testSuite{ - makeNodes: b, - } - - t.Run("version", 
ts.testVersion) - t.Run("id", ts.testID) - t.Run("testConnectTwo", ts.testConnectTwo) - t.Run("testMining", ts.testMining) - t.Run("testMiningReal", ts.testMiningReal) -} - -var oneMiner = []StorageMiner{{Full: 0, Preseal: PresealGenesis}} - -func (ts *testSuite) testVersion(t *testing.T) { - ctx := context.Background() - apis, _ := ts.makeNodes(t, 1, oneMiner) - api := apis[0] - - v, err := api.Version(ctx) - if err != nil { - t.Fatal(err) - } - require.Equal(t, v.Version, build.BuildVersion) -} - -func (ts *testSuite) testID(t *testing.T) { - ctx := context.Background() - apis, _ := ts.makeNodes(t, 1, oneMiner) - api := apis[0] - - id, err := api.ID(ctx) - if err != nil { - t.Fatal(err) - } - assert.Regexp(t, "^12", id.Pretty()) -} - -func (ts *testSuite) testConnectTwo(t *testing.T) { - ctx := context.Background() - apis, _ := ts.makeNodes(t, 2, oneMiner) - - p, err := apis[0].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 0 { - t.Error("Node 0 has a peer") - } - - p, err = apis[1].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 0 { - t.Error("Node 1 has a peer") - } - - addrs, err := apis[1].NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := apis[0].NetConnect(ctx, addrs); err != nil { - t.Fatal(err) - } - - p, err = apis[0].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 1 { - t.Error("Node 0 doesn't have 1 peer") - } - - p, err = apis[1].NetPeers(ctx) - if err != nil { - t.Fatal(err) - } - if len(p) != 1 { - t.Error("Node 0 doesn't have 1 peer") - } -} diff --git a/vendor/github.com/filecoin-project/lotus/api/test/window_post.go b/vendor/github.com/filecoin-project/lotus/api/test/window_post.go deleted file mode 100644 index dcf6fcebd6..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/test/window_post.go +++ /dev/null @@ -1,169 +0,0 @@ -package test - -import ( - "context" - "fmt" - "github.com/filecoin-project/lotus/api" - "os" - "strings" - "testing" - "time" - - 
"github.com/stretchr/testify/require" - - "github.com/filecoin-project/specs-actors/actors/abi" - miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" - sealing "github.com/filecoin-project/storage-fsm" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl" -) - -func TestPledgeSector(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - os.Setenv("BELLMAN_NO_GPU", "1") - - ctx := context.Background() - n, sn := b(t, 1, oneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := true - done := make(chan struct{}) - go func() { - defer close(done) - for mine { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil { - t.Error(err) - } - } - }() - - pledgeSectors(t, ctx, miner, nSectors) - - mine = false - <-done -} - -func pledgeSectors(t *testing.T, ctx context.Context, miner TestStorageNode, n int) { - for i := 0; i < n; i++ { - err := miner.PledgeSector(ctx) - require.NoError(t, err) - } - - for { - s, err := miner.SectorsList(ctx) // Note - the test builder doesn't import genesis sectors into FSM - require.NoError(t, err) - fmt.Printf("Sectors: %d\n", len(s)) - if len(s) >= n { - break - } - - time.Sleep(100 * time.Millisecond) - } - - fmt.Printf("All sectors is fsm\n") - - s, err := miner.SectorsList(ctx) - require.NoError(t, err) - - toCheck := map[abi.SectorNumber]struct{}{} - for _, number := range s { - toCheck[number] = struct{}{} - } - - for len(toCheck) > 0 { - for n := range toCheck { - st, err := miner.SectorsStatus(ctx, n) - require.NoError(t, err) - if st.State == api.SectorState(sealing.Proving) { - delete(toCheck, n) - } - if strings.Contains(string(st.State), "Fail") { - t.Fatal("sector in a failed state", st.State) - } - } 
- - time.Sleep(100 * time.Millisecond) - fmt.Printf("WaitSeal: %d\n", len(s)) - } -} - -func TestWindowPost(t *testing.T, b APIBuilder, blocktime time.Duration, nSectors int) { - os.Setenv("BELLMAN_NO_GPU", "1") - - ctx := context.Background() - n, sn := b(t, 1, oneMiner) - client := n[0].FullNode.(*impl.FullNodeAPI) - miner := sn[0] - - addrinfo, err := client.NetAddrsListen(ctx) - if err != nil { - t.Fatal(err) - } - - if err := miner.NetConnect(ctx, addrinfo); err != nil { - t.Fatal(err) - } - time.Sleep(time.Second) - - mine := true - done := make(chan struct{}) - go func() { - defer close(done) - for mine { - time.Sleep(blocktime) - if err := sn[0].MineOne(ctx, func(bool, error) {}); err != nil { - t.Error(err) - } - } - }() - - pledgeSectors(t, ctx, miner, nSectors) - - maddr, err := miner.ActorAddress(ctx) - require.NoError(t, err) - - di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - fmt.Printf("Running one proving periods\n") - - for { - head, err := client.ChainHead(ctx) - require.NoError(t, err) - - if head.Height() > di.PeriodStart+(miner2.WPoStProvingPeriod)+2 { - break - } - - if head.Height()%100 == 0 { - fmt.Printf("@%d\n", head.Height()) - } - time.Sleep(blocktime) - } - - p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) - - ssz, err := miner.ActorSectorSize(ctx, maddr) - require.NoError(t, err) - - require.Equal(t, p.MinerPower, p.TotalPower) - require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz)*uint64(nSectors+GenesisPreseals))) - - // TODO: Inject faults here - - mine = false - <-done -} diff --git a/vendor/github.com/filecoin-project/lotus/api/types.go b/vendor/github.com/filecoin-project/lotus/api/types.go deleted file mode 100644 index 29bd7401c0..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/types.go +++ /dev/null @@ -1,77 +0,0 @@ -package api - -import ( - "encoding/json" - "github.com/filecoin-project/go-address" - 
"github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - - "github.com/libp2p/go-libp2p-core/peer" - ma "github.com/multiformats/go-multiaddr" -) - -// TODO: check if this exists anywhere else - -type MultiaddrSlice []ma.Multiaddr - -func (m *MultiaddrSlice) UnmarshalJSON(raw []byte) (err error) { - var temp []string - if err := json.Unmarshal(raw, &temp); err != nil { - return err - } - - res := make([]ma.Multiaddr, len(temp)) - for i, str := range temp { - res[i], err = ma.NewMultiaddr(str) - if err != nil { - return err - } - } - *m = res - return nil -} - -var _ json.Unmarshaler = new(MultiaddrSlice) - -type ObjStat struct { - Size uint64 - Links uint64 -} - -type PubsubScore struct { - ID peer.ID - Score float64 -} - -type MinerInfo struct { - Owner address.Address // Must be an ID-address. - Worker address.Address // Must be an ID-address. - NewWorker address.Address // Must be an ID-address. - WorkerChangeEpoch abi.ChainEpoch - PeerId peer.ID - Multiaddrs []abi.Multiaddrs - SealProofType abi.RegisteredSealProof - SectorSize abi.SectorSize - WindowPoStPartitionSectors uint64 -} - -func NewApiMinerInfo(info miner.MinerInfo) MinerInfo { - mi := MinerInfo{ - Owner: info.Owner, - Worker: info.Worker, - NewWorker: address.Undef, - WorkerChangeEpoch: -1, - PeerId: peer.ID(info.PeerId), - Multiaddrs: info.Multiaddrs, - SealProofType: info.SealProofType, - SectorSize: info.SectorSize, - WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, - } - - if info.PendingWorkerKey != nil { - mi.NewWorker = info.PendingWorkerKey.NewWorker - mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt - } - - return mi -} diff --git a/vendor/github.com/filecoin-project/lotus/api/utils.go b/vendor/github.com/filecoin-project/lotus/api/utils.go deleted file mode 100644 index 13d5c92cb9..0000000000 --- a/vendor/github.com/filecoin-project/lotus/api/utils.go +++ /dev/null @@ -1,28 +0,0 @@ -package api - -import ( - 
"context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/crypto" -) - -type SignFunc = func(context.Context, []byte) (*crypto.Signature, error) - -type Signer func(context.Context, address.Address, []byte) (*crypto.Signature, error) - -type Signable interface { - Sign(context.Context, SignFunc) error -} - -func SignWith(ctx context.Context, signer Signer, addr address.Address, signable ...Signable) error { - for _, s := range signable { - err := s.Sign(ctx, func(ctx context.Context, b []byte) (*crypto.Signature, error) { - return signer(ctx, addr, b) - }) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/filecoin-project/lotus/build/bootstrap.go b/vendor/github.com/filecoin-project/lotus/build/bootstrap.go index 6343a0172a..80c1529ff6 100644 --- a/vendor/github.com/filecoin-project/lotus/build/bootstrap.go +++ b/vendor/github.com/filecoin-project/lotus/build/bootstrap.go @@ -38,12 +38,3 @@ func BuiltinBootstrap() ([]peer.AddrInfo, error) { }) return out, err } - -func DrandBootstrap() ([]peer.AddrInfo, error) { - addrs := []string{ - "/dnsaddr/pl-eu.testnet.drand.sh/", - "/dnsaddr/pl-us.testnet.drand.sh/", - "/dnsaddr/pl-sin.testnet.drand.sh/", - } - return addrutil.ParseAddresses(context.TODO(), addrs) -} diff --git a/vendor/github.com/filecoin-project/lotus/build/bootstrap/.gitkeep b/vendor/github.com/filecoin-project/lotus/build/bootstrap/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/filecoin-project/lotus/build/bootstrap/bootstrappers.pi b/vendor/github.com/filecoin-project/lotus/build/bootstrap/bootstrappers.pi deleted file mode 100644 index 0854ac0eda..0000000000 --- a/vendor/github.com/filecoin-project/lotus/build/bootstrap/bootstrappers.pi +++ /dev/null @@ -1,12 +0,0 @@ -/dns4/bootstrap-0-sin.fil-test.net/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd 
-/ip4/86.109.15.57/tcp/1347/p2p/12D3KooWPdUquftaQvoQEtEdsRBAhwD6jopbF2oweVTzR59VbHEd -/dns4/bootstrap-0-dfw.fil-test.net/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d -/ip4/139.178.84.45/tcp/1347/p2p/12D3KooWQSCkHCzosEyrh8FgYfLejKgEPM5VB6qWzZE3yDAuXn8d -/dns4/bootstrap-0-fra.fil-test.net/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK -/ip4/136.144.49.17/tcp/1347/p2p/12D3KooWEXN2eQmoyqnNjde9PBAQfQLHN67jcEdWU6JougWrgXJK -/dns4/bootstrap-1-sin.fil-test.net/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw -/ip4/86.109.15.123/tcp/1347/p2p/12D3KooWLmJkZd33mJhjg5RrpJ6NFep9SNLXWc4uVngV4TXKwzYw -/dns4/bootstrap-1-dfw.fil-test.net/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5 -/ip4/139.178.86.3/tcp/1347/p2p/12D3KooWGXLHjiz6pTRu7x2pkgTVCoxcCiVxcNLpMnWcJ3JiNEy5 -/dns4/bootstrap-1-fra.fil-test.net/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R -/ip4/136.144.49.131/tcp/1347/p2p/12D3KooW9szZmKttS9A1FafH3Zc2pxKwwmvCWCGKkRP4KmbhhC4R diff --git a/vendor/github.com/filecoin-project/lotus/build/clock.go b/vendor/github.com/filecoin-project/lotus/build/clock.go new file mode 100644 index 0000000000..a3943897d6 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/clock.go @@ -0,0 +1,10 @@ +package build + +import "github.com/raulk/clock" + +// Clock is the global clock for the system. In standard builds, +// we use a real-time clock, which maps to the `time` package. +// +// Tests that need control of time can replace this variable with +// clock.NewMock(). Always use real time for socket/stream deadlines. 
+var Clock = clock.New() diff --git a/vendor/github.com/filecoin-project/lotus/build/drand.go b/vendor/github.com/filecoin-project/lotus/build/drand.go new file mode 100644 index 0000000000..3b976ac925 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/drand.go @@ -0,0 +1,74 @@ +package build + +import ( + "sort" + + "github.com/filecoin-project/lotus/node/modules/dtypes" +) + +type DrandEnum int + +func DrandConfigSchedule() dtypes.DrandSchedule { + out := dtypes.DrandSchedule{} + for start, config := range DrandSchedule { + out = append(out, dtypes.DrandPoint{Start: start, Config: DrandConfigs[config]}) + } + + sort.Slice(out, func(i, j int) bool { + return out[i].Start < out[j].Start + }) + + return out +} + +const ( + DrandMainnet DrandEnum = iota + 1 + DrandTestnet + DrandDevnet + DrandLocalnet + DrandIncentinet +) + +var DrandConfigs = map[DrandEnum]dtypes.DrandConfig{ + DrandMainnet: { + Servers: []string{ + "https://api.drand.sh", + "https://api2.drand.sh", + "https://api3.drand.sh", + "https://drand.cloudflare.com", + }, + Relays: []string{ + "/dnsaddr/api.drand.sh/", + "/dnsaddr/api2.drand.sh/", + "/dnsaddr/api3.drand.sh/", + }, + ChainInfoJSON: `{"public_key":"868f005eb8e6e4ca0a47c8a77ceaa5309a47978a7c71bc5cce96366b5d7a569937c529eeda66c7293784a9402801af31","period":30,"genesis_time":1595431050,"hash":"8990e7a9aaed2ffed73dbd7092123d6f289930540d7651336225dc172e51b2ce","groupHash":"176f93498eac9ca337150b46d21dd58673ea4e3581185f869672e59fa4cb390a"}`, + }, + DrandTestnet: { + Servers: []string{ + "https://pl-eu.testnet.drand.sh", + "https://pl-us.testnet.drand.sh", + "https://pl-sin.testnet.drand.sh", + }, + Relays: []string{ + "/dnsaddr/pl-eu.testnet.drand.sh/", + "/dnsaddr/pl-us.testnet.drand.sh/", + "/dnsaddr/pl-sin.testnet.drand.sh/", + }, + ChainInfoJSON: 
`{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"84b2234fb34e835dccd048255d7ad3194b81af7d978c3bf157e3469592ae4e02","groupHash":"4dd408e5fdff9323c76a9b6f087ba8fdc5a6da907bd9217d9d10f2287d081957"}`, + }, + DrandDevnet: { + Servers: []string{ + "https://dev1.drand.sh", + "https://dev2.drand.sh", + }, + Relays: []string{ + "/dnsaddr/dev1.drand.sh/", + "/dnsaddr/dev2.drand.sh/", + }, + ChainInfoJSON: `{"public_key":"8cda589f88914aa728fd183f383980b35789ce81b274e5daee1f338b77d02566ef4d3fb0098af1f844f10f9c803c1827","period":25,"genesis_time":1595348225,"hash":"e73b7dc3c4f6a236378220c0dd6aa110eb16eed26c11259606e07ee122838d4f","groupHash":"567d4785122a5a3e75a9bc9911d7ea807dd85ff76b78dc4ff06b075712898607"}`, + }, + DrandIncentinet: { + ChainInfoJSON: `{"public_key":"8cad0c72c606ab27d36ee06de1d5b2db1faf92e447025ca37575ab3a8aac2eaae83192f846fc9e158bc738423753d000","period":30,"genesis_time":1595873820,"hash":"80c8b872c714f4c00fdd3daa465d5514049f457f01f85a4caf68cdcd394ba039","groupHash":"d9406aaed487f7af71851b4399448e311f2328923d454e971536c05398ce2d9b"}`, + }, +} diff --git a/vendor/github.com/filecoin-project/lotus/build/genesis/.gitkeep b/vendor/github.com/filecoin-project/lotus/build/genesis/.gitkeep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/filecoin-project/lotus/build/genesis/devnet.car b/vendor/github.com/filecoin-project/lotus/build/genesis/devnet.car deleted file mode 100644 index 3e156de617..0000000000 Binary files a/vendor/github.com/filecoin-project/lotus/build/genesis/devnet.car and /dev/null differ diff --git a/vendor/github.com/filecoin-project/lotus/build/params_2k.go b/vendor/github.com/filecoin-project/lotus/build/params_2k.go index d22c6a6f8c..b09b60faee 100644 --- a/vendor/github.com/filecoin-project/lotus/build/params_2k.go +++ b/vendor/github.com/filecoin-project/lotus/build/params_2k.go @@ -3,26 +3,45 @@ 
package build import ( - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + "math" + "os" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/chain/actors/policy" ) +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 0 + +const UpgradeSmokeHeight = -1 +const UpgradeIgnitionHeight = -2 +const UpgradeRefuelHeight = -3 +const UpgradeTapeHeight = -4 + +var UpgradeActorsV2Height = abi.ChainEpoch(10) +var UpgradeLiftoffHeight = abi.ChainEpoch(-5) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + func init() { - power.ConsensusMinerMinPower = big.NewInt(2048) - miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ - abi.RegisteredSealProof_StackedDrg2KiBV1: {}, + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg2KiBV1) + policy.SetConsensusMinerMinPower(abi.NewStoragePower(2048)) + policy.SetMinVerifiedDealSize(abi.NewStoragePower(256)) + + if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" { + UpgradeActorsV2Height = math.MaxInt64 + UpgradeLiftoffHeight = 11 } - verifreg.MinVerifiedDealSize = big.NewInt(256) BuildType |= Build2k } -const BlockDelaySecs = uint64(2) +const BlockDelaySecs = uint64(4) -const PropagationDelaySecs = uint64(3) +const PropagationDelaySecs = uint64(1) // SlashablePowerDelay is the number of epochs after ElectionPeriodStart, after // which the miner is slashed diff --git a/vendor/github.com/filecoin-project/lotus/build/params_mainnet.go b/vendor/github.com/filecoin-project/lotus/build/params_mainnet.go new file mode 100644 index 0000000000..54f50ac6ea --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/build/params_mainnet.go @@ -0,0 +1,60 @@ +// +build !debug +// 
+build !2k +// +build !testground + +package build + +import ( + "math" + "os" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" +) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandIncentinet, + UpgradeSmokeHeight: DrandMainnet, +} + +const UpgradeBreezeHeight = 41280 +const BreezeGasTampingDuration = 120 + +const UpgradeSmokeHeight = 51000 + +const UpgradeIgnitionHeight = 94000 +const UpgradeRefuelHeight = 130800 + +var UpgradeActorsV2Height = abi.ChainEpoch(138720) + +const UpgradeTapeHeight = 140760 + +// This signals our tentative epoch for mainnet launch. Can make it later, but not earlier. +// Miners, clients, developers, custodians all need time to prepare. +// We still have upgrades and state changes to do, but can happen after signaling timing here. +const UpgradeLiftoffHeight = 148888 + +func init() { + policy.SetConsensusMinerMinPower(abi.NewStoragePower(10 << 40)) + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + ) + + if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { + SetAddressNetwork(address.Mainnet) + } + + if os.Getenv("LOTUS_DISABLE_V2_ACTOR_MIGRATION") == "1" { + UpgradeActorsV2Height = math.MaxInt64 + } + + Devnet = false +} + +const BlockDelaySecs = uint64(builtin0.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) diff --git a/vendor/github.com/filecoin-project/lotus/build/params_shared_funcs.go b/vendor/github.com/filecoin-project/lotus/build/params_shared_funcs.go index cdb8e70d37..40ccca50bd 100644 --- a/vendor/github.com/filecoin-project/lotus/build/params_shared_funcs.go +++ b/vendor/github.com/filecoin-project/lotus/build/params_shared_funcs.go @@ -3,17 +3,19 @@ package build import ( "sort" + "github.com/filecoin-project/go-address" + 
"github.com/libp2p/go-libp2p-core/protocol" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/filecoin-project/go-state-types/abi" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/lotus/node/modules/dtypes" ) func DefaultSectorSize() abi.SectorSize { - szs := make([]abi.SectorSize, 0, len(miner.SupportedProofTypes)) - for spt := range miner.SupportedProofTypes { + szs := make([]abi.SectorSize, 0, len(miner0.SupportedProofTypes)) + for spt := range miner0.SupportedProofTypes { ss, err := spt.SectorSize() if err != nil { panic(err) @@ -36,3 +38,15 @@ func MessagesTopic(netName dtypes.NetworkName) string { return "/fil/msgs/" + st func DhtProtocolName(netName dtypes.NetworkName) protocol.ID { return protocol.ID("/fil/kad/" + string(netName)) } + +func UseNewestNetwork() bool { + // TODO: Put these in a container we can iterate over + if UpgradeBreezeHeight <= 0 && UpgradeSmokeHeight <= 0 && UpgradeActorsV2Height <= 0 { + return true + } + return false +} + +func SetAddressNetwork(n address.Network) { + address.CurrentNetwork = n +} diff --git a/vendor/github.com/filecoin-project/lotus/build/params_shared_vals.go b/vendor/github.com/filecoin-project/lotus/build/params_shared_vals.go index 8ef9d9dc4d..ede40c0e32 100644 --- a/vendor/github.com/filecoin-project/lotus/build/params_shared_vals.go +++ b/vendor/github.com/filecoin-project/lotus/build/params_shared_vals.go @@ -4,11 +4,15 @@ package build import ( "math/big" + "os" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/policy" + + "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/go-state-types/abi" + 
"github.com/filecoin-project/specs-actors/actors/builtin" ) // ///// @@ -21,13 +25,17 @@ const UnixfsLinksPerLevel = 1024 // Consensus / Network const AllowableClockDriftSecs = uint64(1) +const NewestNetworkVersion = network.Version5 +const ActorUpgradeNetworkVersion = network.Version4 // Epochs +const ForkLengthThreshold = Finality // Blocks (e) var BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) // Epochs +const Finality = policy.ChainFinality const MessageConfidence = uint64(5) // constants for Weight calculation @@ -39,10 +47,8 @@ const WRatioDen = uint64(2) // Proofs // Epochs - -// Epochs - -// Maximum lookback that randomness can be sourced from for a seal proof submission +// TODO: unused +const SealRandomnessLookback = policy.SealRandomnessLookback // ///// // Mining @@ -50,23 +56,37 @@ const WRatioDen = uint64(2) // Epochs const TicketRandomnessLookback = abi.ChainEpoch(1) -const WinningPoStSectorSetLookback = abi.ChainEpoch(10) +// ///// +// Address + +const AddressMainnetEnvVar = "_mainnet_" // ///// // Devnet settings -const TotalFilecoin = uint64(2_000_000_000) -const MiningRewardTotal = uint64(1_400_000_000) +var Devnet = true + +const FilBase = uint64(2_000_000_000) +const FilAllocStorageMining = uint64(1_100_000_000) const FilecoinPrecision = uint64(1_000_000_000_000_000_000) +const FilReserved = uint64(300_000_000) var InitialRewardBalance *big.Int +var InitialFilReserved *big.Int // TODO: Move other important consts here func init() { - InitialRewardBalance = big.NewInt(int64(MiningRewardTotal)) + InitialRewardBalance = big.NewInt(int64(FilAllocStorageMining)) InitialRewardBalance = InitialRewardBalance.Mul(InitialRewardBalance, big.NewInt(int64(FilecoinPrecision))) + + InitialFilReserved = big.NewInt(int64(FilReserved)) + InitialFilReserved = InitialFilReserved.Mul(InitialFilReserved, big.NewInt(int64(FilecoinPrecision))) + + if os.Getenv("LOTUS_ADDRESS_TYPE") == AddressMainnetEnvVar { + SetAddressNetwork(address.Mainnet) + } } // Sync 
@@ -84,14 +104,16 @@ const VerifSigCacheSize = 32000 // Limits // TODO: If this is gonna stay, it should move to specs-actors -const BlockMessageLimit = 512 -const BlockGasLimit = 100_000_000_000 - -var DrandConfig = dtypes.DrandConfig{ - Servers: []string{ - "https://pl-eu.testnet.drand.sh", - "https://pl-us.testnet.drand.sh", - "https://pl-sin.testnet.drand.sh", - }, - ChainInfoJSON: `{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`, -} +const BlockMessageLimit = 10000 + +const BlockGasLimit = 10_000_000_000 +const BlockGasTarget = BlockGasLimit / 2 +const BaseFeeMaxChangeDenom = 8 // 12.5% +const InitialBaseFee = 100e6 +const MinimumBaseFee = 100 +const PackingEfficiencyNum = 4 +const PackingEfficiencyDenom = 5 + +// Actor consts +// TODO: Pull from actors when its made not private +var MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) diff --git a/vendor/github.com/filecoin-project/lotus/build/params_testground.go b/vendor/github.com/filecoin-project/lotus/build/params_testground.go index 7045039817..7ef034234c 100644 --- a/vendor/github.com/filecoin-project/lotus/build/params_testground.go +++ b/vendor/github.com/filecoin-project/lotus/build/params_testground.go @@ -10,26 +10,30 @@ package build import ( "math/big" - "github.com/filecoin-project/lotus/node/modules/dtypes" - - "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + "github.com/filecoin-project/lotus/chain/actors/policy" ) var ( UnixfsChunkSize = uint64(1 << 20) UnixfsLinksPerLevel = 1024 - BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) - BlockMessageLimit = 512 - 
BlockGasLimit = int64(100_000_000_000) - BlockDelaySecs = uint64(builtin.EpochDurationSeconds) - PropagationDelaySecs = uint64(6) + BlocksPerEpoch = uint64(builtin.ExpectedLeadersPerEpoch) + BlockMessageLimit = 512 + BlockGasLimit = int64(100_000_000_000) + BlockGasTarget = int64(BlockGasLimit / 2) + BaseFeeMaxChangeDenom = int64(8) // 12.5% + InitialBaseFee = int64(100e6) + MinimumBaseFee = int64(100) + BlockDelaySecs = uint64(builtin.EpochDurationSeconds) + PropagationDelaySecs = uint64(6) AllowableClockDriftSecs = uint64(1) - Finality = miner.ChainFinalityish + Finality = policy.ChainFinality ForkLengthThreshold = Finality SlashablePowerDelay = 20 @@ -44,30 +48,51 @@ var ( BlsSignatureCacheSize = 40000 VerifSigCacheSize = 32000 - SealRandomnessLookback = Finality - SealRandomnessLookbackLimit = SealRandomnessLookback + 2000 - MaxSealLookback = SealRandomnessLookbackLimit + 2000 + SealRandomnessLookback = policy.SealRandomnessLookback - TicketRandomnessLookback = abi.ChainEpoch(1) - WinningPoStSectorSetLookback = abi.ChainEpoch(10) + TicketRandomnessLookback = abi.ChainEpoch(1) - TotalFilecoin uint64 = 2_000_000_000 - MiningRewardTotal uint64 = 1_400_000_000 + FilBase uint64 = 2_000_000_000 + FilAllocStorageMining uint64 = 1_400_000_000 + FilReserved uint64 = 300_000_000 FilecoinPrecision uint64 = 1_000_000_000_000_000_000 InitialRewardBalance = func() *big.Int { - v := big.NewInt(int64(MiningRewardTotal)) + v := big.NewInt(int64(FilAllocStorageMining)) + v = v.Mul(v, big.NewInt(int64(FilecoinPrecision))) + return v + }() + + InitialFilReserved = func() *big.Int { + v := big.NewInt(int64(FilReserved)) v = v.Mul(v, big.NewInt(int64(FilecoinPrecision))) return v }() - DrandConfig = dtypes.DrandConfig{ - Servers: []string{ - "https://pl-eu.testnet.drand.sh", - "https://pl-us.testnet.drand.sh", - "https://pl-sin.testnet.drand.sh", - }, - ChainInfoJSON: 
`{"public_key":"922a2e93828ff83345bae533f5172669a26c02dc76d6bf59c80892e12ab1455c229211886f35bb56af6d5bea981024df","period":25,"genesis_time":1590445175,"hash":"138a324aa6540f93d0dad002aa89454b1bec2b6e948682cde6bd4db40f4b7c9b"}`, + // Actor consts + // TODO: Pull from actors when its made not private + MinDealDuration = abi.ChainEpoch(180 * builtin.EpochsInDay) + + PackingEfficiencyNum int64 = 4 + PackingEfficiencyDenom int64 = 5 + + UpgradeBreezeHeight abi.ChainEpoch = -1 + BreezeGasTampingDuration abi.ChainEpoch = 0 + + UpgradeSmokeHeight abi.ChainEpoch = -1 + UpgradeIgnitionHeight abi.ChainEpoch = -2 + UpgradeRefuelHeight abi.ChainEpoch = -3 + UpgradeTapeHeight abi.ChainEpoch = -4 + UpgradeActorsV2Height abi.ChainEpoch = 10 + UpgradeLiftoffHeight abi.ChainEpoch = -5 + + DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, } + + NewestNetworkVersion = network.Version5 + ActorUpgradeNetworkVersion = network.Version4 + + Devnet = true ) diff --git a/vendor/github.com/filecoin-project/lotus/build/params_testnet.go b/vendor/github.com/filecoin-project/lotus/build/params_testnet.go deleted file mode 100644 index e0e3fc3fa7..0000000000 --- a/vendor/github.com/filecoin-project/lotus/build/params_testnet.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build !debug -// +build !2k -// +build !testground - -package build - -import ( - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" -) - -func init() { - power.ConsensusMinerMinPower = big.NewInt(1024 << 30) - miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ - abi.RegisteredSealProof_StackedDrg32GiBV1: {}, - abi.RegisteredSealProof_StackedDrg64GiBV1: {}, - } -} - -const BlockDelaySecs = uint64(builtin.EpochDurationSeconds) - -const PropagationDelaySecs = 
uint64(6) diff --git a/vendor/github.com/filecoin-project/lotus/build/proof-params/parameters.json b/vendor/github.com/filecoin-project/lotus/build/proof-params/parameters.json deleted file mode 100644 index b632c17e8f..0000000000 --- a/vendor/github.com/filecoin-project/lotus/build/proof-params/parameters.json +++ /dev/null @@ -1,152 +0,0 @@ -{ - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.params": { - "cid": "QmeDRyxek34F1H6xJY6AkFdWvPsy5F6dKTrebV3ZtWT4ky", - "digest": "f5827f2d8801c62c831e0f972f6dc8bb", - "sector_size": 2048 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0170db1f394b35d995252228ee359194b13199d259380541dc529fb0099096b0.vk": { - "cid": "QmUw1ZmG4BBbX19MsbH3zAEGKUc42iFJc5ZAyomDHeJTsA", - "digest": "398fecdb4b2de445125852bc3c080b35", - "sector_size": 2048 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.params": { - "cid": "QmUeNKp9YZpiAFm81RV5KuxH1FDGJx2DuwcbU2XNSZLLSv", - "digest": "2b6d2972ac9e862e8134d98fb695b0c5", - "sector_size": 536870912 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-0cfb4f178bbb71cf2ecfcd42accce558b27199ab4fb59cb78f2483fe21ef36d9.vk": { - "cid": "QmQaQmTXX995Akd66ggtJY5bNx6Gkxk8P34JTdMMq8393G", - "digest": "3688c9eb256b7b17f411dad78d5ef74a", - "sector_size": 536870912 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.params": { - "cid": "QmfEYTMSkwGJTumQx26iKXGNKiYh3mmAC4SkdybZpJCj5p", - "digest": "09bff16aed893349d94485cfae366a9c", - "sector_size": 2048 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-3ea05428c9d11689f23529cde32fd30aabd50f7d2c93657c1d3650bca3e8ea9e.vk": { - "cid": "QmP4ThPieSUJyRanjibWpT5R5cCMzMAU4j8Y7kBn7CSW1Q", - "digest": "142f2f7e8f1b1779290315cabfd2c803", - 
"sector_size": 2048 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.params": { - "cid": "QmcAixrHsz29DgvtZiMc2kQjvPRvWxYUp36QYmRDZbmREm", - "digest": "8f987f64d434365562180b96ec12e299", - "sector_size": 8388608 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-50c7368dea9593ed0989e70974d28024efa9d156d585b7eea1be22b2e753f331.vk": { - "cid": "QmT4iFnbL6r4txS5PXsiV7NTzbhCxHy54PvdkJJGV2VFXb", - "digest": "94b6c24ac01924f4feeecedd16b5d77d", - "sector_size": 8388608 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.params": { - "cid": "QmbjFst6SFCK1KsTQrfwPdxf3VTNa1raed574tEZZ9PoyQ", - "digest": "2c245fe8179839dd6c6cdea207c67ae8", - "sector_size": 8388608 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-5294475db5237a2e83c3e52fd6c2b03859a1831d45ed08c4f35dbf9a803165a9.vk": { - "cid": "QmQJKmvZN1a5cQ1Nw6CDyXs3nuRPzvyU5NvCFMUL2BfcZC", - "digest": "56ae47bfda53bb8d22981ed8d8d27d72", - "sector_size": 8388608 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.params": { - "cid": "QmQCABxeTpdvXTyjDyk7nPBxkQzCh7MXfGztWnSXEPKMLW", - "digest": "7e6b2eb5ecbb11ac651ad66ebbb2075a", - "sector_size": 536870912 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-0-0-7d739b8cf60f1b0709eeebee7730e297683552e4b69cab6984ec0285663c5781.vk": { - "cid": "QmPBweyugh5Sx4umk8ULhgEGbjY8xmWLfU6M7EMpc8Mad6", - "digest": "94a8d9e25a9ab9674d339833664eba25", - "sector_size": 536870912 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.params": { - "cid": "QmY5yax1E9KymBnCeHksE9Zi8NieZbmwcpoDGoabkeeb9h", - "digest": "c909ea9e3fe25ab9b391a64593afdbba", - "sector_size": 34359738368 - }, - 
"v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-0377ded656c6f524f1618760bffe4e0a1c51d5a70c4509eedae8a27555733edc.vk": { - "cid": "QmXnPo4yH5mwMguwrvqgRfduSttbmPrXtbBfbwU21wQWHt", - "digest": "caf900461e988bbf86dbcaca087b7864", - "sector_size": 34359738368 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.params": { - "cid": "QmZtzzPWwmZEgR7MSMvXRbt9KVK8k4XZ5RLWHybHJW9SdE", - "digest": "a2844f0703f186d143a06146a04577d8", - "sector_size": 34359738368 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-0-559e581f022bb4e4ec6e719e563bf0e026ad6de42e56c18714a2c692b1b88d7e.vk": { - "cid": "QmWxEA7EdQCUJTzjNpxg5XTF45D2uVyYnN1QRUb5TRYU8M", - "digest": "2306247a1e616dbe07f01b88196c2044", - "sector_size": 34359738368 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.params": { - "cid": "QmP676KwuvyF9Y64uJnXvLtvD1xcuWQ6wD23RzYtQ6dd4f", - "digest": "215b1c667a4f46a1d0178338df568615", - "sector_size": 68719476736 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-2627e4006b67f99cef990c0a47d5426cb7ab0a0ad58fc1061547bf2d28b09def.vk": { - "cid": "QmPvPwbJtcSGyqB1rQJhSF5yvFbX9ZBSsHVej5F8JUyHUJ", - "digest": "0c9c423b28b1455fcbc329a1045fd4dd", - "sector_size": 68719476736 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.params": { - "cid": "QmUxPQfvckzm1t6MFRdDZ1fDK5UJzAjK7pTZ97cwyachdr", - "digest": "965132f51ae445b0e6d32692b7561995", - "sector_size": 68719476736 - }, - "v27-proof-of-spacetime-fallback-merkletree-poseidon_hasher-8-8-2-b62098629d07946e9028127e70295ed996fe3ed25b0f9f88eb610a0ab4385a3c.vk": { - "cid": "QmTxq2EBnQWb5R8tS4MHdchj4vNfLYGoSXxwJFvs5xgW4K", - "digest": "fc8c3d26e0e56373ad96cb41520d55a6", - "sector_size": 68719476736 - }, - 
"v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.params": { - "cid": "QmRjgZHERgqGoRagR788Kh6ybi26csVYa8mqbqhmZm57Jx", - "digest": "cfc7b0897d1eee48c586f7beb89e67f7", - "sector_size": 2048 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-032d3138d22506ec0082ed72b2dcba18df18477904e35bafee82b3793b06832f.vk": { - "cid": "QmNjvnvFP7KgovHUddULoB19fBHT81iz7NcUbzEHZUUPsm", - "digest": "fb59bd061c987eac7068008c44de346b", - "sector_size": 2048 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.params": { - "cid": "QmTpRPBA4dt8fgGpcVzi4L1KA1U2eBHCE8WVmS2GUygMvT", - "digest": "36d465915b0afbf96bd08e7915e00952", - "sector_size": 536870912 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-6babf46ce344ae495d558e7770a585b2382d54f225af8ed0397b8be7c3fcd472.vk": { - "cid": "QmRzDyVfQCLsxspoVsed5bcQRsG6KiktngJfcNBL3TJPZe", - "digest": "99d16df0eb6a7e227a4f4570c4f6b6f1", - "sector_size": 536870912 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.params": { - "cid": "QmV8ZjTSGzDUWmFvsq9NSyPBR7eDDUcvCPNgj2yE7HMAFu", - "digest": "34f3ddf1d1c9f41c0cd73b91e8b4bc27", - "sector_size": 8388608 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-0-0-sha256_hasher-ecd683648512ab1765faa2a5f14bab48f676e633467f0aa8aad4b55dcb0652bb.vk": { - "cid": "QmTa3VbjTiqJWU6r4WKayaQrUaaBsrpp5UDqYvPDd2C5hs", - "digest": "ec62d59651daa5631d3d1e9c782dd940", - "sector_size": 8388608 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.params": { - "cid": "Qmf8ngfArxrv9tFWDqBcNegdBMymvuakwyHKd1pbW3pbsb", - "digest": 
"a16d6f4c6424fb280236739f84b24f97", - "sector_size": 34359738368 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-0-sha256_hasher-82a357d2f2ca81dc61bb45f4a762807aedee1b0a53fd6c4e77b46a01bfef7820.vk": { - "cid": "QmfQgVFerArJ6Jupwyc9tKjLD9n1J9ajLHBdpY465tRM7M", - "digest": "7a139d82b8a02e35279d657e197f5c1f", - "sector_size": 34359738368 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.params": { - "cid": "QmfDha8271nXJn14Aq3qQeghjMBWbs6HNSGa6VuzCVk4TW", - "digest": "5d3cd3f107a3bea8a96d1189efd2965c", - "sector_size": 68719476736 - }, - "v27-stacked-proof-of-replication-merkletree-poseidon_hasher-8-8-2-sha256_hasher-96f1b4a04c5c51e4759bbf224bbc2ef5a42c7100f16ec0637123f16a845ddfb2.vk": { - "cid": "QmRVtTtiFzHJTHurYzaCvetGAchux9cktixT4aGHthN6Zt", - "digest": "62c366405404e60f171e661492740b1c", - "sector_size": 68719476736 - } -} \ No newline at end of file diff --git a/vendor/github.com/filecoin-project/lotus/build/version.go b/vendor/github.com/filecoin-project/lotus/build/version.go index b5d48eac7f..df1fe28de4 100644 --- a/vendor/github.com/filecoin-project/lotus/build/version.go +++ b/vendor/github.com/filecoin-project/lotus/build/version.go @@ -1,6 +1,10 @@ package build -import "fmt" +import ( + "fmt" + + "golang.org/x/xerrors" +) var CurrentCommit string var BuildType int @@ -25,7 +29,7 @@ func buildType() string { } // BuildVersion is the local build version, set by build system -const BuildVersion = "0.4.1" +const BuildVersion = "0.10.2" func UserVersion() string { return BuildVersion + buildType() + CurrentCommit @@ -52,8 +56,37 @@ func (ve Version) EqMajorMinor(v2 Version) bool { return ve&minorMask == v2&minorMask } -// APIVersion is a semver version of the rpc api exposed -var APIVersion Version = newVer(0, 5, 0) +type NodeType int + +const ( + NodeUnknown NodeType = iota + + NodeFull + NodeMiner + NodeWorker +) + +var 
RunningNodeType NodeType + +func VersionForType(nodeType NodeType) (Version, error) { + switch nodeType { + case NodeFull: + return FullAPIVersion, nil + case NodeMiner: + return MinerAPIVersion, nil + case NodeWorker: + return WorkerAPIVersion, nil + default: + return Version(0), xerrors.Errorf("unknown node type %d", nodeType) + } +} + +// semver versions of the rpc api exposed +var ( + FullAPIVersion = newVer(0, 17, 0) + MinerAPIVersion = newVer(0, 15, 0) + WorkerAPIVersion = newVer(0, 15, 0) +) //nolint:varcheck,deadcode const ( diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error.go b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error.go index e687982c8c..12f802c8fc 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error.go +++ b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error.go @@ -3,7 +3,7 @@ package aerrors import ( "fmt" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" + "github.com/filecoin-project/go-state-types/exitcode" "golang.org/x/xerrors" ) diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error_test.go b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error_test.go deleted file mode 100644 index 4d87ac396e..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/error_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package aerrors_test - -import ( - "testing" - - . 
"github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - - "github.com/stretchr/testify/assert" - "golang.org/x/xerrors" -) - -func TestFatalError(t *testing.T) { - e1 := xerrors.New("out of disk space") - e2 := xerrors.Errorf("could not put node: %w", e1) - e3 := xerrors.Errorf("could not save head: %w", e2) - ae := Escalate(e3, "failed to save the head") - aw1 := Wrap(ae, "saving head of new miner actor") - aw2 := Absorb(aw1, 1, "try to absorb fatal error") - aw3 := Wrap(aw2, "initializing actor") - aw4 := Wrap(aw3, "creating miner in storage market") - t.Logf("Verbose error: %+v", aw4) - t.Logf("Normal error: %v", aw4) - assert.True(t, IsFatal(aw4), "should be fatal") -} -func TestAbsorbeError(t *testing.T) { - e1 := xerrors.New("EOF") - e2 := xerrors.Errorf("could not decode: %w", e1) - ae := Absorb(e2, 35, "failed to decode CBOR") - aw1 := Wrap(ae, "saving head of new miner actor") - aw2 := Wrap(aw1, "initializing actor") - aw3 := Wrap(aw2, "creating miner in storage market") - t.Logf("Verbose error: %+v", aw3) - t.Logf("Normal error: %v", aw3) - assert.Equal(t, exitcode.ExitCode(35), RetCode(aw3)) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/wrap.go b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/wrap.go index 338659966a..0552829f91 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/wrap.go +++ b/vendor/github.com/filecoin-project/lotus/chain/actors/aerrors/wrap.go @@ -4,7 +4,7 @@ import ( "errors" "fmt" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" + "github.com/filecoin-project/go-state-types/exitcode" cbor "github.com/ipfs/go-ipld-cbor" "golang.org/x/xerrors" ) diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/policy/policy.go b/vendor/github.com/filecoin-project/lotus/chain/actors/policy/policy.go new file mode 100644 index 0000000000..ba09e44242 --- /dev/null +++ 
b/vendor/github.com/filecoin-project/lotus/chain/actors/policy/policy.go @@ -0,0 +1,116 @@ +package policy + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors" + market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" + miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + power0 "github.com/filecoin-project/specs-actors/actors/builtin/power" + verifreg0 "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + market2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/market" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + verifreg2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/verifreg" +) + +const ( + ChainFinality = miner0.ChainFinality + SealRandomnessLookback = ChainFinality +) + +// SetSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { + newTypes := make(map[abi.RegisteredSealProof]struct{}, len(types)) + for _, t := range types { + newTypes[t] = struct{}{} + } + // Set for all miner versions. + miner0.SupportedProofTypes = newTypes + miner2.SupportedProofTypes = newTypes +} + +// AddSupportedProofTypes sets supported proof types, across all actor versions. +// This should only be used for testing. +func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { + for _, t := range types { + // Set for all miner versions. + miner0.SupportedProofTypes[t] = struct{}{} + miner2.SupportedProofTypes[t] = struct{}{} + } +} + +// SetPreCommitChallengeDelay sets the pre-commit challenge delay across all +// actors versions. Use for testing. +func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { + // Set for all miner versions. 
+ miner0.PreCommitChallengeDelay = delay + miner2.PreCommitChallengeDelay = delay +} + +// TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. +func GetPreCommitChallengeDelay() abi.ChainEpoch { + return miner0.PreCommitChallengeDelay +} + +// SetConsensusMinerMinPower sets the minimum power of an individual miner must +// meet for leader election, across all actor versions. This should only be used +// for testing. +func SetConsensusMinerMinPower(p abi.StoragePower) { + power0.ConsensusMinerMinPower = p + for _, policy := range builtin2.SealProofPolicies { + policy.ConsensusMinerMinPower = p + } +} + +// SetMinVerifiedDealSize sets the minimum size of a verified deal. This should +// only be used for testing. +func SetMinVerifiedDealSize(size abi.StoragePower) { + verifreg0.MinVerifiedDealSize = size + verifreg2.MinVerifiedDealSize = size +} + +func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) abi.ChainEpoch { + switch ver { + case actors.Version0: + return miner0.MaxSealDuration[t] + case actors.Version2: + return miner2.MaxProveCommitDuration[t] + default: + panic("unsupported actors version") + } +} + +func DealProviderCollateralBounds( + size abi.PaddedPieceSize, verified bool, + rawBytePower, qaPower, baselinePower abi.StoragePower, + circulatingFil abi.TokenAmount, nwVer network.Version, +) (min, max abi.TokenAmount) { + switch actors.VersionForNetwork(nwVer) { + case actors.Version0: + return market0.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil, nwVer) + case actors.Version2: + return market2.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + default: + panic("unsupported network version") + } +} + +// Sets the challenge window and scales the proving period to match (such that +// there are always 48 challenge windows in a proving period). 
+func SetWPoStChallengeWindow(period abi.ChainEpoch) { + miner0.WPoStChallengeWindow = period + miner0.WPoStProvingPeriod = period * abi.ChainEpoch(miner0.WPoStPeriodDeadlines) + + miner2.WPoStChallengeWindow = period + miner2.WPoStProvingPeriod = period * abi.ChainEpoch(miner2.WPoStPeriodDeadlines) +} + +func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { + if nwVer <= network.Version3 { + return 10 + } + + return ChainFinality +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/actors/version.go b/vendor/github.com/filecoin-project/lotus/chain/actors/version.go new file mode 100644 index 0000000000..2efd903bbd --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/actors/version.go @@ -0,0 +1,26 @@ +package actors + +import ( + "fmt" + + "github.com/filecoin-project/go-state-types/network" +) + +type Version int + +const ( + Version0 Version = 0 + Version2 Version = 2 +) + +// Converts a network version into an actors adt version. +func VersionForNetwork(version network.Version) Version { + switch version { + case network.Version0, network.Version1, network.Version2, network.Version3: + return Version0 + case network.Version4, network.Version5: + return Version2 + default: + panic(fmt.Sprintf("unsupported network version %d", version)) + } +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/badtscache.go b/vendor/github.com/filecoin-project/lotus/chain/badtscache.go deleted file mode 100644 index cdb75f842c..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/badtscache.go +++ /dev/null @@ -1,35 +0,0 @@ -package chain - -import ( - "github.com/filecoin-project/lotus/build" - lru "github.com/hashicorp/golang-lru" - "github.com/ipfs/go-cid" -) - -type BadBlockCache struct { - badBlocks *lru.ARCCache -} - -func NewBadBlockCache() *BadBlockCache { - cache, err := lru.NewARC(build.BadBlockCacheSize) - if err != nil { - panic(err) // ok - } - - return &BadBlockCache{ - badBlocks: cache, - } -} - 
-func (bts *BadBlockCache) Add(c cid.Cid, reason string) { - bts.badBlocks.Add(c, reason) -} - -func (bts *BadBlockCache) Has(c cid.Cid) (string, bool) { - rval, ok := bts.badBlocks.Get(c) - if !ok { - return "", false - } - - return rval.(string), true -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/beacon/beacon.go b/vendor/github.com/filecoin-project/lotus/chain/beacon/beacon.go deleted file mode 100644 index 2be2e7f1ce..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/beacon/beacon.go +++ /dev/null @@ -1,93 +0,0 @@ -package beacon - -import ( - "context" - "time" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/abi" - logging "github.com/ipfs/go-log" - "golang.org/x/xerrors" -) - -var log = logging.Logger("beacon") - -type Response struct { - Entry types.BeaconEntry - Err error -} - -// RandomBeacon represents a system that provides randomness to Lotus. -// Other components interrogate the RandomBeacon to acquire randomness that's -// valid for a specific chain epoch. Also to verify beacon entries that have -// been posted on chain. 
-type RandomBeacon interface { - Entry(context.Context, uint64) <-chan Response - VerifyEntry(types.BeaconEntry, types.BeaconEntry) error - MaxBeaconRoundForEpoch(abi.ChainEpoch, types.BeaconEntry) uint64 -} - -func ValidateBlockValues(b RandomBeacon, h *types.BlockHeader, prevEntry types.BeaconEntry) error { - maxRound := b.MaxBeaconRoundForEpoch(h.Height, prevEntry) - if maxRound == prevEntry.Round { - if len(h.BeaconEntries) != 0 { - return xerrors.Errorf("expected not to have any beacon entries in this block, got %d", len(h.BeaconEntries)) - } - return nil - } - - last := h.BeaconEntries[len(h.BeaconEntries)-1] - if last.Round != maxRound { - return xerrors.Errorf("expected final beacon entry in block to be at round %d, got %d", maxRound, last.Round) - } - - for i, e := range h.BeaconEntries { - if err := b.VerifyEntry(e, prevEntry); err != nil { - return xerrors.Errorf("beacon entry %d (%d - %x (%d)) was invalid: %w", i, e.Round, e.Data, len(e.Data), err) - } - prevEntry = e - } - - return nil -} - -func BeaconEntriesForBlock(ctx context.Context, beacon RandomBeacon, round abi.ChainEpoch, prev types.BeaconEntry) ([]types.BeaconEntry, error) { - start := time.Now() - - maxRound := beacon.MaxBeaconRoundForEpoch(round, prev) - if maxRound == prev.Round { - return nil, nil - } - - // TODO: this is a sketchy way to handle the genesis block not having a beacon entry - if prev.Round == 0 { - prev.Round = maxRound - 1 - } - - cur := maxRound - var out []types.BeaconEntry - for cur > prev.Round { - rch := beacon.Entry(ctx, cur) - select { - case resp := <-rch: - if resp.Err != nil { - return nil, xerrors.Errorf("beacon entry request returned error: %w", resp.Err) - } - - out = append(out, resp.Entry) - cur = resp.Entry.Round - 1 - case <-ctx.Done(): - return nil, xerrors.Errorf("context timed out waiting on beacon entry to come back for round %d: %w", round, ctx.Err()) - } - } - - log.Debugw("fetching beacon entries", "took", time.Since(start), "numEntries", len(out)) 
- reverse(out) - return out, nil -} - -func reverse(arr []types.BeaconEntry) { - for i := 0; i < len(arr)/2; i++ { - arr[i], arr[len(arr)-(1+i)] = arr[len(arr)-(1+i)], arr[i] - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/beacon/drand/drand.go b/vendor/github.com/filecoin-project/lotus/chain/beacon/drand/drand.go deleted file mode 100644 index 00ff05f810..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/beacon/drand/drand.go +++ /dev/null @@ -1,192 +0,0 @@ -package drand - -import ( - "bytes" - "context" - "sync" - "time" - - dchain "github.com/drand/drand/chain" - dclient "github.com/drand/drand/client" - hclient "github.com/drand/drand/client/http" - dlog "github.com/drand/drand/log" - gclient "github.com/drand/drand/lp2p/client" - "github.com/drand/kyber" - kzap "github.com/go-kit/kit/log/zap" - "go.uber.org/zap/zapcore" - "golang.org/x/xerrors" - - logging "github.com/ipfs/go-log" - pubsub "github.com/libp2p/go-libp2p-pubsub" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -var log = logging.Logger("drand") - -type drandPeer struct { - addr string - tls bool -} - -func (dp *drandPeer) Address() string { - return dp.addr -} - -func (dp *drandPeer) IsTLS() bool { - return dp.tls -} - -// DrandBeacon connects Lotus with a drand network in order to provide -// randomness to the system in a way that's aligned with Filecoin rounds/epochs. -// -// We connect to drand peers via their public HTTP endpoints. The peers are -// enumerated in the drandServers variable. -// -// The root trust for the Drand chain is configured from build.DrandChain. 
-type DrandBeacon struct { - client dclient.Client - - pubkey kyber.Point - - // seconds - interval time.Duration - - drandGenTime uint64 - filGenTime uint64 - filRoundTime uint64 - - cacheLk sync.Mutex - localCache map[uint64]types.BeaconEntry -} - -func NewDrandBeacon(genesisTs, interval uint64, ps *pubsub.PubSub, config dtypes.DrandConfig) (*DrandBeacon, error) { - if genesisTs == 0 { - panic("what are you doing this cant be zero") - } - - drandChain, err := dchain.InfoFromJSON(bytes.NewReader([]byte(config.ChainInfoJSON))) - if err != nil { - return nil, xerrors.Errorf("unable to unmarshal drand chain info: %w", err) - } - - dlogger := dlog.NewKitLoggerFrom(kzap.NewZapSugarLogger( - log.SugaredLogger.Desugar(), zapcore.InfoLevel)) - - var clients []dclient.Client - for _, url := range config.Servers { - hc, err := hclient.NewWithInfo(url, drandChain, nil) - if err != nil { - return nil, xerrors.Errorf("could not create http drand client: %w", err) - } - clients = append(clients, hc) - - } - - opts := []dclient.Option{ - dclient.WithChainInfo(drandChain), - dclient.WithCacheSize(1024), - dclient.WithLogger(dlogger), - dclient.WithAutoWatch(), - } - - if ps != nil { - opts = append(opts, gclient.WithPubsub(ps)) - } else { - log.Info("drand beacon without pubsub") - } - - client, err := dclient.Wrap(clients, opts...) 
- if err != nil { - return nil, xerrors.Errorf("creating drand client") - } - - db := &DrandBeacon{ - client: client, - localCache: make(map[uint64]types.BeaconEntry), - } - - db.pubkey = drandChain.PublicKey - db.interval = drandChain.Period - db.drandGenTime = uint64(drandChain.GenesisTime) - db.filRoundTime = interval - db.filGenTime = genesisTs - - return db, nil -} - -func (db *DrandBeacon) Entry(ctx context.Context, round uint64) <-chan beacon.Response { - out := make(chan beacon.Response, 1) - if round != 0 { - be := db.getCachedValue(round) - if be != nil { - out <- beacon.Response{Entry: *be} - close(out) - return out - } - } - - go func() { - start := time.Now() - log.Infow("start fetching randomness", "round", round) - resp, err := db.client.Get(ctx, round) - - var br beacon.Response - if err != nil { - br.Err = xerrors.Errorf("drand failed Get request: %w", err) - } else { - br.Entry.Round = resp.Round() - br.Entry.Data = resp.Signature() - } - log.Infow("done fetching randomness", "round", round, "took", time.Since(start)) - out <- br - close(out) - }() - - return out -} -func (db *DrandBeacon) cacheValue(e types.BeaconEntry) { - db.cacheLk.Lock() - defer db.cacheLk.Unlock() - db.localCache[e.Round] = e -} - -func (db *DrandBeacon) getCachedValue(round uint64) *types.BeaconEntry { - db.cacheLk.Lock() - defer db.cacheLk.Unlock() - v, ok := db.localCache[round] - if !ok { - return nil - } - return &v -} - -func (db *DrandBeacon) VerifyEntry(curr types.BeaconEntry, prev types.BeaconEntry) error { - if prev.Round == 0 { - // TODO handle genesis better - return nil - } - b := &dchain.Beacon{ - PreviousSig: prev.Data, - Round: curr.Round, - Signature: curr.Data, - } - err := dchain.VerifyBeacon(db.pubkey, b) - if err == nil { - db.cacheValue(curr) - } - return err -} - -func (db *DrandBeacon) MaxBeaconRoundForEpoch(filEpoch abi.ChainEpoch, prevEntry types.BeaconEntry) uint64 { - // TODO: sometimes the genesis time for filecoin is zero and this goes negative 
- latestTs := ((uint64(filEpoch) * db.filRoundTime) + db.filGenTime) - db.filRoundTime - dround := (latestTs - db.drandGenTime) / uint64(db.interval.Seconds()) - return dround -} - -var _ beacon.RandomBeacon = (*DrandBeacon)(nil) diff --git a/vendor/github.com/filecoin-project/lotus/chain/beacon/drand/drand_test.go b/vendor/github.com/filecoin-project/lotus/chain/beacon/drand/drand_test.go deleted file mode 100644 index d7d9c4d189..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/beacon/drand/drand_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package drand - -import ( - "os" - "testing" - - dchain "github.com/drand/drand/chain" - hclient "github.com/drand/drand/client/http" - "github.com/stretchr/testify/assert" - - "github.com/filecoin-project/lotus/build" -) - -func TestPrintGroupInfo(t *testing.T) { - server := build.DrandConfig.Servers[0] - c, err := hclient.New(server, nil, nil) - assert.NoError(t, err) - cg := c.(interface { - FetchChainInfo(groupHash []byte) (*dchain.Info, error) - }) - chain, err := cg.FetchChainInfo(nil) - assert.NoError(t, err) - err = chain.ToJSON(os.Stdout) - assert.NoError(t, err) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/beacon/mock.go b/vendor/github.com/filecoin-project/lotus/chain/beacon/mock.go deleted file mode 100644 index dc45ae895f..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/beacon/mock.go +++ /dev/null @@ -1,64 +0,0 @@ -package beacon - -import ( - "bytes" - "context" - "encoding/binary" - "time" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/minio/blake2b-simd" - "golang.org/x/xerrors" -) - -// Mock beacon assumes that filecoin rounds are 1:1 mapped with the beacon rounds -type mockBeacon struct { - interval time.Duration -} - -func NewMockBeacon(interval time.Duration) RandomBeacon { - mb := &mockBeacon{interval: interval} - - return mb -} - -func (mb *mockBeacon) RoundTime() time.Duration { - 
return mb.interval -} - -func (mb *mockBeacon) entryForIndex(index uint64) types.BeaconEntry { - buf := make([]byte, 8) - binary.BigEndian.PutUint64(buf, index) - rval := blake2b.Sum256(buf) - return types.BeaconEntry{ - Round: index, - Data: rval[:], - } -} - -func (mb *mockBeacon) Entry(ctx context.Context, index uint64) <-chan Response { - e := mb.entryForIndex(index) - out := make(chan Response, 1) - out <- Response{Entry: e} - return out -} - -func (mb *mockBeacon) VerifyEntry(from types.BeaconEntry, to types.BeaconEntry) error { - // TODO: cache this, especially for bls - oe := mb.entryForIndex(from.Round) - if !bytes.Equal(from.Data, oe.Data) { - return xerrors.Errorf("mock beacon entry was invalid!") - } - return nil -} - -func (mb *mockBeacon) IsEntryForEpoch(e types.BeaconEntry, epoch abi.ChainEpoch, nulls int) (bool, error) { - return int64(e.Round) <= int64(epoch) && int64(epoch)-int64(nulls) >= int64(e.Round), nil -} - -func (mb *mockBeacon) MaxBeaconRoundForEpoch(epoch abi.ChainEpoch, prevEntry types.BeaconEntry) uint64 { - return uint64(epoch) -} - -var _ RandomBeacon = (*mockBeacon)(nil) diff --git a/vendor/github.com/filecoin-project/lotus/chain/block_receipt_tracker.go b/vendor/github.com/filecoin-project/lotus/chain/block_receipt_tracker.go deleted file mode 100644 index f182fd1806..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/block_receipt_tracker.go +++ /dev/null @@ -1,71 +0,0 @@ -package chain - -import ( - "sort" - "sync" - "time" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/hashicorp/golang-lru" - peer "github.com/libp2p/go-libp2p-core/peer" -) - -type blockReceiptTracker struct { - lk sync.Mutex - - // using an LRU cache because i don't want to handle all the edge cases for - // manual cleanup and maintenance of a fixed size set - cache *lru.Cache -} - -type peerSet struct { - peers map[peer.ID]time.Time -} - -func newBlockReceiptTracker() *blockReceiptTracker { - c, _ := lru.New(512) - return 
&blockReceiptTracker{ - cache: c, - } -} - -func (brt *blockReceiptTracker) Add(p peer.ID, ts *types.TipSet) { - brt.lk.Lock() - defer brt.lk.Unlock() - - val, ok := brt.cache.Get(ts.Key()) - if !ok { - pset := &peerSet{ - peers: map[peer.ID]time.Time{ - p: time.Now(), - }, - } - brt.cache.Add(ts.Key(), pset) - return - } - - val.(*peerSet).peers[p] = time.Now() -} - -func (brt *blockReceiptTracker) GetPeers(ts *types.TipSet) []peer.ID { - brt.lk.Lock() - defer brt.lk.Unlock() - - val, ok := brt.cache.Get(ts.Key()) - if !ok { - return nil - } - - ps := val.(*peerSet) - - out := make([]peer.ID, 0, len(ps.peers)) - for p := range ps.peers { - out = append(out, p) - } - - sort.Slice(out, func(i, j int) bool { - return ps.peers[out[i]].Before(ps.peers[out[j]]) - }) - - return out -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/blocksync/blocksync.go b/vendor/github.com/filecoin-project/lotus/chain/blocksync/blocksync.go deleted file mode 100644 index a9251c4192..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/blocksync/blocksync.go +++ /dev/null @@ -1,276 +0,0 @@ -package blocksync - -import ( - "bufio" - "context" - "time" - - "github.com/libp2p/go-libp2p-core/protocol" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - cborutil "github.com/filecoin-project/go-cbor-util" - - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" -) - -var log = logging.Logger("blocksync") - -type NewStreamFunc func(context.Context, peer.ID, ...protocol.ID) (inet.Stream, error) - -const BlockSyncProtocolID = "/fil/sync/blk/0.0.1" - -const BlockSyncMaxRequestLength = 800 - -// BlockSyncService is the component that services BlockSync requests from -// peers. -// -// BlockSync is the basic chain synchronization protocol of Filecoin. 
BlockSync -// is an RPC-oriented protocol, with a single operation to request blocks. -// -// A request contains a start anchor block (referred to with a CID), and a -// amount of blocks requested beyond the anchor (including the anchor itself). -// -// A client can also pass options, encoded as a 64-bit bitfield. Lotus supports -// two options at the moment: -// -// - include block contents -// - include block messages -// -// The response will include a status code, an optional message, and the -// response payload in case of success. The payload is a slice of serialized -// tipsets. -type BlockSyncService struct { - cs *store.ChainStore -} - -type BlockSyncRequest struct { - Start []cid.Cid - RequestLength uint64 - - Options uint64 -} - -type BSOptions struct { - IncludeBlocks bool - IncludeMessages bool -} - -func ParseBSOptions(optfield uint64) *BSOptions { - return &BSOptions{ - IncludeBlocks: optfield&(BSOptBlocks) != 0, - IncludeMessages: optfield&(BSOptMessages) != 0, - } -} - -const ( - BSOptBlocks = 1 << iota - BSOptMessages -) - -const ( - StatusOK = uint64(0) - StatusPartial = uint64(101) - StatusNotFound = uint64(201) - StatusGoAway = uint64(202) - StatusInternalError = uint64(203) - StatusBadRequest = uint64(204) -) - -type BlockSyncResponse struct { - Chain []*BSTipSet - - Status uint64 - Message string -} - -type BSTipSet struct { - Blocks []*types.BlockHeader - - BlsMessages []*types.Message - BlsMsgIncludes [][]uint64 - - SecpkMessages []*types.SignedMessage - SecpkMsgIncludes [][]uint64 -} - -func NewBlockSyncService(cs *store.ChainStore) *BlockSyncService { - return &BlockSyncService{ - cs: cs, - } -} - -func (bss *BlockSyncService) HandleStream(s inet.Stream) { - ctx, span := trace.StartSpan(context.Background(), "blocksync.HandleStream") - defer span.End() - - defer s.Close() //nolint:errcheck - - var req BlockSyncRequest - if err := cborutil.ReadCborRPC(bufio.NewReader(s), &req); err != nil { - log.Warnf("failed to read block sync request: 
%s", err) - return - } - log.Infow("block sync request", "start", req.Start, "len", req.RequestLength) - - resp, err := bss.processRequest(ctx, s.Conn().RemotePeer(), &req) - if err != nil { - log.Warn("failed to process block sync request: ", err) - return - } - - writeDeadline := 60 * time.Second - _ = s.SetDeadline(time.Now().Add(writeDeadline)) - if err := cborutil.WriteCborRPC(s, resp); err != nil { - log.Warnw("failed to write back response for handle stream", "err", err, "peer", s.Conn().RemotePeer()) - return - } -} - -func (bss *BlockSyncService) processRequest(ctx context.Context, p peer.ID, req *BlockSyncRequest) (*BlockSyncResponse, error) { - _, span := trace.StartSpan(ctx, "blocksync.ProcessRequest") - defer span.End() - - opts := ParseBSOptions(req.Options) - if len(req.Start) == 0 { - return &BlockSyncResponse{ - Status: StatusBadRequest, - Message: "no cids given in blocksync request", - }, nil - } - - span.AddAttributes( - trace.BoolAttribute("blocks", opts.IncludeBlocks), - trace.BoolAttribute("messages", opts.IncludeMessages), - trace.Int64Attribute("reqlen", int64(req.RequestLength)), - ) - - reqlen := req.RequestLength - if reqlen > BlockSyncMaxRequestLength { - log.Warnw("limiting blocksync request length", "orig", req.RequestLength, "peer", p) - reqlen = BlockSyncMaxRequestLength - } - - chain, err := collectChainSegment(bss.cs, types.NewTipSetKey(req.Start...), reqlen, opts) - if err != nil { - log.Warn("encountered error while responding to block sync request: ", err) - return &BlockSyncResponse{ - Status: StatusInternalError, - Message: err.Error(), - }, nil - } - - status := StatusOK - if reqlen < req.RequestLength { - status = StatusPartial - } - - return &BlockSyncResponse{ - Chain: chain, - Status: status, - }, nil -} - -func collectChainSegment(cs *store.ChainStore, start types.TipSetKey, length uint64, opts *BSOptions) ([]*BSTipSet, error) { - var bstips []*BSTipSet - cur := start - for { - var bst BSTipSet - ts, err := 
cs.LoadTipSet(cur) - if err != nil { - return nil, xerrors.Errorf("failed loading tipset %s: %w", cur, err) - } - - if opts.IncludeMessages { - bmsgs, bmincl, smsgs, smincl, err := gatherMessages(cs, ts) - if err != nil { - return nil, xerrors.Errorf("gather messages failed: %w", err) - } - - bst.BlsMessages = bmsgs - bst.BlsMsgIncludes = bmincl - bst.SecpkMessages = smsgs - bst.SecpkMsgIncludes = smincl - } - - if opts.IncludeBlocks { - bst.Blocks = ts.Blocks() - } - - bstips = append(bstips, &bst) - - if uint64(len(bstips)) >= length || ts.Height() == 0 { - return bstips, nil - } - - cur = ts.Parents() - } -} - -func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { - blsmsgmap := make(map[cid.Cid]uint64) - secpkmsgmap := make(map[cid.Cid]uint64) - var secpkmsgs []*types.SignedMessage - var blsmsgs []*types.Message - var secpkincl, blsincl [][]uint64 - - for _, b := range ts.Blocks() { - bmsgs, smsgs, err := cs.MessagesForBlock(b) - if err != nil { - return nil, nil, nil, nil, err - } - - bmi := make([]uint64, 0, len(bmsgs)) - for _, m := range bmsgs { - i, ok := blsmsgmap[m.Cid()] - if !ok { - i = uint64(len(blsmsgs)) - blsmsgs = append(blsmsgs, m) - blsmsgmap[m.Cid()] = i - } - - bmi = append(bmi, i) - } - blsincl = append(blsincl, bmi) - - smi := make([]uint64, 0, len(smsgs)) - for _, m := range smsgs { - i, ok := secpkmsgmap[m.Cid()] - if !ok { - i = uint64(len(secpkmsgs)) - secpkmsgs = append(secpkmsgs, m) - secpkmsgmap[m.Cid()] = i - } - - smi = append(smi, i) - } - secpkincl = append(secpkincl, smi) - } - - return blsmsgs, blsincl, secpkmsgs, secpkincl, nil -} - -func bstsToFullTipSet(bts *BSTipSet) (*store.FullTipSet, error) { - fts := &store.FullTipSet{} - for i, b := range bts.Blocks { - fb := &types.FullBlock{ - Header: b, - } - for _, mi := range bts.BlsMsgIncludes[i] { - fb.BlsMessages = append(fb.BlsMessages, bts.BlsMessages[mi]) - } - for _, mi := range 
bts.SecpkMsgIncludes[i] { - fb.SecpkMessages = append(fb.SecpkMessages, bts.SecpkMessages[mi]) - } - - fts.Blocks = append(fts.Blocks, fb) - } - - return fts, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/blocksync/blocksync_client.go b/vendor/github.com/filecoin-project/lotus/chain/blocksync/blocksync_client.go deleted file mode 100644 index daa4b6335c..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/blocksync/blocksync_client.go +++ /dev/null @@ -1,602 +0,0 @@ -package blocksync - -import ( - "bufio" - "context" - "fmt" - "math/rand" - "sort" - "sync" - "time" - - blocks "github.com/ipfs/go-block-format" - bserv "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - graphsync "github.com/ipfs/go-graphsync" - gsnet "github.com/ipfs/go-graphsync/network" - host "github.com/libp2p/go-libp2p-core/host" - inet "github.com/libp2p/go-libp2p-core/network" - "github.com/libp2p/go-libp2p-core/peer" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - incrt "github.com/filecoin-project/lotus/lib/increadtimeout" - "github.com/filecoin-project/lotus/lib/peermgr" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -type BlockSync struct { - bserv bserv.BlockService - gsync graphsync.GraphExchange - host host.Host - - syncPeers *bsPeerTracker - peerMgr *peermgr.PeerMgr -} - -func NewBlockSyncClient(bserv dtypes.ChainBlockService, h host.Host, pmgr peermgr.MaybePeerMgr, gs dtypes.Graphsync) *BlockSync { - return &BlockSync{ - bserv: bserv, - host: h, - syncPeers: newPeerTracker(pmgr.Mgr), - peerMgr: pmgr.Mgr, - gsync: gs, - } -} - -func (bs *BlockSync) processStatus(req *BlockSyncRequest, res *BlockSyncResponse) error { - switch res.Status { - case StatusPartial: // Partial Response - return xerrors.Errorf("not handling partial blocksync responses yet") - case 
StatusNotFound: // req.Start not found - return xerrors.Errorf("not found") - case StatusGoAway: // Go Away - return xerrors.Errorf("not handling 'go away' blocksync responses yet") - case StatusInternalError: // Internal Error - return xerrors.Errorf("block sync peer errored: %s", res.Message) - case StatusBadRequest: - return xerrors.Errorf("block sync request invalid: %s", res.Message) - default: - return xerrors.Errorf("unrecognized response code: %d", res.Status) - } -} - -// GetBlocks fetches count blocks from the network, from the provided tipset -// *backwards*, returning as many tipsets as count. -// -// {hint/usage}: This is used by the Syncer during normal chain syncing and when -// resolving forks. -func (bs *BlockSync) GetBlocks(ctx context.Context, tsk types.TipSetKey, count int) ([]*types.TipSet, error) { - ctx, span := trace.StartSpan(ctx, "bsync.GetBlocks") - defer span.End() - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("tipset", fmt.Sprint(tsk.Cids())), - trace.Int64Attribute("count", int64(count)), - ) - } - - req := &BlockSyncRequest{ - Start: tsk.Cids(), - RequestLength: uint64(count), - Options: BSOptBlocks, - } - - // this peerset is sorted by latency and failure counting. - peers := bs.getPeers() - - // randomize the first few peers so we don't always pick the same peer - shufflePrefix(peers) - - start := time.Now() - var oerr error - - for _, p := range peers { - // TODO: doing this synchronously isnt great, but fetching in parallel - // may not be a good idea either. 
think about this more - select { - case <-ctx.Done(): - return nil, xerrors.Errorf("blocksync getblocks failed: %w", ctx.Err()) - default: - } - - res, err := bs.sendRequestToPeer(ctx, p, req) - if err != nil { - oerr = err - if !xerrors.Is(err, inet.ErrNoConn) { - log.Warnf("BlockSync request failed for peer %s: %s", p.String(), err) - } - continue - } - - if res.Status == StatusOK || res.Status == StatusPartial { - resp, err := bs.processBlocksResponse(req, res) - if err != nil { - return nil, xerrors.Errorf("success response from peer failed to process: %w", err) - } - bs.syncPeers.logGlobalSuccess(time.Since(start)) - bs.host.ConnManager().TagPeer(p, "bsync", 25) - return resp, nil - } - - oerr = bs.processStatus(req, res) - if oerr != nil { - log.Warnf("BlockSync peer %s response was an error: %s", p.String(), oerr) - } - } - return nil, xerrors.Errorf("GetBlocks failed with all peers: %w", oerr) -} - -func (bs *BlockSync) GetFullTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { - // TODO: round robin through these peers on error - - req := &BlockSyncRequest{ - Start: tsk.Cids(), - RequestLength: 1, - Options: BSOptBlocks | BSOptMessages, - } - - res, err := bs.sendRequestToPeer(ctx, p, req) - if err != nil { - return nil, err - } - - switch res.Status { - case 0: // Success - if len(res.Chain) == 0 { - return nil, fmt.Errorf("got zero length chain response") - } - bts := res.Chain[0] - - return bstsToFullTipSet(bts) - case 101: // Partial Response - return nil, xerrors.Errorf("partial responses are not handled for single tipset fetching") - case 201: // req.Start not found - return nil, fmt.Errorf("not found") - case 202: // Go Away - return nil, xerrors.Errorf("received 'go away' response peer") - case 203: // Internal Error - return nil, fmt.Errorf("block sync peer errored: %q", res.Message) - case 204: // Invalid Request - return nil, fmt.Errorf("block sync request invalid: %q", res.Message) - default: - return nil, 
fmt.Errorf("unrecognized response code") - } -} - -func shufflePrefix(peers []peer.ID) { - pref := 5 - if len(peers) < pref { - pref = len(peers) - } - - buf := make([]peer.ID, pref) - perm := rand.Perm(pref) - for i, v := range perm { - buf[i] = peers[v] - } - - copy(peers, buf) -} - -func (bs *BlockSync) GetChainMessages(ctx context.Context, h *types.TipSet, count uint64) ([]*BSTipSet, error) { - ctx, span := trace.StartSpan(ctx, "GetChainMessages") - defer span.End() - - peers := bs.getPeers() - // randomize the first few peers so we don't always pick the same peer - shufflePrefix(peers) - - req := &BlockSyncRequest{ - Start: h.Cids(), - RequestLength: count, - Options: BSOptMessages, - } - - var err error - start := time.Now() - - for _, p := range peers { - res, rerr := bs.sendRequestToPeer(ctx, p, req) - if rerr != nil { - err = rerr - log.Warnf("BlockSync request failed for peer %s: %s", p.String(), err) - continue - } - - if res.Status == StatusOK { - bs.syncPeers.logGlobalSuccess(time.Since(start)) - return res.Chain, nil - } - - if res.Status == StatusPartial { - // TODO: track partial response sizes to ensure we don't overrequest too often - return res.Chain, nil - } - - err = bs.processStatus(req, res) - if err != nil { - log.Warnf("BlockSync peer %s response was an error: %s", p.String(), err) - } - } - - if err == nil { - return nil, xerrors.Errorf("GetChainMessages failed, no peers connected") - } - - // TODO: What if we have no peers (and err is nil)? 
- return nil, xerrors.Errorf("GetChainMessages failed with all peers(%d): %w", len(peers), err) -} - -func (bs *BlockSync) sendRequestToPeer(ctx context.Context, p peer.ID, req *BlockSyncRequest) (_ *BlockSyncResponse, err error) { - ctx, span := trace.StartSpan(ctx, "sendRequestToPeer") - defer span.End() - - defer func() { - if err != nil { - if span.IsRecordingEvents() { - span.SetStatus(trace.Status{ - Code: 5, - Message: err.Error(), - }) - } - } - }() - - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("peer", p.Pretty()), - ) - } - - gsproto := string(gsnet.ProtocolGraphsync) - supp, err := bs.host.Peerstore().SupportsProtocols(p, BlockSyncProtocolID, gsproto) - if err != nil { - return nil, xerrors.Errorf("failed to get protocols for peer: %w", err) - } - - if len(supp) == 0 { - return nil, xerrors.Errorf("peer %s supports no known sync protocols", p) - } - - switch supp[0] { - case BlockSyncProtocolID: - res, err := bs.fetchBlocksBlockSync(ctx, p, req) - if err != nil { - return nil, xerrors.Errorf("blocksync req failed: %w", err) - } - return res, nil - case gsproto: - res, err := bs.fetchBlocksGraphSync(ctx, p, req) - if err != nil { - return nil, xerrors.Errorf("graphsync req failed: %w", err) - } - return res, nil - default: - return nil, xerrors.Errorf("peerstore somehow returned unexpected protocols: %v", supp) - } - -} -func (bs *BlockSync) fetchBlocksBlockSync(ctx context.Context, p peer.ID, req *BlockSyncRequest) (*BlockSyncResponse, error) { - ctx, span := trace.StartSpan(ctx, "blockSyncFetch") - defer span.End() - - start := time.Now() - s, err := bs.host.NewStream(inet.WithNoDial(ctx, "should already have connection"), p, BlockSyncProtocolID) - if err != nil { - bs.RemovePeer(p) - return nil, xerrors.Errorf("failed to open stream to peer: %w", err) - } - _ = s.SetWriteDeadline(time.Now().Add(5 * time.Second)) - - if err := cborutil.WriteCborRPC(s, req); err != nil { - _ = s.SetWriteDeadline(time.Time{}) - 
bs.syncPeers.logFailure(p, time.Since(start)) - return nil, err - } - _ = s.SetWriteDeadline(time.Time{}) - - var res BlockSyncResponse - r := incrt.New(s, 50<<10, 5*time.Second) - if err := cborutil.ReadCborRPC(bufio.NewReader(r), &res); err != nil { - bs.syncPeers.logFailure(p, time.Since(start)) - return nil, err - } - - if span.IsRecordingEvents() { - span.AddAttributes( - trace.Int64Attribute("resp_status", int64(res.Status)), - trace.StringAttribute("msg", res.Message), - trace.Int64Attribute("chain_len", int64(len(res.Chain))), - ) - } - - bs.syncPeers.logSuccess(p, time.Since(start)) - return &res, nil -} - -func (bs *BlockSync) processBlocksResponse(req *BlockSyncRequest, res *BlockSyncResponse) ([]*types.TipSet, error) { - if len(res.Chain) == 0 { - return nil, xerrors.Errorf("got no blocks in successful blocksync response") - } - - cur, err := types.NewTipSet(res.Chain[0].Blocks) - if err != nil { - return nil, err - } - - out := []*types.TipSet{cur} - for bi := 1; bi < len(res.Chain); bi++ { - next := res.Chain[bi].Blocks - nts, err := types.NewTipSet(next) - if err != nil { - return nil, err - } - - if !types.CidArrsEqual(cur.Parents().Cids(), nts.Cids()) { - return nil, fmt.Errorf("parents of tipset[%d] were not tipset[%d]", bi-1, bi) - } - - out = append(out, nts) - cur = nts - } - return out, nil -} - -func (bs *BlockSync) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) { - sb, err := bs.bserv.GetBlock(ctx, c) - if err != nil { - return nil, err - } - - return types.DecodeBlock(sb.RawData()) -} - -func (bs *BlockSync) AddPeer(p peer.ID) { - bs.syncPeers.addPeer(p) -} - -func (bs *BlockSync) RemovePeer(p peer.ID) { - bs.syncPeers.removePeer(p) -} - -// getPeers returns a preference-sorted set of peers to query. 
-func (bs *BlockSync) getPeers() []peer.ID { - return bs.syncPeers.prefSortedPeers() -} - -func (bs *BlockSync) FetchMessagesByCids(ctx context.Context, cids []cid.Cid) ([]*types.Message, error) { - out := make([]*types.Message, len(cids)) - - err := bs.fetchCids(ctx, cids, func(i int, b blocks.Block) error { - msg, err := types.DecodeMessage(b.RawData()) - if err != nil { - return err - } - - if out[i] != nil { - return fmt.Errorf("received duplicate message") - } - - out[i] = msg - return nil - }) - if err != nil { - return nil, err - } - return out, nil -} - -func (bs *BlockSync) FetchSignedMessagesByCids(ctx context.Context, cids []cid.Cid) ([]*types.SignedMessage, error) { - out := make([]*types.SignedMessage, len(cids)) - - err := bs.fetchCids(ctx, cids, func(i int, b blocks.Block) error { - smsg, err := types.DecodeSignedMessage(b.RawData()) - if err != nil { - return err - } - - if out[i] != nil { - return fmt.Errorf("received duplicate message") - } - - out[i] = smsg - return nil - }) - if err != nil { - return nil, err - } - return out, nil -} - -func (bs *BlockSync) fetchCids(ctx context.Context, cids []cid.Cid, cb func(int, blocks.Block) error) error { - resp := bs.bserv.GetBlocks(context.TODO(), cids) - - m := make(map[cid.Cid]int) - for i, c := range cids { - m[c] = i - } - - for i := 0; i < len(cids); i++ { - select { - case v, ok := <-resp: - if !ok { - if i == len(cids)-1 { - break - } - - return fmt.Errorf("failed to fetch all messages") - } - - ix, ok := m[v.Cid()] - if !ok { - return fmt.Errorf("received message we didnt ask for") - } - - if err := cb(ix, v); err != nil { - return err - } - } - } - - return nil -} - -type peerStats struct { - successes int - failures int - firstSeen time.Time - averageTime time.Duration -} - -type bsPeerTracker struct { - lk sync.Mutex - - peers map[peer.ID]*peerStats - avgGlobalTime time.Duration - - pmgr *peermgr.PeerMgr -} - -func newPeerTracker(pmgr *peermgr.PeerMgr) *bsPeerTracker { - return &bsPeerTracker{ 
- peers: make(map[peer.ID]*peerStats), - pmgr: pmgr, - } -} - -func (bpt *bsPeerTracker) addPeer(p peer.ID) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - if _, ok := bpt.peers[p]; ok { - return - } - bpt.peers[p] = &peerStats{ - firstSeen: time.Now(), - } - -} - -const ( - // newPeerMul is how much better than average is the new peer assumed to be - // less than one to encourouge trying new peers - newPeerMul = 0.9 -) - -func (bpt *bsPeerTracker) prefSortedPeers() []peer.ID { - // TODO: this could probably be cached, but as long as its not too many peers, fine for now - bpt.lk.Lock() - defer bpt.lk.Unlock() - out := make([]peer.ID, 0, len(bpt.peers)) - for p := range bpt.peers { - out = append(out, p) - } - - // sort by 'expected cost' of requesting data from that peer - // additionally handle edge cases where not enough data is available - sort.Slice(out, func(i, j int) bool { - pi := bpt.peers[out[i]] - pj := bpt.peers[out[j]] - - var costI, costJ float64 - - getPeerInitLat := func(p peer.ID) float64 { - var res float64 - if bpt.pmgr != nil { - if lat, ok := bpt.pmgr.GetPeerLatency(p); ok { - res = float64(lat) - } - } - if res == 0 { - res = float64(bpt.avgGlobalTime) - } - return res * newPeerMul - } - - if pi.successes+pi.failures > 0 { - failRateI := float64(pi.failures) / float64(pi.failures+pi.successes) - costI = float64(pi.averageTime) + failRateI*float64(bpt.avgGlobalTime) - } else { - costI = getPeerInitLat(out[i]) - } - - if pj.successes+pj.failures > 0 { - failRateJ := float64(pj.failures) / float64(pj.failures+pj.successes) - costJ = float64(pj.averageTime) + failRateJ*float64(bpt.avgGlobalTime) - } else { - costJ = getPeerInitLat(out[j]) - } - - return costI < costJ - }) - - return out -} - -const ( - // xInvAlpha = (N+1)/2 - - localInvAlpha = 5 // 86% of the value is the last 9 - globalInvAlpha = 20 // 86% of the value is the last 39 -) - -func (bpt *bsPeerTracker) logGlobalSuccess(dur time.Duration) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - - if 
bpt.avgGlobalTime == 0 { - bpt.avgGlobalTime = dur - return - } - delta := (dur - bpt.avgGlobalTime) / globalInvAlpha - bpt.avgGlobalTime += delta -} - -func logTime(pi *peerStats, dur time.Duration) { - if pi.averageTime == 0 { - pi.averageTime = dur - return - } - delta := (dur - pi.averageTime) / localInvAlpha - pi.averageTime += delta - -} - -func (bpt *bsPeerTracker) logSuccess(p peer.ID, dur time.Duration) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - - var pi *peerStats - var ok bool - if pi, ok = bpt.peers[p]; !ok { - log.Warnw("log success called on peer not in tracker", "peerid", p.String()) - return - } - - pi.successes++ - logTime(pi, dur) -} - -func (bpt *bsPeerTracker) logFailure(p peer.ID, dur time.Duration) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - - var pi *peerStats - var ok bool - if pi, ok = bpt.peers[p]; !ok { - log.Warn("log failure called on peer not in tracker", "peerid", p.String()) - return - } - - pi.failures++ - logTime(pi, dur) -} - -func (bpt *bsPeerTracker) removePeer(p peer.ID) { - bpt.lk.Lock() - defer bpt.lk.Unlock() - delete(bpt.peers, p) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/blocksync/cbor_gen.go b/vendor/github.com/filecoin-project/lotus/chain/blocksync/cbor_gen.go deleted file mode 100644 index 583a5b58d8..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/blocksync/cbor_gen.go +++ /dev/null @@ -1,578 +0,0 @@ -// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
- -package blocksync - -import ( - "fmt" - "io" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - xerrors "golang.org/x/xerrors" -) - -var _ = xerrors.Errorf - -var lengthBufBlockSyncRequest = []byte{131} - -func (t *BlockSyncRequest) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufBlockSyncRequest); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Start ([]cid.Cid) (slice) - if len(t.Start) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Start was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Start))); err != nil { - return err - } - for _, v := range t.Start { - if err := cbg.WriteCidBuf(scratch, w, v); err != nil { - return xerrors.Errorf("failed writing cid field t.Start: %w", err) - } - } - - // t.RequestLength (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.RequestLength)); err != nil { - return err - } - - // t.Options (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Options)); err != nil { - return err - } - - return nil -} - -func (t *BlockSyncRequest) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Start ([]cid.Cid) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Start: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { 
- t.Start = make([]cid.Cid, extra) - } - - for i := 0; i < int(extra); i++ { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("reading cid field t.Start failed: %w", err) - } - t.Start[i] = c - } - - // t.RequestLength (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.RequestLength = uint64(extra) - - } - // t.Options (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Options = uint64(extra) - - } - return nil -} - -var lengthBufBlockSyncResponse = []byte{131} - -func (t *BlockSyncResponse) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufBlockSyncResponse); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Chain ([]*blocksync.BSTipSet) (slice) - if len(t.Chain) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Chain was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Chain))); err != nil { - return err - } - for _, v := range t.Chain { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.Status (uint64) (uint64) - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { - return err - } - - // t.Message (string) (string) - if len(t.Message) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.Message was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Message))); err != nil { - return err - } - if _, err := io.WriteString(w, t.Message); err != nil { - return err - } - return nil -} - -func (t *BlockSyncResponse) UnmarshalCBOR(r io.Reader) error 
{ - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 3 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Chain ([]*blocksync.BSTipSet) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Chain: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Chain = make([]*BSTipSet, extra) - } - - for i := 0; i < int(extra); i++ { - - var v BSTipSet - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Chain[i] = &v - } - - // t.Status (uint64) (uint64) - - { - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") - } - t.Status = uint64(extra) - - } - // t.Message (string) (string) - - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } - - t.Message = string(sval) - } - return nil -} - -var lengthBufBSTipSet = []byte{133} - -func (t *BSTipSet) MarshalCBOR(w io.Writer) error { - if t == nil { - _, err := w.Write(cbg.CborNull) - return err - } - if _, err := w.Write(lengthBufBSTipSet); err != nil { - return err - } - - scratch := make([]byte, 9) - - // t.Blocks ([]*types.BlockHeader) (slice) - if len(t.Blocks) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.Blocks was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Blocks))); err != nil { - return err - } - for _, v := range t.Blocks { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.BlsMessages ([]*types.Message) (slice) - if len(t.BlsMessages) > cbg.MaxLength { - return 
xerrors.Errorf("Slice value in field t.BlsMessages was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BlsMessages))); err != nil { - return err - } - for _, v := range t.BlsMessages { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.BlsMsgIncludes ([][]uint64) (slice) - if len(t.BlsMsgIncludes) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.BlsMsgIncludes was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.BlsMsgIncludes))); err != nil { - return err - } - for _, v := range t.BlsMsgIncludes { - if len(v) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(v))); err != nil { - return err - } - for _, v := range v { - if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - } - - // t.SecpkMessages ([]*types.SignedMessage) (slice) - if len(t.SecpkMessages) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.SecpkMessages was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.SecpkMessages))); err != nil { - return err - } - for _, v := range t.SecpkMessages { - if err := v.MarshalCBOR(w); err != nil { - return err - } - } - - // t.SecpkMsgIncludes ([][]uint64) (slice) - if len(t.SecpkMsgIncludes) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field t.SecpkMsgIncludes was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.SecpkMsgIncludes))); err != nil { - return err - } - for _, v := range t.SecpkMsgIncludes { - if len(v) > cbg.MaxLength { - return xerrors.Errorf("Slice value in field v was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(v))); err != nil { - return err - } - for _, v := range v { - if err := 
cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { - return err - } - } - } - return nil -} - -func (t *BSTipSet) UnmarshalCBOR(r io.Reader) error { - br := cbg.GetPeeker(r) - scratch := make([]byte, 8) - - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajArray { - return fmt.Errorf("cbor input should be of type array") - } - - if extra != 5 { - return fmt.Errorf("cbor input had wrong number of fields") - } - - // t.Blocks ([]*types.BlockHeader) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.Blocks: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.Blocks = make([]*types.BlockHeader, extra) - } - - for i := 0; i < int(extra); i++ { - - var v types.BlockHeader - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.Blocks[i] = &v - } - - // t.BlsMessages ([]*types.Message) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.BlsMessages: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.BlsMessages = make([]*types.Message, extra) - } - - for i := 0; i < int(extra); i++ { - - var v types.Message - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.BlsMessages[i] = &v - } - - // t.BlsMsgIncludes ([][]uint64) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.BlsMsgIncludes: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.BlsMsgIncludes = make([][]uint64, extra) - } - - for i := 0; i < int(extra); i++ { - { - var 
maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.BlsMsgIncludes[i]: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.BlsMsgIncludes[i] = make([]uint64, extra) - } - - for j := 0; j < int(extra); j++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.BlsMsgIncludes[i] slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.BlsMsgIncludes[i] was not a uint, instead got %d", maj) - } - - t.BlsMsgIncludes[i][j] = uint64(val) - } - - } - } - - // t.SecpkMessages ([]*types.SignedMessage) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.SecpkMessages: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.SecpkMessages = make([]*types.SignedMessage, extra) - } - - for i := 0; i < int(extra); i++ { - - var v types.SignedMessage - if err := v.UnmarshalCBOR(br); err != nil { - return err - } - - t.SecpkMessages[i] = &v - } - - // t.SecpkMsgIncludes ([][]uint64) (slice) - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return fmt.Errorf("t.SecpkMsgIncludes: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.SecpkMsgIncludes = make([][]uint64, extra) - } - - for i := 0; i < int(extra); i++ { - { - var maj byte - var extra uint64 - var err error - - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - - if extra > cbg.MaxLength { - return 
fmt.Errorf("t.SecpkMsgIncludes[i]: array too large (%d)", extra) - } - - if maj != cbg.MajArray { - return fmt.Errorf("expected cbor array") - } - - if extra > 0 { - t.SecpkMsgIncludes[i] = make([]uint64, extra) - } - - for j := 0; j < int(extra); j++ { - - maj, val, err := cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return xerrors.Errorf("failed to read uint64 for t.SecpkMsgIncludes[i] slice: %w", err) - } - - if maj != cbg.MajUnsignedInt { - return xerrors.Errorf("value read for array t.SecpkMsgIncludes[i] was not a uint, instead got %d", maj) - } - - t.SecpkMsgIncludes[i][j] = uint64(val) - } - - } - } - - return nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/blocksync/graphsync_client.go b/vendor/github.com/filecoin-project/lotus/chain/blocksync/graphsync_client.go deleted file mode 100644 index 03e4a30e57..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/blocksync/graphsync_client.go +++ /dev/null @@ -1,151 +0,0 @@ -package blocksync - -import ( - "context" - - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-graphsync" - "github.com/ipld/go-ipld-prime" - "github.com/libp2p/go-libp2p-core/peer" - "golang.org/x/xerrors" - - store "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - - cidlink "github.com/ipld/go-ipld-prime/linking/cid" - basicnode "github.com/ipld/go-ipld-prime/node/basic" - ipldselector "github.com/ipld/go-ipld-prime/traversal/selector" - selectorbuilder "github.com/ipld/go-ipld-prime/traversal/selector/builder" -) - -const ( - - // AMT selector recursion. An AMT has arity of 8 so this gives allows - // us to retrieve trees with 8^10 (1,073,741,824) elements. 
- amtRecursionDepth = uint32(10) - - // some constants for looking up tuple encoded struct fields - // field index of Parents field in a block header - blockIndexParentsField = 5 - - // field index of Messages field in a block header - blockIndexMessagesField = 10 - - // field index of AMT node in AMT head - amtHeadNodeFieldIndex = 2 - - // field index of links array AMT node - amtNodeLinksFieldIndex = 1 - - // field index of values array AMT node - amtNodeValuesFieldIndex = 2 - - // maximum depth per traversal - maxRequestLength = 50 -) - -var amtSelector selectorbuilder.SelectorSpec - -func init() { - // builer for selectors - ssb := selectorbuilder.NewSelectorSpecBuilder(basicnode.Style.Any) - // amt selector -- needed to selector through a messages AMT - amtSelector = ssb.ExploreIndex(amtHeadNodeFieldIndex, - ssb.ExploreRecursive(ipldselector.RecursionLimitDepth(int(amtRecursionDepth)), - ssb.ExploreUnion( - ssb.ExploreIndex(amtNodeLinksFieldIndex, - ssb.ExploreAll(ssb.ExploreRecursiveEdge())), - ssb.ExploreIndex(amtNodeValuesFieldIndex, - ssb.ExploreAll(ssb.Matcher()))))) -} - -func selectorForRequest(req *BlockSyncRequest) ipld.Node { - // builer for selectors - ssb := selectorbuilder.NewSelectorSpecBuilder(basicnode.Style.Any) - - bso := ParseBSOptions(req.Options) - if bso.IncludeMessages { - return ssb.ExploreRecursive(ipldselector.RecursionLimitDepth(int(req.RequestLength)), - ssb.ExploreIndex(blockIndexParentsField, - ssb.ExploreUnion( - ssb.ExploreAll( - ssb.ExploreIndex(blockIndexMessagesField, - ssb.ExploreRange(0, 2, amtSelector), - )), - ssb.ExploreIndex(0, ssb.ExploreRecursiveEdge()), - ))).Node() - } - return ssb.ExploreRecursive(ipldselector.RecursionLimitDepth(int(req.RequestLength)), ssb.ExploreIndex(blockIndexParentsField, - ssb.ExploreUnion( - ssb.ExploreAll( - ssb.Matcher(), - ), - ssb.ExploreIndex(0, ssb.ExploreRecursiveEdge()), - ))).Node() -} - -func firstTipsetSelector(req *BlockSyncRequest) ipld.Node { - // builer for selectors - ssb := 
selectorbuilder.NewSelectorSpecBuilder(basicnode.Style.Any) - - bso := ParseBSOptions(req.Options) - if bso.IncludeMessages { - return ssb.ExploreIndex(blockIndexMessagesField, - ssb.ExploreRange(0, 2, amtSelector), - ).Node() - } - return ssb.Matcher().Node() - -} - -func (bs *BlockSync) executeGsyncSelector(ctx context.Context, p peer.ID, root cid.Cid, sel ipld.Node) error { - extension := graphsync.ExtensionData{ - Name: "chainsync", - Data: nil, - } - _, errs := bs.gsync.Request(ctx, p, cidlink.Link{Cid: root}, sel, extension) - - for err := range errs { - return xerrors.Errorf("failed to complete graphsync request: %w", err) - } - return nil -} - -// Fallback for interacting with other non-lotus nodes -func (bs *BlockSync) fetchBlocksGraphSync(ctx context.Context, p peer.ID, req *BlockSyncRequest) (*BlockSyncResponse, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - immediateTsSelector := firstTipsetSelector(req) - - // Do this because we can only request one root at a time - for _, r := range req.Start { - if err := bs.executeGsyncSelector(ctx, p, r, immediateTsSelector); err != nil { - return nil, err - } - } - - if req.RequestLength > maxRequestLength { - req.RequestLength = maxRequestLength - } - - sel := selectorForRequest(req) - - // execute the selector forreal - if err := bs.executeGsyncSelector(ctx, p, req.Start[0], sel); err != nil { - return nil, err - } - - // Now pull the data we fetched out of the chainstore (where it should now be persisted) - tempcs := store.NewChainStore(bs.bserv.Blockstore(), datastore.NewMapDatastore(), nil) - - opts := ParseBSOptions(req.Options) - tsk := types.NewTipSetKey(req.Start...) 
- chain, err := collectChainSegment(tempcs, tsk, req.RequestLength, opts) - if err != nil { - return nil, xerrors.Errorf("failed to load chain data from chainstore after successful graphsync response (start = %v): %w", req.Start, err) - } - - return &BlockSyncResponse{Chain: chain}, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/events.go b/vendor/github.com/filecoin-project/lotus/chain/events/events.go deleted file mode 100644 index e115077959..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/events.go +++ /dev/null @@ -1,179 +0,0 @@ -package events - -import ( - "context" - "sync" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" -) - -var log = logging.Logger("events") - -// HeightHandler `curH`-`ts.Height` = `confidence` -type HeightHandler func(ctx context.Context, ts *types.TipSet, curH abi.ChainEpoch) error -type RevertHandler func(ctx context.Context, ts *types.TipSet) error - -type heightHandler struct { - confidence int - called bool - - handle HeightHandler - revert RevertHandler -} - -type eventAPI interface { - ChainNotify(context.Context) (<-chan []*api.HeadChange, error) - ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) - ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) - StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) - ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) - - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) // optional / for CalledMsg -} - -type Events struct { - api 
eventAPI - - tsc *tipSetCache - lk sync.Mutex - - ready sync.WaitGroup - readyOnce sync.Once - - heightEvents - *hcEvents -} - -func NewEvents(ctx context.Context, api eventAPI) *Events { - gcConfidence := 2 * build.ForkLengthThreshold - - tsc := newTSCache(gcConfidence, api.ChainGetTipSetByHeight) - - e := &Events{ - api: api, - - tsc: tsc, - - heightEvents: heightEvents{ - tsc: tsc, - ctx: ctx, - gcConfidence: gcConfidence, - - heightTriggers: map[uint64]*heightHandler{}, - htTriggerHeights: map[abi.ChainEpoch][]uint64{}, - htHeights: map[abi.ChainEpoch][]uint64{}, - }, - - hcEvents: newHCEvents(ctx, api, tsc, uint64(gcConfidence)), - } - - e.ready.Add(1) - - go e.listenHeadChanges(ctx) - - e.ready.Wait() - - // TODO: cleanup/gc goroutine - - return e -} - -func (e *Events) listenHeadChanges(ctx context.Context) { - for { - if err := e.listenHeadChangesOnce(ctx); err != nil { - log.Errorf("listen head changes errored: %s", err) - } else { - log.Warn("listenHeadChanges quit") - } - if ctx.Err() != nil { - log.Warnf("not restarting listenHeadChanges: context error: %s", ctx.Err()) - return - } - time.Sleep(time.Second) - log.Info("restarting listenHeadChanges") - } -} - -func (e *Events) listenHeadChangesOnce(ctx context.Context) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - notifs, err := e.api.ChainNotify(ctx) - if err != nil { - // TODO: retry - return xerrors.Errorf("listenHeadChanges ChainNotify call failed: %w", err) - } - - cur, ok := <-notifs // TODO: timeout? 
- if !ok { - return xerrors.Errorf("notification channel closed") - } - - if len(cur) != 1 { - return xerrors.Errorf("unexpected initial head notification length: %d", len(cur)) - } - - if cur[0].Type != store.HCCurrent { - return xerrors.Errorf("expected first head notification type to be 'current', was '%s'", cur[0].Type) - } - - if err := e.tsc.add(cur[0].Val); err != nil { - log.Warn("tsc.add: adding current tipset failed: %w", err) - } - - e.readyOnce.Do(func() { - e.lastTs = cur[0].Val - - e.ready.Done() - }) - - for notif := range notifs { - var rev, app []*types.TipSet - for _, notif := range notif { - switch notif.Type { - case store.HCRevert: - rev = append(rev, notif.Val) - case store.HCApply: - app = append(app, notif.Val) - default: - log.Warnf("unexpected head change notification type: '%s'", notif.Type) - } - } - - if err := e.headChange(rev, app); err != nil { - log.Warnf("headChange failed: %s", err) - } - - // sync with fake chainstore (for tests) - if fcs, ok := e.api.(interface{ notifDone() }); ok { - fcs.notifDone() - } - } - - return nil -} - -func (e *Events) headChange(rev, app []*types.TipSet) error { - if len(app) == 0 { - return xerrors.New("events.headChange expected at least one applied tipset") - } - - e.lk.Lock() - defer e.lk.Unlock() - - if err := e.headChangeAt(rev, app); err != nil { - return err - } - - return e.processHeadChangeEvent(rev, app) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/events_called.go b/vendor/github.com/filecoin-project/lotus/chain/events/events_called.go deleted file mode 100644 index 04a87a5458..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/events_called.go +++ /dev/null @@ -1,611 +0,0 @@ -package events - -import ( - "context" - "math" - "sync" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types" -) - -const NoTimeout = math.MaxInt64 -const NoHeight 
= abi.ChainEpoch(-1) - -type triggerID = uint64 - -// msgH is the block height at which a message was present / event has happened -type msgH = abi.ChainEpoch - -// triggerH is the block height at which the listener will be notified about the -// message (msgH+confidence) -type triggerH = abi.ChainEpoch - -type eventData interface{} - -// EventHandler arguments: -// `prevTs` is the previous tipset, eg the "from" tipset for a state change. -// `ts` is the event tipset, eg the tipset in which the `msg` is included. -// `curH`-`ts.Height` = `confidence` -type EventHandler func(data eventData, prevTs, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) - -// CheckFunc is used for atomicity guarantees. If the condition the callbacks -// wait for has already happened in tipset `ts` -// -// If `done` is true, timeout won't be triggered -// If `more` is false, no messages will be sent to EventHandler (RevertHandler -// may still be called) -type CheckFunc func(ts *types.TipSet) (done bool, more bool, err error) - -// Keep track of information for an event handler -type handlerInfo struct { - confidence int - timeout abi.ChainEpoch - - disabled bool // TODO: GC after gcConfidence reached - - handle EventHandler - revert RevertHandler -} - -// When a change occurs, a queuedEvent is created and put into a queue -// until the required confidence is reached -type queuedEvent struct { - trigger triggerID - - prevH abi.ChainEpoch - h abi.ChainEpoch - data eventData - - called bool -} - -// Manages chain head change events, which may be forward (new tipset added to -// chain) or backward (chain branch discarded in favour of heavier branch) -type hcEvents struct { - cs eventAPI - tsc *tipSetCache - ctx context.Context - gcConfidence uint64 - - lastTs *types.TipSet - - lk sync.Mutex - - ctr triggerID - - triggers map[triggerID]*handlerInfo - - // maps block heights to events - // [triggerH][msgH][event] - confQueue map[triggerH]map[msgH][]*queuedEvent - - // 
[msgH][triggerH] - revertQueue map[msgH][]triggerH - - // [timeoutH+confidence][triggerID]{calls} - timeouts map[abi.ChainEpoch]map[triggerID]int - - messageEvents - watcherEvents -} - -func newHCEvents(ctx context.Context, cs eventAPI, tsc *tipSetCache, gcConfidence uint64) *hcEvents { - e := hcEvents{ - ctx: ctx, - cs: cs, - tsc: tsc, - gcConfidence: gcConfidence, - - confQueue: map[triggerH]map[msgH][]*queuedEvent{}, - revertQueue: map[msgH][]triggerH{}, - triggers: map[triggerID]*handlerInfo{}, - timeouts: map[abi.ChainEpoch]map[triggerID]int{}, - } - - e.messageEvents = newMessageEvents(ctx, &e, cs) - e.watcherEvents = newWatcherEvents(ctx, &e, cs) - - return &e -} - -// Called when there is a change to the head with tipsets to be -// reverted / applied -func (e *hcEvents) processHeadChangeEvent(rev, app []*types.TipSet) error { - e.lk.Lock() - defer e.lk.Unlock() - - for _, ts := range rev { - e.handleReverts(ts) - e.lastTs = ts - } - - for _, ts := range app { - // Check if the head change caused any state changes that we were - // waiting for - stateChanges := e.watcherEvents.checkStateChanges(e.lastTs, ts) - - // Queue up calls until there have been enough blocks to reach - // confidence on the state changes - for tid, data := range stateChanges { - e.queueForConfidence(tid, data, e.lastTs, ts) - } - - // Check if the head change included any new message calls - newCalls, err := e.messageEvents.checkNewCalls(ts) - if err != nil { - return err - } - - // Queue up calls until there have been enough blocks to reach - // confidence on the message calls - for tid, data := range newCalls { - e.queueForConfidence(tid, data, nil, ts) - } - - for at := e.lastTs.Height(); at <= ts.Height(); at++ { - // Apply any queued events and timeouts that were targeted at the - // current chain height - e.applyWithConfidence(ts, at) - e.applyTimeouts(ts) - } - - // Update the latest known tipset - e.lastTs = ts - } - - return nil -} - -func (e *hcEvents) handleReverts(ts 
*types.TipSet) { - reverts, ok := e.revertQueue[ts.Height()] - if !ok { - return // nothing to do - } - - for _, triggerH := range reverts { - toRevert := e.confQueue[triggerH][ts.Height()] - for _, event := range toRevert { - if !event.called { - continue // event wasn't apply()-ied yet - } - - trigger := e.triggers[event.trigger] - - if err := trigger.revert(e.ctx, ts); err != nil { - log.Errorf("reverting chain trigger (@H %d, triggered @ %d) failed: %s", ts.Height(), triggerH, err) - } - } - delete(e.confQueue[triggerH], ts.Height()) - } - delete(e.revertQueue, ts.Height()) -} - -// Queue up events until the chain has reached a height that reflects the -// desired confidence -func (e *hcEvents) queueForConfidence(trigID uint64, data eventData, prevTs, ts *types.TipSet) { - trigger := e.triggers[trigID] - - prevH := NoHeight - if prevTs != nil { - prevH = prevTs.Height() - } - appliedH := ts.Height() - - triggerH := appliedH + abi.ChainEpoch(trigger.confidence) - - byOrigH, ok := e.confQueue[triggerH] - if !ok { - byOrigH = map[abi.ChainEpoch][]*queuedEvent{} - e.confQueue[triggerH] = byOrigH - } - - byOrigH[appliedH] = append(byOrigH[appliedH], &queuedEvent{ - trigger: trigID, - prevH: prevH, - h: appliedH, - data: data, - }) - - e.revertQueue[appliedH] = append(e.revertQueue[appliedH], triggerH) -} - -// Apply any events that were waiting for this chain height for confidence -func (e *hcEvents) applyWithConfidence(ts *types.TipSet, height abi.ChainEpoch) { - byOrigH, ok := e.confQueue[height] - if !ok { - return // no triggers at this height - } - - for origH, events := range byOrigH { - triggerTs, err := e.tsc.get(origH) - if err != nil { - log.Errorf("events: applyWithConfidence didn't find tipset for event; wanted %d; current %d", origH, height) - } - - for _, event := range events { - if event.called { - continue - } - - trigger := e.triggers[event.trigger] - if trigger.disabled { - continue - } - - // Previous tipset - this is relevant for example in a 
state change - // from one tipset to another - var prevTs *types.TipSet - if event.prevH != NoHeight { - prevTs, err = e.tsc.get(event.prevH) - if err != nil { - log.Errorf("events: applyWithConfidence didn't find tipset for previous event; wanted %d; current %d", event.prevH, height) - continue - } - } - - more, err := trigger.handle(event.data, prevTs, triggerTs, height) - if err != nil { - log.Errorf("chain trigger (@H %d, triggered @ %d) failed: %s", origH, height, err) - continue // don't revert failed calls - } - - event.called = true - - touts, ok := e.timeouts[trigger.timeout] - if ok { - touts[event.trigger]++ - } - - trigger.disabled = !more - } - } -} - -// Apply any timeouts that expire at this height -func (e *hcEvents) applyTimeouts(ts *types.TipSet) { - triggers, ok := e.timeouts[ts.Height()] - if !ok { - return // nothing to do - } - - for triggerID, calls := range triggers { - if calls > 0 { - continue // don't timeout if the method was called - } - trigger := e.triggers[triggerID] - if trigger.disabled { - continue - } - - timeoutTs, err := e.tsc.get(ts.Height() - abi.ChainEpoch(trigger.confidence)) - if err != nil { - log.Errorf("events: applyTimeouts didn't find tipset for event; wanted %d; current %d", ts.Height()-abi.ChainEpoch(trigger.confidence), ts.Height()) - } - - more, err := trigger.handle(nil, nil, timeoutTs, ts.Height()) - if err != nil { - log.Errorf("chain trigger (call @H %d, called @ %d) failed: %s", timeoutTs.Height(), ts.Height(), err) - continue // don't revert failed calls - } - - trigger.disabled = !more // allows messages after timeout - } -} - -// Listen for an event -// - CheckFunc: immediately checks if the event already occurred -// - EventHandler: called when the event has occurred, after confidence tipsets -// - RevertHandler: called if the chain head changes causing the event to revert -// - confidence: wait this many tipsets before calling EventHandler -// - timeout: at this chain height, timeout on waiting for this 
event -func (e *hcEvents) onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) { - e.lk.Lock() - defer e.lk.Unlock() - - // Check if the event has already occurred - ts := e.tsc.best() - done, more, err := check(ts) - if err != nil { - return 0, xerrors.Errorf("called check error (h: %d): %w", ts.Height(), err) - } - if done { - timeout = NoTimeout - } - - // Create a trigger for the event - id := e.ctr - e.ctr++ - - e.triggers[id] = &handlerInfo{ - confidence: confidence, - timeout: timeout + abi.ChainEpoch(confidence), - - disabled: !more, - - handle: hnd, - revert: rev, - } - - // If there's a timeout, set up a timeout check at that height - if timeout != NoTimeout { - if e.timeouts[timeout+abi.ChainEpoch(confidence)] == nil { - e.timeouts[timeout+abi.ChainEpoch(confidence)] = map[uint64]int{} - } - e.timeouts[timeout+abi.ChainEpoch(confidence)][id] = 0 - } - - return id, nil -} - -// headChangeAPI is used to allow the composed event APIs to call back to hcEvents -// to listen for changes -type headChangeAPI interface { - onHeadChanged(check CheckFunc, hnd EventHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch) (triggerID, error) -} - -// watcherEvents watches for a state change -type watcherEvents struct { - ctx context.Context - cs eventAPI - hcAPI headChangeAPI - - lk sync.RWMutex - matchers map[triggerID]StateMatchFunc -} - -func newWatcherEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) watcherEvents { - return watcherEvents{ - ctx: ctx, - cs: cs, - hcAPI: hcAPI, - matchers: make(map[triggerID]StateMatchFunc), - } -} - -// Run each of the matchers against the previous and current state to see if -// there's a change -func (we *watcherEvents) checkStateChanges(oldState, newState *types.TipSet) map[triggerID]eventData { - we.lk.RLock() - defer we.lk.RUnlock() - - res := make(map[triggerID]eventData) - for tid, matchFn := range we.matchers { - ok, data, err 
:= matchFn(oldState, newState) - if err != nil { - log.Errorf("event diff fn failed: %s", err) - continue - } - - if ok { - res[tid] = data - } - } - return res -} - -// StateChange represents a change in state -type StateChange interface{} - -// StateChangeHandler arguments: -// `oldTs` is the state "from" tipset -// `newTs` is the state "to" tipset -// `states` is the change in state -// `curH`-`ts.Height` = `confidence` -type StateChangeHandler func(oldTs, newTs *types.TipSet, states StateChange, curH abi.ChainEpoch) (more bool, err error) - -type StateMatchFunc func(oldTs, newTs *types.TipSet) (bool, StateChange, error) - -// StateChanged registers a callback which is triggered when a specified state -// change occurs or a timeout is reached. -// -// * `CheckFunc` callback is invoked immediately with a recent tipset, it -// returns two booleans - `done`, and `more`. -// -// * `done` should be true when some on-chain state change we are waiting -// for has happened. When `done` is set to true, timeout trigger is disabled. -// -// * `more` should be false when we don't want to receive new notifications -// through StateChangeHandler. Note that notifications may still be delivered to -// RevertHandler -// -// * `StateChangeHandler` is called when the specified state change was observed -// on-chain, and a confidence threshold was reached, or the specified `timeout` -// height was reached with no state change observed. When this callback is -// invoked on a timeout, `oldState` and `newState` are set to nil. -// This callback returns a boolean specifying whether further notifications -// should be sent, like `more` return param from `CheckFunc` above. -// -// * `RevertHandler` is called after apply handler, when we drop the tipset -// containing the message. The tipset passed as the argument is the tipset -// that is being dropped. Note that the event dropped may be re-applied -// in a different tipset in small amount of time. 
-// -// * `StateMatchFunc` is called against each tipset state. If there is a match, -// the state change is queued up until the confidence interval has elapsed (and -// `StateChangeHandler` is called) -func (we *watcherEvents) StateChanged(check CheckFunc, scHnd StateChangeHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf StateMatchFunc) error { - hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) { - states, ok := data.(StateChange) - if data != nil && !ok { - panic("expected StateChange") - } - - return scHnd(prevTs, ts, states, height) - } - - id, err := we.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout) - if err != nil { - return err - } - - we.lk.Lock() - defer we.lk.Unlock() - we.matchers[id] = mf - - return nil -} - -// messageEvents watches for message calls to actors -type messageEvents struct { - ctx context.Context - cs eventAPI - hcAPI headChangeAPI - - lk sync.RWMutex - matchers map[triggerID][]MsgMatchFunc -} - -func newMessageEvents(ctx context.Context, hcAPI headChangeAPI, cs eventAPI) messageEvents { - return messageEvents{ - ctx: ctx, - cs: cs, - hcAPI: hcAPI, - matchers: map[triggerID][]MsgMatchFunc{}, - } -} - -// Check if there are any new actor calls -func (me *messageEvents) checkNewCalls(ts *types.TipSet) (map[triggerID]eventData, error) { - pts, err := me.cs.ChainGetTipSet(me.ctx, ts.Parents()) // we actually care about messages in the parent tipset here - if err != nil { - log.Errorf("getting parent tipset in checkNewCalls: %s", err) - return nil, err - } - - me.lk.RLock() - defer me.lk.RUnlock() - - res := make(map[triggerID]eventData) - me.messagesForTs(pts, func(msg *types.Message) { - // TODO: provide receipts - - for tid, matchFns := range me.matchers { - var matched bool - for _, matchFn := range matchFns { - ok, err := matchFn(msg) - if err != nil { - log.Errorf("event matcher failed: %s", err) - continue - } - matched = ok - - if matched { - break - } - } - - 
if matched { - res[tid] = msg - break - } - } - }) - - return res, nil -} - -// Get the messages in a tipset -func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Message)) { - seen := map[cid.Cid]struct{}{} - - for _, tsb := range ts.Blocks() { - - msgs, err := me.cs.ChainGetBlockMessages(context.TODO(), tsb.Cid()) - if err != nil { - log.Errorf("messagesForTs MessagesForBlock failed (ts.H=%d, Bcid:%s, B.Mcid:%s): %s", ts.Height(), tsb.Cid(), tsb.Messages, err) - // this is quite bad, but probably better than missing all the other updates - continue - } - - for _, m := range msgs.BlsMessages { - _, ok := seen[m.Cid()] - if ok { - continue - } - seen[m.Cid()] = struct{}{} - - consume(m) - } - - for _, m := range msgs.SecpkMessages { - _, ok := seen[m.Message.Cid()] - if ok { - continue - } - seen[m.Message.Cid()] = struct{}{} - - consume(&m.Message) - } - } -} - -// MsgHandler arguments: -// `ts` is the tipset, in which the `msg` is included. -// `curH`-`ts.Height` = `confidence` -type MsgHandler func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (more bool, err error) - -type MsgMatchFunc func(msg *types.Message) (bool, error) - -// Called registers a callback which is triggered when a specified method is -// called on an actor, or a timeout is reached. -// -// * `CheckFunc` callback is invoked immediately with a recent tipset, it -// returns two booleans - `done`, and `more`. -// -// * `done` should be true when some on-chain action we are waiting for has -// happened. When `done` is set to true, timeout trigger is disabled. -// -// * `more` should be false when we don't want to receive new notifications -// through MsgHandler. Note that notifications may still be delivered to -// RevertHandler -// -// * `MsgHandler` is called when the specified event was observed on-chain, -// and a confidence threshold was reached, or the specified `timeout` height -// was reached with no events observed. 
When this callback is invoked on a -// timeout, `msg` is set to nil. This callback returns a boolean specifying -// whether further notifications should be sent, like `more` return param -// from `CheckFunc` above. -// -// * `RevertHandler` is called after apply handler, when we drop the tipset -// containing the message. The tipset passed as the argument is the tipset -// that is being dropped. Note that the message dropped may be re-applied -// in a different tipset in small amount of time. -// -// * `MsgMatchFunc` is called against each message. If there is a match, the -// message is queued up until the confidence interval has elapsed (and -// `MsgHandler` is called) -func (me *messageEvents) Called(check CheckFunc, msgHnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, mf MsgMatchFunc) error { - hnd := func(data eventData, prevTs, ts *types.TipSet, height abi.ChainEpoch) (bool, error) { - msg, ok := data.(*types.Message) - if data != nil && !ok { - panic("expected msg") - } - - rec, err := me.cs.StateGetReceipt(me.ctx, msg.Cid(), ts.Key()) - if err != nil { - return false, err - } - - return msgHnd(msg, rec, ts, height) - } - - id, err := me.hcAPI.onHeadChanged(check, hnd, rev, confidence, timeout) - if err != nil { - return err - } - - me.lk.Lock() - defer me.lk.Unlock() - me.matchers[id] = append(me.matchers[id], mf) - - return nil -} - -// Convenience function for checking and matching messages -func (me *messageEvents) CalledMsg(ctx context.Context, hnd MsgHandler, rev RevertHandler, confidence int, timeout abi.ChainEpoch, msg types.ChainMsg) error { - return me.Called(me.CheckMsg(ctx, msg, hnd), hnd, rev, confidence, timeout, me.MatchMsg(msg.VMMessage())) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/events_height.go b/vendor/github.com/filecoin-project/lotus/chain/events/events_height.go deleted file mode 100644 index fc94d62624..0000000000 --- 
a/vendor/github.com/filecoin-project/lotus/chain/events/events_height.go +++ /dev/null @@ -1,200 +0,0 @@ -package events - -import ( - "context" - "sync" - - "github.com/filecoin-project/specs-actors/actors/abi" - "go.opencensus.io/trace" - - "github.com/filecoin-project/lotus/chain/types" -) - -type heightEvents struct { - lk sync.Mutex - tsc *tipSetCache - gcConfidence abi.ChainEpoch - - ctr triggerID - - heightTriggers map[triggerID]*heightHandler - - htTriggerHeights map[triggerH][]triggerID - htHeights map[msgH][]triggerID - - ctx context.Context -} - -func (e *heightEvents) headChangeAt(rev, app []*types.TipSet) error { - - ctx, span := trace.StartSpan(e.ctx, "events.HeightHeadChange") - defer span.End() - span.AddAttributes(trace.Int64Attribute("endHeight", int64(app[0].Height()))) - span.AddAttributes(trace.Int64Attribute("reverts", int64(len(rev)))) - span.AddAttributes(trace.Int64Attribute("applies", int64(len(app)))) - - e.lk.Lock() - defer e.lk.Unlock() - for _, ts := range rev { - // TODO: log error if h below gcconfidence - // revert height-based triggers - - revert := func(h abi.ChainEpoch, ts *types.TipSet) { - for _, tid := range e.htHeights[h] { - ctx, span := trace.StartSpan(ctx, "events.HeightRevert") - - rev := e.heightTriggers[tid].revert - e.lk.Unlock() - err := rev(ctx, ts) - e.lk.Lock() - e.heightTriggers[tid].called = false - - span.End() - - if err != nil { - log.Errorf("reverting chain trigger (@H %d): %s", h, err) - } - } - } - revert(ts.Height(), ts) - - subh := ts.Height() - 1 - for { - cts, err := e.tsc.get(subh) - if err != nil { - return err - } - - if cts != nil { - break - } - - revert(subh, ts) - subh-- - } - - if err := e.tsc.revert(ts); err != nil { - return err - } - } - - for i := range app { - ts := app[i] - - if err := e.tsc.add(ts); err != nil { - return err - } - - // height triggers - - apply := func(h abi.ChainEpoch, ts *types.TipSet) error { - for _, tid := range e.htTriggerHeights[h] { - hnd := e.heightTriggers[tid] 
- if hnd.called { - return nil - } - hnd.called = true - - triggerH := h - abi.ChainEpoch(hnd.confidence) - - incTs, err := e.tsc.getNonNull(triggerH) - if err != nil { - return err - } - - ctx, span := trace.StartSpan(ctx, "events.HeightApply") - span.AddAttributes(trace.BoolAttribute("immediate", false)) - handle := hnd.handle - e.lk.Unlock() - err = handle(ctx, incTs, h) - e.lk.Lock() - span.End() - - if err != nil { - log.Errorf("chain trigger (@H %d, called @ %d) failed: %+v", triggerH, ts.Height(), err) - } - } - return nil - } - - if err := apply(ts.Height(), ts); err != nil { - return err - } - subh := ts.Height() - 1 - for { - cts, err := e.tsc.get(subh) - if err != nil { - return err - } - - if cts != nil { - break - } - - if err := apply(subh, ts); err != nil { - return err - } - - subh-- - } - - } - - return nil -} - -// ChainAt invokes the specified `HeightHandler` when the chain reaches the -// specified height+confidence threshold. If the chain is rolled-back under the -// specified height, `RevertHandler` will be called. -// -// ts passed to handlers is the tipset at the specified, or above, if lower tipsets were null -func (e *heightEvents) ChainAt(hnd HeightHandler, rev RevertHandler, confidence int, h abi.ChainEpoch) error { - - e.lk.Lock() // Tricky locking, check your locks if you modify this function! 
- - bestH := e.tsc.best().Height() - - if bestH >= h+abi.ChainEpoch(confidence) { - ts, err := e.tsc.getNonNull(h) - if err != nil { - log.Warnf("events.ChainAt: calling HandleFunc with nil tipset, not found in cache: %s", err) - } - - e.lk.Unlock() - ctx, span := trace.StartSpan(e.ctx, "events.HeightApply") - span.AddAttributes(trace.BoolAttribute("immediate", true)) - - err = hnd(ctx, ts, bestH) - span.End() - - if err != nil { - return err - } - - e.lk.Lock() - bestH = e.tsc.best().Height() - } - - defer e.lk.Unlock() - - if bestH >= h+abi.ChainEpoch(confidence)+e.gcConfidence { - return nil - } - - triggerAt := h + abi.ChainEpoch(confidence) - - id := e.ctr - e.ctr++ - - e.heightTriggers[id] = &heightHandler{ - confidence: confidence, - - handle: hnd, - revert: rev, - } - - e.htHeights[h] = append(e.htHeights[h], id) - e.htTriggerHeights[triggerAt] = append(e.htTriggerHeights[triggerAt], id) - - return nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/events_test.go b/vendor/github.com/filecoin-project/lotus/chain/events/events_test.go deleted file mode 100644 index 5798fb75ce..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/events_test.go +++ /dev/null @@ -1,1314 +0,0 @@ -package events - -import ( - "context" - "fmt" - "sync" - "testing" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" -) - -var dummyCid cid.Cid - -func init() { - dummyCid, _ = cid.Parse("bafkqaaa") -} - -type fakeMsg struct { - bmsgs []*types.Message - smsgs []*types.SignedMessage -} - -type fakeCS struct { - t *testing.T - h abi.ChainEpoch - 
tsc *tipSetCache - - msgs map[cid.Cid]fakeMsg - blkMsgs map[cid.Cid]cid.Cid - - sync sync.Mutex - - tipsets map[types.TipSetKey]*types.TipSet - - sub func(rev, app []*types.TipSet) -} - -func (fcs *fakeCS) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { - return fcs.tipsets[key], nil -} - -func (fcs *fakeCS) StateGetReceipt(context.Context, cid.Cid, types.TipSetKey) (*types.MessageReceipt, error) { - return nil, nil -} - -func (fcs *fakeCS) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - panic("Not Implemented") -} - -func (fcs *fakeCS) ChainGetTipSetByHeight(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { - panic("Not Implemented") -} - -func (fcs *fakeCS) makeTs(t *testing.T, parents []cid.Cid, h abi.ChainEpoch, msgcid cid.Cid) *types.TipSet { - a, _ := address.NewFromString("t00") - b, _ := address.NewFromString("t02") - var ts, err = types.NewTipSet([]*types.BlockHeader{ - { - Height: h, - Miner: a, - - Parents: parents, - - Ticket: &types.Ticket{VRFProof: []byte{byte(h % 2)}}, - - ParentStateRoot: dummyCid, - Messages: msgcid, - ParentMessageReceipts: dummyCid, - - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - }, - { - Height: h, - Miner: b, - - Parents: parents, - - Ticket: &types.Ticket{VRFProof: []byte{byte((h + 1) % 2)}}, - - ParentStateRoot: dummyCid, - Messages: msgcid, - ParentMessageReceipts: dummyCid, - - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - }, - }) - - if fcs.tipsets == nil { - fcs.tipsets = map[types.TipSetKey]*types.TipSet{} - } - fcs.tipsets[ts.Key()] = ts - - require.NoError(t, err) - - return ts -} - -func (fcs *fakeCS) ChainNotify(context.Context) (<-chan []*api.HeadChange, error) { - out := make(chan []*api.HeadChange, 1) - out <- []*api.HeadChange{{Type: store.HCCurrent, Val: 
fcs.tsc.best()}} - - fcs.sub = func(rev, app []*types.TipSet) { - notif := make([]*api.HeadChange, len(rev)+len(app)) - - for i, r := range rev { - notif[i] = &api.HeadChange{ - Type: store.HCRevert, - Val: r, - } - } - for i, r := range app { - notif[i+len(rev)] = &api.HeadChange{ - Type: store.HCApply, - Val: r, - } - } - - out <- notif - } - - return out, nil -} - -func (fcs *fakeCS) ChainGetBlockMessages(ctx context.Context, blk cid.Cid) (*api.BlockMessages, error) { - messages, ok := fcs.blkMsgs[blk] - if !ok { - return &api.BlockMessages{}, nil - } - - ms, ok := fcs.msgs[messages] - if !ok { - return &api.BlockMessages{}, nil - } - return &api.BlockMessages{BlsMessages: ms.bmsgs, SecpkMessages: ms.smsgs}, nil - -} - -func (fcs *fakeCS) fakeMsgs(m fakeMsg) cid.Cid { - n := len(fcs.msgs) - c, err := cid.Prefix{ - Version: 1, - Codec: cid.Raw, - MhType: multihash.IDENTITY, - MhLength: -1, - }.Sum([]byte(fmt.Sprintf("%d", n))) - require.NoError(fcs.t, err) - - fcs.msgs[c] = m - return c -} - -func (fcs *fakeCS) advance(rev, app int, msgs map[int]cid.Cid, nulls ...int) { // todo: allow msgs - if fcs.sub == nil { - fcs.t.Fatal("sub not be nil") - } - - nullm := map[int]struct{}{} - for _, v := range nulls { - nullm[v] = struct{}{} - } - - var revs []*types.TipSet - for i := 0; i < rev; i++ { - ts := fcs.tsc.best() - - if _, ok := nullm[int(ts.Height())]; !ok { - revs = append(revs, ts) - require.NoError(fcs.t, fcs.tsc.revert(ts)) - } - fcs.h-- - } - - var apps []*types.TipSet - for i := 0; i < app; i++ { - fcs.h++ - - mc, hasMsgs := msgs[i] - if !hasMsgs { - mc = dummyCid - } - - if _, ok := nullm[int(fcs.h)]; ok { - continue - } - - ts := fcs.makeTs(fcs.t, fcs.tsc.best().Key().Cids(), fcs.h, mc) - require.NoError(fcs.t, fcs.tsc.add(ts)) - - if hasMsgs { - fcs.blkMsgs[ts.Blocks()[0].Cid()] = mc - } - - apps = append(apps, ts) - } - - fcs.sync.Lock() - - fcs.sub(revs, apps) - - fcs.sync.Lock() - fcs.sync.Unlock() //nolint:staticcheck -} - -func (fcs *fakeCS) 
notifDone() { - fcs.sync.Unlock() -} - -var _ eventAPI = &fakeCS{} - -func TestAt(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - require.Equal(t, 5, int(ts.Height())) - require.Equal(t, 8, int(curH)) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - fcs.advance(0, 3, nil) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(0, 3, nil) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(0, 3, nil) - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - fcs.advance(0, 3, nil) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(10, 10, nil) - require.Equal(t, true, applied) - require.Equal(t, true, reverted) - applied = false - reverted = false - - fcs.advance(10, 1, nil) - require.Equal(t, false, applied) - require.Equal(t, true, reverted) - reverted = false - - fcs.advance(0, 1, nil) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(0, 2, nil) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(0, 1, nil) // 8 - require.Equal(t, true, applied) - require.Equal(t, false, reverted) -} - -func TestAtDoubleTrigger(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH 
abi.ChainEpoch) error { - require.Equal(t, 5, int(ts.Height())) - require.Equal(t, 8, int(curH)) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - fcs.advance(0, 6, nil) - require.False(t, applied) - require.False(t, reverted) - - fcs.advance(0, 1, nil) - require.True(t, applied) - require.False(t, reverted) - applied = false - - fcs.advance(2, 2, nil) - require.False(t, applied) - require.False(t, reverted) - - fcs.advance(4, 4, nil) - require.True(t, applied) - require.True(t, reverted) -} - -func TestAtNullTrigger(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - require.Equal(t, abi.ChainEpoch(6), ts.Height()) - require.Equal(t, 8, int(curH)) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - fcs.advance(0, 6, nil, 5) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(0, 3, nil) - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false -} - -func TestAtNullConf(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - require.Equal(t, 5, int(ts.Height())) - require.Equal(t, 8, int(curH)) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - 
reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - fcs.advance(0, 6, nil) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(0, 3, nil, 8) - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - fcs.advance(7, 1, nil) - require.Equal(t, false, applied) - require.Equal(t, true, reverted) - reverted = false -} - -func TestAtStart(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - fcs.advance(0, 5, nil) // 6 - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - require.Equal(t, 5, int(ts.Height())) - require.Equal(t, 8, int(curH)) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(0, 5, nil) // 11 - require.Equal(t, true, applied) - require.Equal(t, false, reverted) -} - -func TestAtStartConfidence(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - fcs.advance(0, 10, nil) // 11 - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - require.Equal(t, 5, int(ts.Height())) - require.Equal(t, 11, int(curH)) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) -} - -func TestAtChained(t *testing.T) { - fcs := 
&fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - return events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - require.Equal(t, 10, int(ts.Height())) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 10) - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - fcs.advance(0, 15, nil) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) -} - -func TestAtChainedConfidence(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - fcs.advance(0, 15, nil) - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - return events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - require.Equal(t, 10, int(ts.Height())) - applied = true - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 10) - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) -} - -func TestAtChainedConfidenceNull(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - fcs.advance(0, 15, 
nil, 5) - - var applied bool - var reverted bool - - err := events.ChainAt(func(_ context.Context, ts *types.TipSet, curH abi.ChainEpoch) error { - applied = true - require.Equal(t, 6, int(ts.Height())) - return nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 5) - require.NoError(t, err) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) -} - -func matchAddrMethod(to address.Address, m abi.MethodNum) func(msg *types.Message) (bool, error) { - return func(msg *types.Message) (bool, error) { - return to == msg.To && m == msg.Method, nil - } -} - -func TestCalled(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - more := true - var applied, reverted bool - var appliedMsg *types.Message - var appliedTs *types.TipSet - var appliedH abi.ChainEpoch - - err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { - require.Equal(t, false, applied) - applied = true - appliedMsg = msg - appliedTs = ts - appliedH = curH - return more, nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 20, matchAddrMethod(t0123, 5)) - require.NoError(t, err) - - // create few blocks to make sure nothing get's randomly called - - fcs.advance(0, 4, nil) // H=5 - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create blocks with message (but below confidence threshold) - - fcs.advance(0, 3, map[int]cid.Cid{ // msg at H=6; H=8 (confidence=2) - 0: fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ 
- {To: t0123, From: t0123, Method: 5, Nonce: 1}, - }, - }), - }) - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create additional block so we are above confidence threshold - - fcs.advance(0, 2, nil) // H=10 (confidence=3, apply) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - // dip below confidence - fcs.advance(2, 2, nil) // H=10 (confidence=3, apply) - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - require.Equal(t, abi.ChainEpoch(7), appliedTs.Height()) - require.Equal(t, "bafkqaaa", appliedTs.Blocks()[0].Messages.String()) - require.Equal(t, abi.ChainEpoch(10), appliedH) - require.Equal(t, t0123, appliedMsg.To) - require.Equal(t, uint64(1), appliedMsg.Nonce) - require.Equal(t, abi.MethodNum(5), appliedMsg.Method) - - // revert some blocks, keep the message - - fcs.advance(3, 1, nil) // H=8 (confidence=1) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // revert the message - - fcs.advance(2, 1, nil) // H=7, we reverted ts with the msg execution, but not the msg itself - - require.Equal(t, false, applied) - require.Equal(t, true, reverted) - reverted = false - - // send new message on different height - - n2msg := fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 2}, - }, - }) - - fcs.advance(0, 3, map[int]cid.Cid{ // (n2msg confidence=1) - 0: n2msg, - }) - - require.Equal(t, true, applied) // msg from H=7, which had reverted execution - require.Equal(t, false, reverted) - require.Equal(t, abi.ChainEpoch(10), appliedH) - applied = false - - fcs.advance(0, 2, nil) // (confidence=3) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - require.Equal(t, abi.ChainEpoch(9), appliedTs.Height()) - require.Equal(t, "bafkqaaa", appliedTs.Blocks()[0].Messages.String()) - require.Equal(t, abi.ChainEpoch(12), appliedH) - require.Equal(t, t0123, appliedMsg.To) 
- require.Equal(t, uint64(2), appliedMsg.Nonce) - require.Equal(t, abi.MethodNum(5), appliedMsg.Method) - - // revert and apply at different height - - fcs.advance(8, 6, map[int]cid.Cid{ // (confidence=3) - 1: n2msg, - }) - - // TODO: We probably don't want to call revert/apply, as restarting certain - // actions may be expensive, and in this case the message is still - // on-chain, just at different height - require.Equal(t, true, applied) - require.Equal(t, true, reverted) - reverted = false - applied = false - - require.Equal(t, abi.ChainEpoch(7), appliedTs.Height()) - require.Equal(t, "bafkqaaa", appliedTs.Blocks()[0].Messages.String()) - require.Equal(t, abi.ChainEpoch(10), appliedH) - require.Equal(t, t0123, appliedMsg.To) - require.Equal(t, uint64(2), appliedMsg.Nonce) - require.Equal(t, abi.MethodNum(5), appliedMsg.Method) - - // call method again - - fcs.advance(0, 5, map[int]cid.Cid{ - 0: n2msg, - }) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - // send and revert below confidence, then cross confidence - fcs.advance(0, 2, map[int]cid.Cid{ - 0: fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 3}, - }, - }), - }) - - fcs.advance(2, 5, nil) // H=19, but message reverted - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // test timeout (it's set to 20 in the call to `events.Called` above) - - fcs.advance(0, 6, nil) - - require.Equal(t, false, applied) // not calling timeout as we received messages - require.Equal(t, false, reverted) - - // test unregistering with more - - more = false - fcs.advance(0, 5, map[int]cid.Cid{ - 0: fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 4}, // this signals we don't want more - }, - }), - }) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - fcs.advance(0, 5, map[int]cid.Cid{ - 0: fcs.fakeMsgs(fakeMsg{ - bmsgs: 
[]*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 5}, - }, - }), - }) - - require.Equal(t, false, applied) // should not get any further notifications - require.Equal(t, false, reverted) - - // revert after disabled - - fcs.advance(5, 1, nil) // try reverting msg sent after disabling - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - fcs.advance(5, 1, nil) // try reverting msg sent before disabling - - require.Equal(t, false, applied) - require.Equal(t, true, reverted) -} - -func TestCalledTimeout(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - called := false - - err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { - called = true - require.Nil(t, msg) - require.Equal(t, abi.ChainEpoch(20), ts.Height()) - require.Equal(t, abi.ChainEpoch(23), curH) - return false, nil - }, func(_ context.Context, ts *types.TipSet) error { - t.Fatal("revert on timeout") - return nil - }, 3, 20, matchAddrMethod(t0123, 5)) - require.NoError(t, err) - - fcs.advance(0, 21, nil) - require.False(t, called) - - fcs.advance(0, 5, nil) - require.True(t, called) - called = false - - // with check func reporting done - - fcs = &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events = NewEvents(context.Background(), fcs) - - err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { - return true, true, nil 
- }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { - called = true - require.Nil(t, msg) - require.Equal(t, abi.ChainEpoch(20), ts.Height()) - require.Equal(t, abi.ChainEpoch(23), curH) - return false, nil - }, func(_ context.Context, ts *types.TipSet) error { - t.Fatal("revert on timeout") - return nil - }, 3, 20, matchAddrMethod(t0123, 5)) - require.NoError(t, err) - - fcs.advance(0, 21, nil) - require.False(t, called) - - fcs.advance(0, 5, nil) - require.False(t, called) -} - -func TestCalledOrder(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - at := 0 - - err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { - switch at { - case 0: - require.Equal(t, uint64(1), msg.Nonce) - require.Equal(t, abi.ChainEpoch(4), ts.Height()) - case 1: - require.Equal(t, uint64(2), msg.Nonce) - require.Equal(t, abi.ChainEpoch(5), ts.Height()) - default: - t.Fatal("apply should only get called twice, at: ", at) - } - at++ - return true, nil - }, func(_ context.Context, ts *types.TipSet) error { - switch at { - case 2: - require.Equal(t, abi.ChainEpoch(5), ts.Height()) - case 3: - require.Equal(t, abi.ChainEpoch(4), ts.Height()) - default: - t.Fatal("revert should only get called twice, at: ", at) - } - at++ - return nil - }, 3, 20, matchAddrMethod(t0123, 5)) - require.NoError(t, err) - - fcs.advance(0, 10, map[int]cid.Cid{ - 1: fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 1}, - }, - }), - 2: 
fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 2}, - }, - }), - }) - - fcs.advance(9, 1, nil) -} - -func TestCalledNull(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - more := true - var applied, reverted bool - - err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { - require.Equal(t, false, applied) - applied = true - return more, nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 20, matchAddrMethod(t0123, 5)) - require.NoError(t, err) - - // create few blocks to make sure nothing get's randomly called - - fcs.advance(0, 4, nil) // H=5 - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create blocks with message (but below confidence threshold) - - fcs.advance(0, 3, map[int]cid.Cid{ // msg at H=6; H=8 (confidence=2) - 0: fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 1}, - }, - }), - }) - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create additional blocks so we are above confidence threshold, but with null tipset at the height - // of application - - fcs.advance(0, 3, nil, 10) // H=11 (confidence=3, apply) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - fcs.advance(5, 1, nil, 10) - - require.Equal(t, false, applied) - require.Equal(t, true, reverted) -} - -func TestRemoveTriggersOnMessage(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: 
map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - t0123, err := address.NewFromString("t0123") - require.NoError(t, err) - - more := true - var applied, reverted bool - - err = events.Called(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(msg *types.Message, rec *types.MessageReceipt, ts *types.TipSet, curH abi.ChainEpoch) (bool, error) { - require.Equal(t, false, applied) - applied = true - return more, nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, 3, 20, matchAddrMethod(t0123, 5)) - require.NoError(t, err) - - // create few blocks to make sure nothing get's randomly called - - fcs.advance(0, 4, nil) // H=5 - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create blocks with message (but below confidence threshold) - - fcs.advance(0, 3, map[int]cid.Cid{ // msg occurs at H=5, applied at H=6; H=8 (confidence=2) - 0: fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, From: t0123, Method: 5, Nonce: 1}, - }, - }), - }) - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // revert applied TS & message TS - fcs.advance(3, 1, nil) // H=6 (tipset message applied in reverted, AND message reverted) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create additional blocks so we are above confidence threshold, but message not applied - // as it was reverted - fcs.advance(0, 5, nil) // H=11 (confidence=3, apply) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create blocks with message again (but below confidence threshold) - - fcs.advance(0, 3, map[int]cid.Cid{ // msg occurs at H=12, applied at H=13; H=15 (confidence=2) - 0: fcs.fakeMsgs(fakeMsg{ - bmsgs: []*types.Message{ - {To: t0123, 
From: t0123, Method: 5, Nonce: 2}, - }, - }), - }) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // revert applied height TS, but don't remove message trigger - fcs.advance(2, 1, nil) // H=13 (tipset message applied in reverted, by tipset with message not reverted) - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create additional blocks so we are above confidence threshold - fcs.advance(0, 4, nil) // H=18 (confidence=3, apply) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) -} - -type testStateChange struct { - from string - to string -} - -func TestStateChanged(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - more := true - var applied, reverted bool - var appliedData StateChange - var appliedOldTs *types.TipSet - var appliedNewTs *types.TipSet - var appliedH abi.ChainEpoch - var matchData StateChange - - confidence := 3 - timeout := abi.ChainEpoch(20) - - err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { - require.Equal(t, false, applied) - applied = true - appliedData = data - appliedOldTs = oldTs - appliedNewTs = newTs - appliedH = curH - return more, nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { - if matchData == nil { - return false, matchData, nil - } - - d := matchData - matchData = nil - return true, d, nil - }) - require.NoError(t, err) - - // create few blocks to make sure nothing get's randomly called - - fcs.advance(0, 4, nil) // H=5 - require.Equal(t, 
false, applied) - require.Equal(t, false, reverted) - - // create state change (but below confidence threshold) - matchData = testStateChange{from: "a", to: "b"} - fcs.advance(0, 3, nil) - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // create additional block so we are above confidence threshold - - fcs.advance(0, 2, nil) // H=10 (confidence=3, apply) - - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - // dip below confidence (should not apply again) - fcs.advance(2, 2, nil) // H=10 (confidence=3, apply) - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // Change happens from 5 -> 6 - require.Equal(t, abi.ChainEpoch(5), appliedOldTs.Height()) - require.Equal(t, abi.ChainEpoch(6), appliedNewTs.Height()) - - // Actually applied (with confidence) at 9 - require.Equal(t, abi.ChainEpoch(9), appliedH) - - // Make sure the state change was correctly passed through - rcvd := appliedData.(testStateChange) - require.Equal(t, "a", rcvd.from) - require.Equal(t, "b", rcvd.to) -} - -func TestStateChangedRevert(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - more := true - var applied, reverted bool - var matchData StateChange - - confidence := 1 - timeout := abi.ChainEpoch(20) - - err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { - require.Equal(t, false, applied) - applied = true - return more, nil - }, func(_ context.Context, ts *types.TipSet) error { - reverted = true - return nil - }, confidence, timeout, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { - if matchData == nil { 
- return false, matchData, nil - } - - d := matchData - matchData = nil - return true, d, nil - }) - require.NoError(t, err) - - fcs.advance(0, 2, nil) // H=3 - - // Make a state change from TS at height 3 to TS at height 4 - matchData = testStateChange{from: "a", to: "b"} - fcs.advance(0, 1, nil) // H=4 - - // Haven't yet reached confidence - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // Advance to reach confidence level - fcs.advance(0, 1, nil) // H=5 - - // Should now have called the handler - require.Equal(t, true, applied) - require.Equal(t, false, reverted) - applied = false - - // Advance 3 more TS - fcs.advance(0, 3, nil) // H=8 - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // Regress but not so far as to cause a revert - fcs.advance(3, 1, nil) // H=6 - - require.Equal(t, false, applied) - require.Equal(t, false, reverted) - - // Regress back to state where change happened - fcs.advance(3, 1, nil) // H=4 - - // Expect revert to have happened - require.Equal(t, false, applied) - require.Equal(t, true, reverted) -} - -func TestStateChangedTimeout(t *testing.T) { - fcs := &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events := NewEvents(context.Background(), fcs) - - called := false - - err := events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { - return false, true, nil - }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { - called = true - require.Nil(t, data) - require.Equal(t, abi.ChainEpoch(20), newTs.Height()) - require.Equal(t, abi.ChainEpoch(23), curH) - return false, nil - }, func(_ context.Context, ts *types.TipSet) error { - t.Fatal("revert on timeout") - return nil - }, 3, 20, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { - return false, nil, nil 
- }) - - require.NoError(t, err) - - fcs.advance(0, 21, nil) - require.False(t, called) - - fcs.advance(0, 5, nil) - require.True(t, called) - called = false - - // with check func reporting done - - fcs = &fakeCS{ - t: t, - h: 1, - - msgs: map[cid.Cid]fakeMsg{}, - blkMsgs: map[cid.Cid]cid.Cid{}, - tsc: newTSCache(2*build.ForkLengthThreshold, nil), - } - require.NoError(t, fcs.tsc.add(fcs.makeTs(t, nil, 1, dummyCid))) - - events = NewEvents(context.Background(), fcs) - - err = events.StateChanged(func(ts *types.TipSet) (d bool, m bool, e error) { - return true, true, nil - }, func(oldTs, newTs *types.TipSet, data StateChange, curH abi.ChainEpoch) (bool, error) { - called = true - require.Nil(t, data) - require.Equal(t, abi.ChainEpoch(20), newTs.Height()) - require.Equal(t, abi.ChainEpoch(23), curH) - return false, nil - }, func(_ context.Context, ts *types.TipSet) error { - t.Fatal("revert on timeout") - return nil - }, 3, 20, func(oldTs, newTs *types.TipSet) (bool, StateChange, error) { - return false, nil, nil - }) - require.NoError(t, err) - - fcs.advance(0, 21, nil) - require.False(t, called) - - fcs.advance(0, 5, nil) - require.False(t, called) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/state/predicates.go b/vendor/github.com/filecoin-project/lotus/chain/events/state/predicates.go deleted file mode 100644 index 3245d5c037..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/state/predicates.go +++ /dev/null @@ -1,137 +0,0 @@ -package state - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/lotus/api/apibstore" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" -) - -// UserData 
is the data returned from the DiffFunc -type UserData interface{} - -// ChainAPI abstracts out calls made by this class to external APIs -type ChainAPI interface { - apibstore.ChainIO - StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) -} - -// StatePredicates has common predicates for responding to state changes -type StatePredicates struct { - api ChainAPI - cst *cbor.BasicIpldStore -} - -func NewStatePredicates(api ChainAPI) *StatePredicates { - return &StatePredicates{ - api: api, - cst: cbor.NewCborStore(apibstore.NewAPIBlockstore(api)), - } -} - -// DiffFunc check if there's a change form oldState to newState, and returns -// - changed: was there a change -// - user: user-defined data representing the state change -// - err -type DiffFunc func(ctx context.Context, oldState, newState *types.TipSet) (changed bool, user UserData, err error) - -type DiffStateFunc func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) - -// OnActorStateChanged calls diffStateFunc when the state changes for the given actor -func (sp *StatePredicates) OnActorStateChanged(addr address.Address, diffStateFunc DiffStateFunc) DiffFunc { - return func(ctx context.Context, oldState, newState *types.TipSet) (changed bool, user UserData, err error) { - oldActor, err := sp.api.StateGetActor(ctx, addr, oldState.Key()) - if err != nil { - return false, nil, err - } - newActor, err := sp.api.StateGetActor(ctx, addr, newState.Key()) - if err != nil { - return false, nil, err - } - - if oldActor.Head.Equals(newActor.Head) { - return false, nil, nil - } - return diffStateFunc(ctx, oldActor.Head, newActor.Head) - } -} - -type DiffStorageMarketStateFunc func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error) - -// OnStorageMarketActorChanged calls diffStorageMarketState when the state changes for the market actor -func (sp 
*StatePredicates) OnStorageMarketActorChanged(diffStorageMarketState DiffStorageMarketStateFunc) DiffFunc { - return sp.OnActorStateChanged(builtin.StorageMarketActorAddr, func(ctx context.Context, oldActorStateHead, newActorStateHead cid.Cid) (changed bool, user UserData, err error) { - var oldState market.State - if err := sp.cst.Get(ctx, oldActorStateHead, &oldState); err != nil { - return false, nil, err - } - var newState market.State - if err := sp.cst.Get(ctx, newActorStateHead, &newState); err != nil { - return false, nil, err - } - return diffStorageMarketState(ctx, &oldState, &newState) - }) -} - -type DiffDealStatesFunc func(ctx context.Context, oldDealStateRoot *amt.Root, newDealStateRoot *amt.Root) (changed bool, user UserData, err error) - -// OnDealStateChanged calls diffDealStates when the market state changes -func (sp *StatePredicates) OnDealStateChanged(diffDealStates DiffDealStatesFunc) DiffStorageMarketStateFunc { - return func(ctx context.Context, oldState *market.State, newState *market.State) (changed bool, user UserData, err error) { - if oldState.States.Equals(newState.States) { - return false, nil, nil - } - - oldRoot, err := amt.LoadAMT(ctx, sp.cst, oldState.States) - if err != nil { - return false, nil, err - } - newRoot, err := amt.LoadAMT(ctx, sp.cst, newState.States) - if err != nil { - return false, nil, err - } - - return diffDealStates(ctx, oldRoot, newRoot) - } -} - -// ChangedDeals is a set of changes to deal state -type ChangedDeals map[abi.DealID]DealStateChange - -// DealStateChange is a change in deal state from -> to -type DealStateChange struct { - From market.DealState - To market.DealState -} - -// DealStateChangedForIDs detects changes in the deal state AMT for the given deal IDs -func (sp *StatePredicates) DealStateChangedForIDs(dealIds []abi.DealID) DiffDealStatesFunc { - return func(ctx context.Context, oldDealStateRoot *amt.Root, newDealStateRoot *amt.Root) (changed bool, user UserData, err error) { - changedDeals 
:= make(ChangedDeals) - for _, dealID := range dealIds { - var oldDeal, newDeal market.DealState - err := oldDealStateRoot.Get(ctx, uint64(dealID), &oldDeal) - if err != nil { - return false, nil, err - } - err = newDealStateRoot.Get(ctx, uint64(dealID), &newDeal) - if err != nil { - return false, nil, err - } - if oldDeal != newDeal { - changedDeals[dealID] = DealStateChange{oldDeal, newDeal} - } - } - if len(changedDeals) > 0 { - return true, changedDeals, nil - } - return false, nil, nil - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/state/predicates_test.go b/vendor/github.com/filecoin-project/lotus/chain/events/state/predicates_test.go deleted file mode 100644 index 56387f8b53..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/state/predicates_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package state - -import ( - "context" - "testing" - - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-hamt-ipld" - - "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - ds "github.com/ipfs/go-datastore" - ds_sync "github.com/ipfs/go-datastore/sync" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbornode "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" - - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/abi" -) - -var dummyCid cid.Cid - -func init() { - dummyCid, _ = cid.Parse("bafkqaaa") -} - -type mockAPI struct { - ts map[types.TipSetKey]*types.Actor - bs bstore.Blockstore -} - -func newMockAPI(bs bstore.Blockstore) *mockAPI { - return &mockAPI{ - bs: bs, - ts: make(map[types.TipSetKey]*types.Actor), - } -} - -func (m mockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return m.bs.Has(c) -} - -func (m mockAPI) ChainReadObj(ctx context.Context, c cid.Cid) 
([]byte, error) { - blk, err := m.bs.Get(c) - if err != nil { - return nil, xerrors.Errorf("blockstore get: %w", err) - } - - return blk.RawData(), nil -} - -func (m mockAPI) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) { - return m.ts[tsk], nil -} - -func (m mockAPI) setActor(tsk types.TipSetKey, act *types.Actor) { - m.ts[tsk] = act -} - -func TestPredicates(t *testing.T) { - ctx := context.Background() - bs := bstore.NewBlockstore(ds_sync.MutexWrap(ds.NewMapDatastore())) - store := cbornode.NewCborStore(bs) - - oldDeals := map[abi.DealID]*market.DealState{ - abi.DealID(1): { - SectorStartEpoch: 1, - LastUpdatedEpoch: 2, - SlashEpoch: 0, - }, - abi.DealID(2): { - SectorStartEpoch: 4, - LastUpdatedEpoch: 5, - SlashEpoch: 0, - }, - } - oldStateC := createMarketState(ctx, t, store, oldDeals) - - newDeals := map[abi.DealID]*market.DealState{ - abi.DealID(1): { - SectorStartEpoch: 1, - LastUpdatedEpoch: 3, - SlashEpoch: 0, - }, - abi.DealID(2): { - SectorStartEpoch: 4, - LastUpdatedEpoch: 6, - SlashEpoch: 6, - }, - } - newStateC := createMarketState(ctx, t, store, newDeals) - - miner, err := address.NewFromString("t00") - require.NoError(t, err) - oldState, err := mockTipset(miner, 1) - require.NoError(t, err) - newState, err := mockTipset(miner, 2) - require.NoError(t, err) - - api := newMockAPI(bs) - api.setActor(oldState.Key(), &types.Actor{Head: oldStateC}) - api.setActor(newState.Key(), &types.Actor{Head: newStateC}) - - preds := NewStatePredicates(api) - - dealIds := []abi.DealID{abi.DealID(1), abi.DealID(2)} - diffFn := preds.OnStorageMarketActorChanged(preds.OnDealStateChanged(preds.DealStateChangedForIDs(dealIds))) - - // Diff a state against itself: expect no change - changed, _, err := diffFn(ctx, oldState, oldState) - require.NoError(t, err) - require.False(t, changed) - - // Diff old state against new state - changed, val, err := diffFn(ctx, oldState, newState) - require.NoError(t, err) - 
require.True(t, changed) - - changedDeals, ok := val.(ChangedDeals) - require.True(t, ok) - require.Len(t, changedDeals, 2) - require.Contains(t, changedDeals, abi.DealID(1)) - require.Contains(t, changedDeals, abi.DealID(2)) - deal1 := changedDeals[abi.DealID(1)] - if deal1.From.LastUpdatedEpoch != 2 || deal1.To.LastUpdatedEpoch != 3 { - t.Fatal("Unexpected change to LastUpdatedEpoch") - } - deal2 := changedDeals[abi.DealID(2)] - if deal2.From.SlashEpoch != 0 || deal2.To.SlashEpoch != 6 { - t.Fatal("Unexpected change to SlashEpoch") - } - - // Test that OnActorStateChanged does not call the callback if the state has not changed - mockAddr, err := address.NewFromString("t01") - require.NoError(t, err) - actorDiffFn := preds.OnActorStateChanged(mockAddr, func(context.Context, cid.Cid, cid.Cid) (bool, UserData, error) { - t.Fatal("No state change so this should not be called") - return false, nil, nil - }) - changed, _, err = actorDiffFn(ctx, oldState, oldState) - require.NoError(t, err) - require.False(t, changed) - - // Test that OnDealStateChanged does not call the callback if the state has not changed - diffDealStateFn := preds.OnDealStateChanged(func(context.Context, *amt.Root, *amt.Root) (bool, UserData, error) { - t.Fatal("No state change so this should not be called") - return false, nil, nil - }) - marketState := createEmptyMarketState(t, store) - changed, _, err = diffDealStateFn(ctx, marketState, marketState) - require.NoError(t, err) - require.False(t, changed) -} - -func mockTipset(miner address.Address, timestamp uint64) (*types.TipSet, error) { - return types.NewTipSet([]*types.BlockHeader{{ - Miner: miner, - Height: 5, - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - Timestamp: timestamp, - }}) -} - -func createMarketState(ctx context.Context, t *testing.T, store *cbornode.BasicIpldStore, deals 
map[abi.DealID]*market.DealState) cid.Cid { - rootCid := createAMT(ctx, t, store, deals) - - state := createEmptyMarketState(t, store) - state.States = rootCid - - stateC, err := store.Put(ctx, state) - require.NoError(t, err) - return stateC -} - -func createEmptyMarketState(t *testing.T, store *cbornode.BasicIpldStore) *market.State { - emptyArrayCid, err := amt.NewAMT(store).Flush(context.TODO()) - require.NoError(t, err) - emptyMap, err := store.Put(context.TODO(), hamt.NewNode(store, hamt.UseTreeBitWidth(5))) - require.NoError(t, err) - return market.ConstructState(emptyArrayCid, emptyMap, emptyMap) -} - -func createAMT(ctx context.Context, t *testing.T, store *cbornode.BasicIpldStore, deals map[abi.DealID]*market.DealState) cid.Cid { - root := amt.NewAMT(store) - for dealID, dealState := range deals { - err := root.Set(ctx, uint64(dealID), dealState) - require.NoError(t, err) - } - rootCid, err := root.Flush(ctx) - require.NoError(t, err) - return rootCid -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/tscache.go b/vendor/github.com/filecoin-project/lotus/chain/events/tscache.go deleted file mode 100644 index 3852c9930c..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/tscache.go +++ /dev/null @@ -1,129 +0,0 @@ -package events - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types" -) - -type tsByHFunc func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) - -// tipSetCache implements a simple ring-buffer cache to keep track of recent -// tipsets -type tipSetCache struct { - cache []*types.TipSet - start int - len int - - storage tsByHFunc -} - -func newTSCache(cap abi.ChainEpoch, storage tsByHFunc) *tipSetCache { - return &tipSetCache{ - cache: make([]*types.TipSet, cap), - start: 0, - len: 0, - - storage: storage, - } -} - -func (tsc *tipSetCache) add(ts *types.TipSet) error { - if tsc.len 
> 0 { - if tsc.cache[tsc.start].Height() >= ts.Height() { - return xerrors.Errorf("tipSetCache.add: expected new tipset height to be at least %d, was %d", tsc.cache[tsc.start].Height()+1, ts.Height()) - } - } - - nextH := ts.Height() - if tsc.len > 0 { - nextH = tsc.cache[tsc.start].Height() + 1 - } - - // fill null blocks - for nextH != ts.Height() { - tsc.start = normalModulo(tsc.start+1, len(tsc.cache)) - tsc.cache[tsc.start] = nil - if tsc.len < len(tsc.cache) { - tsc.len++ - } - nextH++ - } - - tsc.start = normalModulo(tsc.start+1, len(tsc.cache)) - tsc.cache[tsc.start] = ts - if tsc.len < len(tsc.cache) { - tsc.len++ - } - return nil -} - -func (tsc *tipSetCache) revert(ts *types.TipSet) error { - if tsc.len == 0 { - return nil // this can happen, and it's fine - } - - if !tsc.cache[tsc.start].Equals(ts) { - return xerrors.New("tipSetCache.revert: revert tipset didn't match cache head") - } - - tsc.cache[tsc.start] = nil - tsc.start = normalModulo(tsc.start-1, len(tsc.cache)) - tsc.len-- - - _ = tsc.revert(nil) // revert null block gap - return nil -} - -func (tsc *tipSetCache) getNonNull(height abi.ChainEpoch) (*types.TipSet, error) { - for { - ts, err := tsc.get(height) - if err != nil { - return nil, err - } - if ts != nil { - return ts, nil - } - height++ - } -} - -func (tsc *tipSetCache) get(height abi.ChainEpoch) (*types.TipSet, error) { - if tsc.len == 0 { - log.Warnf("tipSetCache.get: cache is empty, requesting from storage (h=%d)", height) - return tsc.storage(context.TODO(), height, types.EmptyTSK) - } - - headH := tsc.cache[tsc.start].Height() - - if height > headH { - return nil, xerrors.Errorf("tipSetCache.get: requested tipset not in cache (req: %d, cache head: %d)", height, headH) - } - - clen := len(tsc.cache) - var tail *types.TipSet - for i := 1; i <= tsc.len; i++ { - tail = tsc.cache[normalModulo(tsc.start-tsc.len+i, clen)] - if tail != nil { - break - } - } - - if height < tail.Height() { - log.Warnf("tipSetCache.get: requested tipset not 
in cache, requesting from storage (h=%d; tail=%d)", height, tail.Height()) - return tsc.storage(context.TODO(), height, tail.Key()) - } - - return tsc.cache[normalModulo(tsc.start-int(headH-height), clen)], nil -} - -func (tsc *tipSetCache) best() *types.TipSet { - return tsc.cache[tsc.start] -} - -func normalModulo(n, m int) int { - return ((n % m) + m) % m -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/tscache_test.go b/vendor/github.com/filecoin-project/lotus/chain/events/tscache_test.go deleted file mode 100644 index 1278e58e9c..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/tscache_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package events - -import ( - "context" - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" -) - -func TestTsCache(t *testing.T) { - tsc := newTSCache(50, func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { - t.Fatal("storage call") - return &types.TipSet{}, nil - }) - - h := abi.ChainEpoch(75) - - a, _ := address.NewFromString("t00") - - add := func() { - ts, err := types.NewTipSet([]*types.BlockHeader{{ - Miner: a, - Height: h, - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - }}) - if err != nil { - t.Fatal(err) - } - if err := tsc.add(ts); err != nil { - t.Fatal(err) - } - h++ - } - - for i := 0; i < 9000; i++ { - if i%90 > 60 { - if err := tsc.revert(tsc.best()); err != nil { - t.Fatal(err, "; i:", i) - return - } - h-- - } else { - add() - } - } - -} - -func TestTsCacheNulls(t *testing.T) { - tsc := newTSCache(50, func(context.Context, abi.ChainEpoch, types.TipSetKey) (*types.TipSet, error) { - 
t.Fatal("storage call") - return &types.TipSet{}, nil - }) - - h := abi.ChainEpoch(75) - - a, _ := address.NewFromString("t00") - add := func() { - ts, err := types.NewTipSet([]*types.BlockHeader{{ - Miner: a, - Height: h, - ParentStateRoot: dummyCid, - Messages: dummyCid, - ParentMessageReceipts: dummyCid, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS}, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS}, - }}) - if err != nil { - t.Fatal(err) - } - if err := tsc.add(ts); err != nil { - t.Fatal(err) - } - h++ - } - - add() - add() - add() - h += 5 - - add() - add() - - require.Equal(t, h-1, tsc.best().Height()) - - ts, err := tsc.get(h - 1) - require.NoError(t, err) - require.Equal(t, h-1, ts.Height()) - - ts, err = tsc.get(h - 2) - require.NoError(t, err) - require.Equal(t, h-2, ts.Height()) - - ts, err = tsc.get(h - 3) - require.NoError(t, err) - require.Nil(t, ts) - - ts, err = tsc.get(h - 8) - require.NoError(t, err) - require.Equal(t, h-8, ts.Height()) - - require.NoError(t, tsc.revert(tsc.best())) - require.NoError(t, tsc.revert(tsc.best())) - require.Equal(t, h-8, tsc.best().Height()) - - h += 50 - add() - - ts, err = tsc.get(h - 1) - require.NoError(t, err) - require.Equal(t, h-1, ts.Height()) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/events/utils.go b/vendor/github.com/filecoin-project/lotus/chain/events/utils.go deleted file mode 100644 index 40556c9ff9..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/events/utils.go +++ /dev/null @@ -1,44 +0,0 @@ -package events - -import ( - "context" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types" -) - -func (me *messageEvents) CheckMsg(ctx context.Context, smsg types.ChainMsg, hnd MsgHandler) CheckFunc { - msg := smsg.VMMessage() - - return func(ts *types.TipSet) (done bool, more bool, err error) { - fa, err := me.cs.StateGetActor(ctx, msg.From, ts.Key()) - if err != nil { - return false, true, err - } - - // >= because actor nonce is 
actually the next nonce that is expected to appear on chain - if msg.Nonce >= fa.Nonce { - return false, true, nil - } - - rec, err := me.cs.StateGetReceipt(ctx, smsg.VMMessage().Cid(), ts.Key()) - if err != nil { - return false, true, xerrors.Errorf("getting receipt in CheckMsg: %w", err) - } - - more, err = hnd(msg, rec, ts, ts.Height()) - - return true, more, err - } -} - -func (me *messageEvents) MatchMsg(inmsg *types.Message) MsgMatchFunc { - return func(msg *types.Message) (bool, error) { - if msg.From == inmsg.From && msg.Nonce == inmsg.Nonce && !inmsg.Equals(msg) { - return false, xerrors.Errorf("matching msg %s from %s, nonce %d: got duplicate origin/nonce msg %d", inmsg.Cid(), inmsg.From, inmsg.Nonce, msg.Nonce) - } - - return inmsg.Equals(msg), nil - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/gen.go b/vendor/github.com/filecoin-project/lotus/chain/gen/gen.go deleted file mode 100644 index 54040deb48..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/gen.go +++ /dev/null @@ -1,618 +0,0 @@ -package gen - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "sync/atomic" - "time" - - "github.com/filecoin-project/go-address" - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/specs-actors/actors/abi" - saminer "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/crypto" - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-blockservice" - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - offline "github.com/ipfs/go-ipfs-exchange-offline" - format "github.com/ipfs/go-ipld-format" - logging "github.com/ipfs/go-log/v2" - "github.com/ipfs/go-merkledag" - "github.com/ipld/go-car" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/beacon" - genesis2 
"github.com/filecoin-project/lotus/chain/gen/genesis" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/lotus/cmd/lotus-seed/seed" - "github.com/filecoin-project/lotus/genesis" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/node/repo" - "github.com/filecoin-project/sector-storage/ffiwrapper" -) - -var log = logging.Logger("gen") - -const msgsPerBlock = 20 - -var ValidWpostForTesting = []abi.PoStProof{{ - ProofBytes: []byte("valid proof"), -}} - -type ChainGen struct { - msgsPerBlock int - - bs blockstore.Blockstore - - cs *store.ChainStore - - beacon beacon.RandomBeacon - - sm *stmgr.StateManager - - genesis *types.BlockHeader - CurTipset *store.FullTipSet - - Timestamper func(*types.TipSet, abi.ChainEpoch) uint64 - - GetMessages func(*ChainGen) ([]*types.SignedMessage, error) - - w *wallet.Wallet - - eppProvs map[address.Address]WinningPoStProver - Miners []address.Address - receivers []address.Address - banker address.Address - bankerNonce uint64 - - r repo.Repo - lr repo.LockedRepo -} - -type mybs struct { - blockstore.Blockstore -} - -func (m mybs) Get(c cid.Cid) (block.Block, error) { - b, err := m.Blockstore.Get(c) - if err != nil { - return nil, err - } - - return b, nil -} - -func NewGeneratorWithSectors(numSectors int) (*ChainGen, error) { - saminer.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ - abi.RegisteredSealProof_StackedDrg2KiBV1: {}, - } - - mr := repo.NewMemory(nil) - lr, err := mr.Lock(repo.StorageMiner) - if err != nil { - return nil, xerrors.Errorf("taking mem-repo lock failed: %w", err) - } - - ds, err := lr.Datastore("/metadata") - if err != nil { - return nil, xerrors.Errorf("failed to get metadata datastore: %w", err) - } - - bds, err := lr.Datastore("/chain") - if 
err != nil { - return nil, xerrors.Errorf("failed to get blocks datastore: %w", err) - } - - bs := mybs{blockstore.NewIdStore(blockstore.NewBlockstore(bds))} - - ks, err := lr.KeyStore() - if err != nil { - return nil, xerrors.Errorf("getting repo keystore failed: %w", err) - } - - w, err := wallet.NewWallet(ks) - if err != nil { - return nil, xerrors.Errorf("creating memrepo wallet failed: %w", err) - } - - banker, err := w.GenerateKey(crypto.SigTypeSecp256k1) - if err != nil { - return nil, xerrors.Errorf("failed to generate banker key: %w", err) - } - - receievers := make([]address.Address, msgsPerBlock) - for r := range receievers { - receievers[r], err = w.GenerateKey(crypto.SigTypeBLS) - if err != nil { - return nil, xerrors.Errorf("failed to generate receiver key: %w", err) - } - } - - maddr1 := genesis2.MinerAddress(0) - - m1temp, err := ioutil.TempDir("", "preseal") - if err != nil { - return nil, err - } - - genm1, k1, err := seed.PreSeal(maddr1, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, numSectors, m1temp, []byte("some randomness"), nil, true) - if err != nil { - return nil, err - } - - maddr2 := genesis2.MinerAddress(1) - - m2temp, err := ioutil.TempDir("", "preseal") - if err != nil { - return nil, err - } - - genm2, k2, err := seed.PreSeal(maddr2, abi.RegisteredSealProof_StackedDrg2KiBV1, 0, numSectors, m2temp, []byte("some randomness"), nil, true) - if err != nil { - return nil, err - } - - mk1, err := w.Import(k1) - if err != nil { - return nil, err - } - mk2, err := w.Import(k2) - if err != nil { - return nil, err - } - - sys := vm.Syscalls(&genFakeVerifier{}) - - tpl := genesis.Template{ - Accounts: []genesis.Actor{ - { - Type: genesis.TAccount, - Balance: types.FromFil(40000), - Meta: (&genesis.AccountMeta{Owner: mk1}).ActorMeta(), - }, - { - Type: genesis.TAccount, - Balance: types.FromFil(40000), - Meta: (&genesis.AccountMeta{Owner: mk2}).ActorMeta(), - }, - { - Type: genesis.TAccount, - Balance: types.FromFil(50000), - Meta: 
(&genesis.AccountMeta{Owner: banker}).ActorMeta(), - }, - }, - Miners: []genesis.Miner{ - *genm1, - *genm2, - }, - NetworkName: "", - Timestamp: uint64(time.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()), - } - - genb, err := genesis2.MakeGenesisBlock(context.TODO(), bs, sys, tpl) - if err != nil { - return nil, xerrors.Errorf("make genesis block failed: %w", err) - } - - cs := store.NewChainStore(bs, ds, sys) - - genfb := &types.FullBlock{Header: genb.Genesis} - gents := store.NewFullTipSet([]*types.FullBlock{genfb}) - - if err := cs.SetGenesis(genb.Genesis); err != nil { - return nil, xerrors.Errorf("set genesis failed: %w", err) - } - - mgen := make(map[address.Address]WinningPoStProver) - for i := range tpl.Miners { - mgen[genesis2.MinerAddress(uint64(i))] = &wppProvider{} - } - - sm := stmgr.NewStateManager(cs) - - miners := []address.Address{maddr1, maddr2} - - beac := beacon.NewMockBeacon(time.Second) - //beac, err := drand.NewDrandBeacon(tpl.Timestamp, build.BlockDelaySecs) - //if err != nil { - //return nil, xerrors.Errorf("creating drand beacon: %w", err) - //} - - gen := &ChainGen{ - bs: bs, - cs: cs, - sm: sm, - msgsPerBlock: msgsPerBlock, - genesis: genb.Genesis, - beacon: beac, - w: w, - - GetMessages: getRandomMessages, - Miners: miners, - eppProvs: mgen, - banker: banker, - receivers: receievers, - - CurTipset: gents, - - r: mr, - lr: lr, - } - - return gen, nil -} - -func NewGenerator() (*ChainGen, error) { - return NewGeneratorWithSectors(1) -} - -func (cg *ChainGen) SetStateManager(sm *stmgr.StateManager) { - cg.sm = sm -} - -func (cg *ChainGen) ChainStore() *store.ChainStore { - return cg.cs -} - -func (cg *ChainGen) Genesis() *types.BlockHeader { - return cg.genesis -} - -func (cg *ChainGen) GenesisCar() ([]byte, error) { - offl := offline.Exchange(cg.bs) - blkserv := blockservice.New(cg.bs, offl) - dserv := merkledag.NewDAGService(blkserv) - - out := new(bytes.Buffer) - - if err := 
car.WriteCarWithWalker(context.TODO(), dserv, []cid.Cid{cg.Genesis().Cid()}, out, CarWalkFunc); err != nil { - return nil, xerrors.Errorf("genesis car write car failed: %w", err) - } - - return out.Bytes(), nil -} - -func CarWalkFunc(nd format.Node) (out []*format.Link, err error) { - for _, link := range nd.Links() { - if link.Cid.Prefix().MhType == uint64(commcid.FC_SEALED_V1) || link.Cid.Prefix().MhType == uint64(commcid.FC_UNSEALED_V1) { - continue - } - out = append(out, link) - } - - return out, nil -} - -func (cg *ChainGen) nextBlockProof(ctx context.Context, pts *types.TipSet, m address.Address, round abi.ChainEpoch) ([]types.BeaconEntry, *types.ElectionProof, *types.Ticket, error) { - mc := &mca{w: cg.w, sm: cg.sm, pv: ffiwrapper.ProofVerifier, bcn: cg.beacon} - - mbi, err := mc.MinerGetBaseInfo(ctx, m, round, pts.Key()) - if err != nil { - return nil, nil, nil, xerrors.Errorf("get miner base info: %w", err) - } - - prev := mbi.PrevBeaconEntry - - entries, err := beacon.BeaconEntriesForBlock(ctx, cg.beacon, round, prev) - if err != nil { - return nil, nil, nil, xerrors.Errorf("get beacon entries for block: %w", err) - } - - rbase := prev - if len(entries) > 0 { - rbase = entries[len(entries)-1] - } - - eproof, err := IsRoundWinner(ctx, pts, round, m, rbase, mbi, mc) - if err != nil { - return nil, nil, nil, xerrors.Errorf("checking round winner failed: %w", err) - } - - buf := new(bytes.Buffer) - if err := m.MarshalCBOR(buf); err != nil { - return nil, nil, nil, xerrors.Errorf("failed to cbor marshal address: %w", err) - } - - if len(entries) == 0 { - buf.Write(pts.MinTicket().VRFProof) - } - - ticketRand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_TicketProduction, round-build.TicketRandomnessLookback, buf.Bytes()) - if err != nil { - return nil, nil, nil, err - } - - st := pts.ParentState() - - worker, err := stmgr.GetMinerWorkerRaw(ctx, cg.sm, st, m) - if err != nil { - return nil, nil, nil, xerrors.Errorf("get miner worker: %w", 
err) - } - - vrfout, err := ComputeVRF(ctx, cg.w.Sign, worker, ticketRand) - if err != nil { - return nil, nil, nil, xerrors.Errorf("compute VRF: %w", err) - } - - return entries, eproof, &types.Ticket{VRFProof: vrfout}, nil -} - -type MinedTipSet struct { - TipSet *store.FullTipSet - Messages []*types.SignedMessage -} - -func (cg *ChainGen) NextTipSet() (*MinedTipSet, error) { - mts, err := cg.NextTipSetFromMiners(cg.CurTipset.TipSet(), cg.Miners) - if err != nil { - return nil, err - } - - cg.CurTipset = mts.TipSet - return mts, nil -} - -func (cg *ChainGen) NextTipSetFromMiners(base *types.TipSet, miners []address.Address) (*MinedTipSet, error) { - var blks []*types.FullBlock - - msgs, err := cg.GetMessages(cg) - if err != nil { - return nil, xerrors.Errorf("get random messages: %w", err) - } - - for round := base.Height() + 1; len(blks) == 0; round++ { - for _, m := range miners { - bvals, et, ticket, err := cg.nextBlockProof(context.TODO(), base, m, round) - if err != nil { - return nil, xerrors.Errorf("next block proof: %w", err) - } - - if et != nil { - // TODO: maybe think about passing in more real parameters to this? 
- wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil) - if err != nil { - return nil, err - } - - fblk, err := cg.makeBlock(base, m, ticket, et, bvals, round, wpost, msgs) - if err != nil { - return nil, xerrors.Errorf("making a block for next tipset failed: %w", err) - } - - if err := cg.cs.PersistBlockHeaders(fblk.Header); err != nil { - return nil, xerrors.Errorf("chainstore AddBlock: %w", err) - } - - blks = append(blks, fblk) - } - } - } - - fts := store.NewFullTipSet(blks) - - return &MinedTipSet{ - TipSet: fts, - Messages: msgs, - }, nil -} - -func (cg *ChainGen) makeBlock(parents *types.TipSet, m address.Address, vrfticket *types.Ticket, - eticket *types.ElectionProof, bvals []types.BeaconEntry, height abi.ChainEpoch, - wpost []abi.PoStProof, msgs []*types.SignedMessage) (*types.FullBlock, error) { - - var ts uint64 - if cg.Timestamper != nil { - ts = cg.Timestamper(parents, height-parents.Height()) - } else { - ts = parents.MinTimestamp() + uint64(height-parents.Height())*build.BlockDelaySecs - } - - fblk, err := MinerCreateBlock(context.TODO(), cg.sm, cg.w, &api.BlockTemplate{ - Miner: m, - Parents: parents.Key(), - Ticket: vrfticket, - Eproof: eticket, - BeaconValues: bvals, - Messages: msgs, - Epoch: height, - Timestamp: ts, - WinningPoStProof: wpost, - }) - if err != nil { - return nil, err - } - - return fblk, err -} - -// ResyncBankerNonce is used for dealing with messages made when -// simulating forks -func (cg *ChainGen) ResyncBankerNonce(ts *types.TipSet) error { - act, err := cg.sm.GetActor(cg.banker, ts) - if err != nil { - return err - } - - cg.bankerNonce = act.Nonce - return nil -} - -func (cg *ChainGen) Banker() address.Address { - return cg.banker -} - -func (cg *ChainGen) Wallet() *wallet.Wallet { - return cg.w -} - -func getRandomMessages(cg *ChainGen) ([]*types.SignedMessage, error) { - msgs := make([]*types.SignedMessage, cg.msgsPerBlock) - for m := range msgs { - msg := types.Message{ - To: 
cg.receivers[m%len(cg.receivers)], - From: cg.banker, - - Nonce: atomic.AddUint64(&cg.bankerNonce, 1) - 1, - - Value: types.NewInt(uint64(m + 1)), - - Method: 0, - - GasLimit: 10000, - GasPrice: types.NewInt(0), - } - - sig, err := cg.w.Sign(context.TODO(), cg.banker, msg.Cid().Bytes()) - if err != nil { - return nil, err - } - - msgs[m] = &types.SignedMessage{ - Message: msg, - Signature: *sig, - } - } - - return msgs, nil -} - -func (cg *ChainGen) YieldRepo() (repo.Repo, error) { - if err := cg.lr.Close(); err != nil { - return nil, err - } - return cg.r, nil -} - -type MiningCheckAPI interface { - ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) - - MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) - - WalletSign(context.Context, address.Address, []byte) (*crypto.Signature, error) -} - -type mca struct { - w *wallet.Wallet - sm *stmgr.StateManager - pv ffiwrapper.Verifier - bcn beacon.RandomBeacon -} - -func (mca mca) ChainGetRandomness(ctx context.Context, tsk types.TipSetKey, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) (abi.Randomness, error) { - pts, err := mca.sm.ChainStore().LoadTipSet(tsk) - if err != nil { - return nil, xerrors.Errorf("loading tipset key: %w", err) - } - - return mca.sm.ChainStore().GetRandomness(ctx, pts.Cids(), personalization, randEpoch, entropy) -} - -func (mca mca) MinerGetBaseInfo(ctx context.Context, maddr address.Address, epoch abi.ChainEpoch, tsk types.TipSetKey) (*api.MiningBaseInfo, error) { - return stmgr.MinerGetBaseInfo(ctx, mca.sm, mca.bcn, tsk, epoch, maddr, mca.pv) -} - -func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*crypto.Signature, error) { - return mca.w.Sign(ctx, a, v) -} - -type WinningPoStProver interface { - GenerateCandidates(context.Context, 
abi.PoStRandomness, uint64) ([]uint64, error) - ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) -} - -type wppProvider struct{} - -func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandomness, _ uint64) ([]uint64, error) { - return []uint64{0}, nil -} - -func (wpp *wppProvider) ComputeProof(context.Context, []abi.SectorInfo, abi.PoStRandomness) ([]abi.PoStProof, error) { - return ValidWpostForTesting, nil -} - -func IsRoundWinner(ctx context.Context, ts *types.TipSet, round abi.ChainEpoch, - miner address.Address, brand types.BeaconEntry, mbi *api.MiningBaseInfo, a MiningCheckAPI) (*types.ElectionProof, error) { - - buf := new(bytes.Buffer) - if err := miner.MarshalCBOR(buf); err != nil { - return nil, xerrors.Errorf("failed to cbor marshal address: %w") - } - - electionRand, err := store.DrawRandomness(brand.Data, crypto.DomainSeparationTag_ElectionProofProduction, round, buf.Bytes()) - if err != nil { - return nil, xerrors.Errorf("failed to draw randomness: %w", err) - } - - vrfout, err := ComputeVRF(ctx, a.WalletSign, mbi.WorkerKey, electionRand) - if err != nil { - return nil, xerrors.Errorf("failed to compute VRF: %w", err) - } - - // TODO: wire in real power - if !types.IsTicketWinner(vrfout, mbi.MinerPower, mbi.NetworkPower) { - return nil, nil - } - - return &types.ElectionProof{VRFProof: vrfout}, nil -} - -type SignFunc func(context.Context, address.Address, []byte) (*crypto.Signature, error) - -func VerifyVRF(ctx context.Context, worker address.Address, vrfBase, vrfproof []byte) error { - _, span := trace.StartSpan(ctx, "VerifyVRF") - defer span.End() - - sig := &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: vrfproof, - } - - if err := sigs.Verify(sig, worker, vrfBase); err != nil { - return xerrors.Errorf("vrf was invalid: %w", err) - } - - return nil -} - -func ComputeVRF(ctx context.Context, sign SignFunc, worker address.Address, sigInput []byte) ([]byte, error) { - sig, err := 
sign(ctx, worker, sigInput) - if err != nil { - return nil, err - } - - if sig.Type != crypto.SigTypeBLS { - return nil, fmt.Errorf("miner worker address was not a BLS key") - } - - return sig.Data, nil -} - -type genFakeVerifier struct{} - -var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) - -func (m genFakeVerifier) VerifySeal(svi abi.SealVerifyInfo) (bool, error) { - return true, nil -} - -func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info abi.WinningPoStVerifyInfo) (bool, error) { - panic("not supported") -} - -func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info abi.WindowPoStVerifyInfo) (bool, error) { - panic("not supported") -} - -func (m genFakeVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proof abi.RegisteredPoStProof, id abi.ActorID, randomness abi.PoStRandomness, u uint64) ([]uint64, error) { - panic("not supported") -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/gen_test.go b/vendor/github.com/filecoin-project/lotus/chain/gen/gen_test.go deleted file mode 100644 index 7a4d73031a..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/gen_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package gen - -import ( - "testing" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - - _ "github.com/filecoin-project/lotus/lib/sigs/bls" - _ "github.com/filecoin-project/lotus/lib/sigs/secp" -) - -func init() { - miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ - abi.RegisteredSealProof_StackedDrg2KiBV1: {}, - } - power.ConsensusMinerMinPower = big.NewInt(2048) - verifreg.MinVerifiedDealSize = big.NewInt(256) -} - -func testGeneration(t testing.TB, n int, msgs int, sectors int) { - g, err := 
NewGeneratorWithSectors(sectors) - if err != nil { - t.Fatalf("%+v", err) - } - - g.msgsPerBlock = msgs - - for i := 0; i < n; i++ { - mts, err := g.NextTipSet() - if err != nil { - t.Fatalf("error at H:%d, %+v", i, err) - } - _ = mts - } -} - -func TestChainGeneration(t *testing.T) { - testGeneration(t, 10, 20, 1) - testGeneration(t, 10, 20, 25) -} - -func BenchmarkChainGeneration(b *testing.B) { - b.Run("0-messages", func(b *testing.B) { - testGeneration(b, b.N, 0, 1) - }) - - b.Run("10-messages", func(b *testing.B) { - testGeneration(b, b.N, 10, 1) - }) - - b.Run("100-messages", func(b *testing.B) { - testGeneration(b, b.N, 100, 1) - }) - - b.Run("1000-messages", func(b *testing.B) { - testGeneration(b, b.N, 1000, 1) - }) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/genesis.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/genesis.go deleted file mode 100644 index 203d8529e6..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/genesis.go +++ /dev/null @@ -1,370 +0,0 @@ -package genesis - -import ( - "context" - "encoding/json" - - "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/genesis" -) - -const AccountStart = 
100 -const MinerStart = 1000 -const MaxAccounts = MinerStart - AccountStart - -var log = logging.Logger("genesis") - -type GenesisBootstrap struct { - Genesis *types.BlockHeader -} - -/* -From a list of parameters, create a genesis block / initial state - -The process: -- Bootstrap state (MakeInitialStateTree) - - Create empty state - - Create system actor - - Make init actor - - Create accounts mappings - - Set NextID to MinerStart - - Setup Reward (1.4B fil) - - Setup Cron - - Create empty power actor - - Create empty market - - Create verified registry - - Setup burnt fund address - - Initialize account / msig balances -- Instantiate early vm with genesis syscalls - - Create miners - - Each: - - power.CreateMiner, set msg value to PowerBalance - - market.AddFunds with correct value - - market.PublishDeals for related sectors - - Set precommits - - Commit presealed sectors - -Data Types: - -PreSeal :{ - CommR CID - CommD CID - SectorID SectorNumber - Deal market.DealProposal # Start at 0, self-deal! -} - -Genesis: { - Accounts: [ # non-miner, non-singleton actors, max len = MaxAccounts - { - Type: "account" / "multisig", - Value: "attofil", - [Meta: {msig settings, account key..}] - },... - ], - Miners: [ - { - Owner, Worker Addr # ID - MarketBalance, PowerBalance TokenAmount - SectorSize uint64 - PreSeals []PreSeal - },... 
- ], -} - -*/ - -func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template genesis.Template) (*state.StateTree, error) { - // Create empty state tree - - cst := cbor.NewCborStore(bs) - _, err := cst.Put(context.TODO(), []struct{}{}) - if err != nil { - return nil, xerrors.Errorf("putting empty object: %w", err) - } - - state, err := state.NewStateTree(cst) - if err != nil { - return nil, xerrors.Errorf("making new state tree: %w", err) - } - - emptyobject, err := cst.Put(context.TODO(), []struct{}{}) - if err != nil { - return nil, xerrors.Errorf("failed putting empty object: %w", err) - } - - // Create system actor - - sysact, err := SetupSystemActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup init actor: %w", err) - } - if err := state.SetActor(builtin.SystemActorAddr, sysact); err != nil { - return nil, xerrors.Errorf("set init actor: %w", err) - } - - // Create init actor - - initact, err := SetupInitActor(bs, template.NetworkName, template.Accounts) - if err != nil { - return nil, xerrors.Errorf("setup init actor: %w", err) - } - if err := state.SetActor(builtin.InitActorAddr, initact); err != nil { - return nil, xerrors.Errorf("set init actor: %w", err) - } - - // Setup reward - rewact, err := SetupRewardActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup init actor: %w", err) - } - - err = state.SetActor(builtin.RewardActorAddr, rewact) - if err != nil { - return nil, xerrors.Errorf("set network account actor: %w", err) - } - - // Setup cron - cronact, err := SetupCronActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup cron actor: %w", err) - } - if err := state.SetActor(builtin.CronActorAddr, cronact); err != nil { - return nil, xerrors.Errorf("set cron actor: %w", err) - } - - // Create empty power actor - spact, err := SetupStoragePowerActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup storage market actor: %w", err) - } - if err := state.SetActor(builtin.StoragePowerActorAddr, spact); 
err != nil { - return nil, xerrors.Errorf("set storage market actor: %w", err) - } - - // Create empty market actor - marketact, err := SetupStorageMarketActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup storage market actor: %w", err) - } - if err := state.SetActor(builtin.StorageMarketActorAddr, marketact); err != nil { - return nil, xerrors.Errorf("set market actor: %w", err) - } - - // Create verified registry - verifact, err := SetupVerifiedRegistryActor(bs) - if err != nil { - return nil, xerrors.Errorf("setup storage market actor: %w", err) - } - if err := state.SetActor(builtin.VerifiedRegistryActorAddr, verifact); err != nil { - return nil, xerrors.Errorf("set market actor: %w", err) - } - - // Setup burnt-funds - err = state.SetActor(builtin.BurntFundsActorAddr, &types.Actor{ - Code: builtin.AccountActorCodeID, - Balance: types.NewInt(0), - Head: emptyobject, - }) - if err != nil { - return nil, xerrors.Errorf("set burnt funds account actor: %w", err) - } - - // Create accounts - for id, info := range template.Accounts { - if info.Type != genesis.TAccount { - return nil, xerrors.New("unsupported account type") // TODO: msigs - } - - ida, err := address.NewIDAddress(uint64(AccountStart + id)) - if err != nil { - return nil, err - } - - var ainfo genesis.AccountMeta - if err := json.Unmarshal(info.Meta, &ainfo); err != nil { - return nil, xerrors.Errorf("unmarshaling account meta: %w", err) - } - - st, err := cst.Put(ctx, &account.State{Address: ainfo.Owner}) - if err != nil { - return nil, err - } - - err = state.SetActor(ida, &types.Actor{ - Code: builtin.AccountActorCodeID, - Balance: info.Balance, - Head: st, - }) - if err != nil { - return nil, xerrors.Errorf("setting account from actmap: %w", err) - } - } - - vregroot, err := address.NewIDAddress(80) - if err != nil { - return nil, err - } - - vrst, err := cst.Put(ctx, &account.State{Address: RootVerifierAddr}) - if err != nil { - return nil, err - } - - err = state.SetActor(vregroot, 
&types.Actor{ - Code: builtin.AccountActorCodeID, - Balance: types.NewInt(0), - Head: vrst, - }) - - if err != nil { - return nil, xerrors.Errorf("setting account from actmap: %w", err) - } - - return state, nil -} - -func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, stateroot cid.Cid, template genesis.Template) (cid.Cid, error) { - verifNeeds := make(map[address.Address]abi.PaddedPieceSize) - var sum abi.PaddedPieceSize - for _, m := range template.Miners { - for _, s := range m.Sectors { - amt := s.Deal.PieceSize - verifNeeds[s.Deal.Client] += amt - sum += amt - } - } - - verifier, err := address.NewIDAddress(80) - if err != nil { - return cid.Undef, err - } - - vm, err := vm.NewVM(stateroot, 0, &fakeRand{}, cs.Blockstore(), &fakedSigSyscalls{cs.VMSys()}) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create NewVM: %w", err) - } - - _, err = doExecValue(ctx, vm, builtin.VerifiedRegistryActorAddr, RootVerifierAddr, types.NewInt(0), builtin.MethodsVerifiedRegistry.AddVerifier, mustEnc(&verifreg.AddVerifierParams{ - Address: verifier, - Allowance: abi.NewStoragePower(int64(sum)), // eh, close enough - - })) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to failed to create verifier: %w", err) - } - - for c, amt := range verifNeeds { - _, err := doExecValue(ctx, vm, builtin.VerifiedRegistryActorAddr, verifier, types.NewInt(0), builtin.MethodsVerifiedRegistry.AddVerifiedClient, mustEnc(&verifreg.AddVerifiedClientParams{ - Address: c, - Allowance: abi.NewStoragePower(int64(amt)), - })) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to add verified client: %w", err) - } - } - - return vm.Flush(ctx) -} - -func MakeGenesisBlock(ctx context.Context, bs bstore.Blockstore, sys runtime.Syscalls, template genesis.Template) (*GenesisBootstrap, error) { - st, err := MakeInitialStateTree(ctx, bs, template) - if err != nil { - return nil, xerrors.Errorf("make initial state tree failed: %w", err) - } - - stateroot, err 
:= st.Flush(ctx) - if err != nil { - return nil, xerrors.Errorf("flush state tree failed: %w", err) - } - - // temp chainstore - cs := store.NewChainStore(bs, datastore.NewMapDatastore(), sys) - - // Verify PreSealed Data - stateroot, err = VerifyPreSealedData(ctx, cs, stateroot, template) - if err != nil { - return nil, xerrors.Errorf("failed to verify presealed data: %w", err) - } - - stateroot, err = SetupStorageMiners(ctx, cs, stateroot, template.Miners) - if err != nil { - return nil, xerrors.Errorf("setup storage miners failed: %w", err) - } - - cst := cbor.NewCborStore(bs) - - emptyroot, err := amt.FromArray(ctx, cst, nil) - if err != nil { - return nil, xerrors.Errorf("amt build failed: %w", err) - } - - mm := &types.MsgMeta{ - BlsMessages: emptyroot, - SecpkMessages: emptyroot, - } - mmb, err := mm.ToStorageBlock() - if err != nil { - return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) - } - if err := bs.Put(mmb); err != nil { - return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) - } - - log.Infof("Empty Genesis root: %s", emptyroot) - - genesisticket := &types.Ticket{ - VRFProof: []byte("vrf proof0000000vrf proof0000000"), - } - - b := &types.BlockHeader{ - Miner: builtin.SystemActorAddr, - Ticket: genesisticket, - Parents: []cid.Cid{}, - Height: 0, - ParentWeight: types.NewInt(0), - ParentStateRoot: stateroot, - Messages: mmb.Cid(), - ParentMessageReceipts: emptyroot, - BLSAggregate: nil, - BlockSig: nil, - Timestamp: template.Timestamp, - ElectionProof: new(types.ElectionProof), - BeaconEntries: []types.BeaconEntry{ - { - Round: 0, - Data: make([]byte, 32), - }, - }, - } - - sb, err := b.ToStorageBlock() - if err != nil { - return nil, xerrors.Errorf("serializing block header failed: %w", err) - } - - if err := bs.Put(sb); err != nil { - return nil, xerrors.Errorf("putting header to blockstore: %w", err) - } - - return &GenesisBootstrap{ - Genesis: b, - }, nil -} diff --git 
a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/miners.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/miners.go deleted file mode 100644 index acc63c31ce..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/miners.go +++ /dev/null @@ -1,263 +0,0 @@ -package genesis - -import ( - "bytes" - "context" - "fmt" - "math/rand" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime" - - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/genesis" -) - -func MinerAddress(genesisIndex uint64) address.Address { - maddr, err := address.NewIDAddress(MinerStart + genesisIndex) - if err != nil { - panic(err) - } - - return maddr -} - -type fakedSigSyscalls struct { - runtime.Syscalls -} - -func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { - return nil -} - -func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sroot cid.Cid, miners []genesis.Miner) (cid.Cid, error) { - vm, err := vm.NewVM(sroot, 0, &fakeRand{}, cs.Blockstore(), &fakedSigSyscalls{cs.VMSys()}) - if err != nil { - return cid.Undef, 
xerrors.Errorf("failed to create NewVM: %w", err) - } - - if len(miners) == 0 { - return cid.Undef, xerrors.New("no genesis miners") - } - - for i, m := range miners { - // Create miner through power actor - i := i - m := m - - spt, err := ffiwrapper.SealProofTypeFromSectorSize(m.SectorSize) - if err != nil { - return cid.Undef, err - } - - var maddr address.Address - { - constructorParams := &power.CreateMinerParams{ - Owner: m.Worker, - Worker: m.Worker, - Peer: []byte(m.PeerId), - SealProofType: spt, - } - - params := mustEnc(constructorParams) - rval, err := doExecValue(ctx, vm, builtin.StoragePowerActorAddr, m.Owner, m.PowerBalance, builtin.MethodsPower.CreateMiner, params) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) - } - - var ma power.CreateMinerReturn - if err := ma.UnmarshalCBOR(bytes.NewReader(rval)); err != nil { - return cid.Undef, xerrors.Errorf("unmarshaling CreateMinerReturn: %w", err) - } - - expma := MinerAddress(uint64(i)) - if ma.IDAddress != expma { - return cid.Undef, xerrors.Errorf("miner assigned wrong address: %s != %s", ma.IDAddress, expma) - } - maddr = ma.IDAddress - } - - // Add market funds - - { - params := mustEnc(&maddr) - _, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, m.MarketBalance, builtin.MethodsMarket.AddBalance, params) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) - } - } - { - params := mustEnc(&m.Worker) - _, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, big.Zero(), builtin.MethodsMarket.AddBalance, params) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to create genesis miner: %w", err) - } - } - - // Publish preseal deals - - var dealIDs []abi.DealID - { - publish := func(params *market.PublishStorageDealsParams) error { - fmt.Printf("publishing %d storage deals on miner %s with worker %s\n", len(params.Deals), params.Deals[0].Proposal.Provider, m.Worker) - 
- ret, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, m.Worker, big.Zero(), builtin.MethodsMarket.PublishStorageDeals, mustEnc(params)) - if err != nil { - return xerrors.Errorf("failed to create genesis miner: %w", err) - } - var ids market.PublishStorageDealsReturn - if err := ids.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { - return xerrors.Errorf("unmarsahling publishStorageDeals result: %w", err) - } - - dealIDs = append(dealIDs, ids.IDs...) - return nil - } - - params := &market.PublishStorageDealsParams{} - for _, preseal := range m.Sectors { - preseal.Deal.VerifiedDeal = true - params.Deals = append(params.Deals, market.ClientDealProposal{ - Proposal: preseal.Deal, - ClientSignature: crypto.Signature{Type: crypto.SigTypeBLS}, // TODO: do we want to sign these? Or do we want to fake signatures for genesis setup? - }) - - if len(params.Deals) == cbg.MaxLength { - if err := publish(params); err != nil { - return cid.Undef, err - } - - params = &market.PublishStorageDealsParams{} - } - } - - if len(params.Deals) > 0 { - if err := publish(params); err != nil { - return cid.Undef, err - } - } - } - - // Commit sectors - for pi, preseal := range m.Sectors { - preseal := preseal - // TODO: Maybe check seal (Can just be snark inputs, doesn't go into the genesis file) - - // check deals, get dealWeight - var dealWeight market.VerifyDealsOnSectorProveCommitReturn - { - params := &market.VerifyDealsOnSectorProveCommitParams{ - DealIDs: []abi.DealID{dealIDs[pi]}, - SectorExpiry: preseal.Deal.EndEpoch, - } - - ret, err := doExecValue(ctx, vm, builtin.StorageMarketActorAddr, maddr, big.Zero(), builtin.MethodsMarket.VerifyDealsOnSectorProveCommit, mustEnc(params)) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to verify preseal deals miner: %w", err) - } - if err := dealWeight.UnmarshalCBOR(bytes.NewReader(ret)); err != nil { - return cid.Undef, xerrors.Errorf("unmarshaling market onProveCommit result: %w", err) - } - } - - // update power 
claims - { - err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error { - weight := &power.SectorStorageWeightDesc{ - SectorSize: m.SectorSize, - Duration: preseal.Deal.Duration(), - DealWeight: dealWeight.DealWeight, - VerifiedDealWeight: dealWeight.VerifiedDealWeight, - } - - qapower := power.QAPowerForWeight(weight) - - err := st.AddToClaim(&state.AdtStore{cst}, maddr, types.NewInt(uint64(weight.SectorSize)), qapower) - if err != nil { - return xerrors.Errorf("add to claim: %w", err) - } - fmt.Println("Added weight to claim: ", st.TotalRawBytePower, st.TotalQualityAdjPower) - return nil - }) - if err != nil { - return cid.Undef, xerrors.Errorf("register power claim in power actor: %w", err) - } - } - - // Put sectors to miner sector sets - { - newSectorInfo := &miner.SectorOnChainInfo{ - Info: miner.SectorPreCommitInfo{ - SealProof: preseal.ProofType, - SectorNumber: preseal.SectorID, - SealedCID: preseal.CommR, - SealRandEpoch: 0, - DealIDs: []abi.DealID{dealIDs[pi]}, - Expiration: preseal.Deal.EndEpoch, - }, - ActivationEpoch: 0, - DealWeight: dealWeight.DealWeight, - VerifiedDealWeight: dealWeight.VerifiedDealWeight, - } - - err = vm.MutateState(ctx, maddr, func(cst cbor.IpldStore, st *miner.State) error { - store := &state.AdtStore{cst} - - if err = st.PutSector(store, newSectorInfo); err != nil { - return xerrors.Errorf("failed to put sector: %v", err) - } - - if err := st.AddNewSectors(newSectorInfo.Info.SectorNumber); err != nil { - return xerrors.Errorf("failed to add NewSector: %w", err) - } - - return nil - }) - if err != nil { - return cid.Cid{}, xerrors.Errorf("put to sset: %w", err) - } - } - } - - } - - // TODO: to avoid division by zero, we set the initial power actor power to 1, this adjusts that back down so the accounting is accurate. 
- err = vm.MutateState(ctx, builtin.StoragePowerActorAddr, func(cst cbor.IpldStore, st *power.State) error { - st.TotalQualityAdjPower = big.Sub(st.TotalQualityAdjPower, big.NewInt(1)) - return nil - }) - if err != nil { - return cid.Undef, xerrors.Errorf("mutating state: %w", err) - } - - c, err := vm.Flush(ctx) - if err != nil { - return cid.Undef, xerrors.Errorf("flushing vm: %w", err) - } - return c, nil -} - -// TODO: copied from actors test harness, deduplicate or remove from here -type fakeRand struct{} - -func (fr *fakeRand) GetRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) - return out, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t00_system.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t00_system.go deleted file mode 100644 index 581d7e788f..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t00_system.go +++ /dev/null @@ -1,29 +0,0 @@ -package genesis - -import ( - "context" - "github.com/filecoin-project/specs-actors/actors/builtin/system" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/builtin" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" -) - -func SetupSystemActor(bs bstore.Blockstore) (*types.Actor, error) { - var st system.State - - cst := cbor.NewCborStore(bs) - - statecid, err := cst.Put(context.TODO(), &st) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: builtin.SystemActorCodeID, - Head: statecid, - } - - return act, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t01_init.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t01_init.go deleted file mode 100644 index 9f0efb0c65..0000000000 --- 
a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t01_init.go +++ /dev/null @@ -1,74 +0,0 @@ -package genesis - -import ( - "context" - "encoding/json" - "fmt" - - "github.com/filecoin-project/specs-actors/actors/builtin" - - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/ipfs/go-hamt-ipld" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/genesis" -) - -func SetupInitActor(bs bstore.Blockstore, netname string, initialActors []genesis.Actor) (*types.Actor, error) { - if len(initialActors) > MaxAccounts { - return nil, xerrors.New("too many initial actors") - } - - var ias init_.State - ias.NextID = MinerStart - ias.NetworkName = netname - - cst := cbor.NewCborStore(bs) - amap := hamt.NewNode(cst, hamt.UseTreeBitWidth(5)) // TODO: use spec adt map - - for i, a := range initialActors { - if a.Type != genesis.TAccount { - return nil, xerrors.Errorf("unsupported account type: %s", a.Type) // TODO: Support msig (skip here) - } - - var ainfo genesis.AccountMeta - if err := json.Unmarshal(a.Meta, &ainfo); err != nil { - return nil, xerrors.Errorf("unmarshaling account meta: %w", err) - } - - fmt.Printf("init set %s t0%d\n", ainfo.Owner, AccountStart+uint64(i)) - - if err := amap.Set(context.TODO(), string(ainfo.Owner.Bytes()), AccountStart+uint64(i)); err != nil { - return nil, err - } - } - - if err := amap.Set(context.TODO(), string(RootVerifierAddr.Bytes()), 80); err != nil { - return nil, err - } - - if err := amap.Flush(context.TODO()); err != nil { - return nil, err - } - amapcid, err := cst.Put(context.TODO(), amap) - if err != nil { - return nil, err - } - - ias.AddressMap = amapcid - - statecid, err := cst.Put(context.TODO(), &ias) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: builtin.InitActorCodeID, - Head: statecid, - } - - return act, nil -} 
diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t02_reward.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t02_reward.go deleted file mode 100644 index 96feff6710..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t02_reward.go +++ /dev/null @@ -1,30 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" -) - -func SetupRewardActor(bs bstore.Blockstore) (*types.Actor, error) { - cst := cbor.NewCborStore(bs) - - st := reward.ConstructState() - st.LastPerEpochReward = types.FromFil(100) - - hcid, err := cst.Put(context.TODO(), st) - if err != nil { - return nil, err - } - - return &types.Actor{ - Code: builtin.RewardActorCodeID, - Balance: types.BigInt{Int: build.InitialRewardBalance}, - Head: hcid, - }, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t03_cron.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t03_cron.go deleted file mode 100644 index e61a88d180..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t03_cron.go +++ /dev/null @@ -1,29 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/chain/types" -) - -func SetupCronActor(bs bstore.Blockstore) (*types.Actor, error) { - cst := cbor.NewCborStore(bs) - cas := cron.ConstructState(cron.BuiltInEntries()) - - stcid, err := cst.Put(context.TODO(), cas) - if err != nil { - return nil, err - } - - return &types.Actor{ - Code: 
builtin.CronActorCodeID, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t04_power.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t04_power.go deleted file mode 100644 index 1360269775..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t04_power.go +++ /dev/null @@ -1,47 +0,0 @@ -package genesis - -import ( - "context" - "github.com/filecoin-project/specs-actors/actors/builtin" - - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/ipfs/go-hamt-ipld" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/chain/types" -) - -func SetupStoragePowerActor(bs bstore.Blockstore) (*types.Actor, error) { - ctx := context.TODO() - cst := cbor.NewCborStore(bs) - nd := hamt.NewNode(cst, hamt.UseTreeBitWidth(5)) - emptyhamt, err := cst.Put(ctx, nd) - if err != nil { - return nil, err - } - - sms := &power.State{ - TotalRawBytePower: big.NewInt(0), - TotalQualityAdjPower: big.NewInt(1), // TODO: has to be 1 initially to avoid div by zero. 
Kinda annoying, should find a way to fix - TotalPledgeCollateral: big.NewInt(0), - MinerCount: 0, - CronEventQueue: emptyhamt, - LastEpochTick: 0, - Claims: emptyhamt, - NumMinersMeetingMinPower: 0, - } - - stcid, err := cst.Put(ctx, sms) - if err != nil { - return nil, err - } - - return &types.Actor{ - Code: builtin.StoragePowerActorCodeID, - Head: stcid, - Nonce: 0, - Balance: types.NewInt(0), - }, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t05_market.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t05_market.go deleted file mode 100644 index 9c55dff19a..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t05_market.go +++ /dev/null @@ -1,42 +0,0 @@ -package genesis - -import ( - "context" - "github.com/ipfs/go-hamt-ipld" - - "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/lotus/chain/types" -) - -func SetupStorageMarketActor(bs bstore.Blockstore) (*types.Actor, error) { - cst := cbor.NewCborStore(bs) - - a, err := amt.NewAMT(cst).Flush(context.TODO()) - if err != nil { - return nil, err - } - h, err := cst.Put(context.TODO(), hamt.NewNode(cst, hamt.UseTreeBitWidth(5))) - if err != nil { - return nil, err - } - - sms := market.ConstructState(a, h, h) - - stcid, err := cst.Put(context.TODO(), sms) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: builtin.StorageMarketActorCodeID, - Head: stcid, - Balance: types.NewInt(0), - } - - return act, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t06_vreg.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t06_vreg.go deleted file mode 100644 index 093925d3ed..0000000000 --- 
a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/t06_vreg.go +++ /dev/null @@ -1,59 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-hamt-ipld" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - - "github.com/filecoin-project/lotus/chain/types" -) - -var RootVerifierAddr address.Address - -var RootVerifierID address.Address - -func init() { - k, err := address.NewFromString("t3qfoulel6fy6gn3hjmbhpdpf6fs5aqjb5fkurhtwvgssizq4jey5nw4ptq5up6h7jk7frdvvobv52qzmgjinq") - if err != nil { - panic(err) - } - - RootVerifierAddr = k - - idk, err := address.NewFromString("t080") - if err != nil { - panic(err) - } - - RootVerifierID = idk -} - -func SetupVerifiedRegistryActor(bs bstore.Blockstore) (*types.Actor, error) { - cst := cbor.NewCborStore(bs) - - h, err := cst.Put(context.TODO(), hamt.NewNode(cst, hamt.UseTreeBitWidth(5))) - if err != nil { - return nil, err - } - - sms := verifreg.ConstructState(h, RootVerifierID) - - stcid, err := cst.Put(context.TODO(), sms) - if err != nil { - return nil, err - } - - act := &types.Actor{ - Code: builtin.VerifiedRegistryActorCodeID, - Head: stcid, - Balance: types.NewInt(0), - } - - return act, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/util.go b/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/util.go deleted file mode 100644 index 0a5d7e16b6..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/genesis/util.go +++ /dev/null @@ -1,53 +0,0 @@ -package genesis - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/actors" - 
"github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" -) - -func mustEnc(i cbg.CBORMarshaler) []byte { - enc, err := actors.SerializeParams(i) - if err != nil { - panic(err) // ok - } - return enc -} - -func doExec(ctx context.Context, vm *vm.VM, to, from address.Address, method abi.MethodNum, params []byte) ([]byte, error) { - return doExecValue(ctx, vm, to, from, types.NewInt(0), method, params) -} - -func doExecValue(ctx context.Context, vm *vm.VM, to, from address.Address, value types.BigInt, method abi.MethodNum, params []byte) ([]byte, error) { - act, err := vm.StateTree().GetActor(from) - if err != nil { - return nil, xerrors.Errorf("doExec failed to get from actor: %w", err) - } - - ret, err := vm.ApplyImplicitMessage(ctx, &types.Message{ - To: to, - From: from, - Method: method, - Params: params, - GasLimit: 1_000_000_000_000_000, - GasPrice: types.NewInt(0), - Value: value, - Nonce: act.Nonce, - }) - if err != nil { - return nil, xerrors.Errorf("doExec apply message failed: %w", err) - } - - if ret.ExitCode != 0 { - return nil, xerrors.Errorf("failed to call method: %w", ret.ActorErr) - } - - return ret.Return, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/gen/mining.go b/vendor/github.com/filecoin-project/lotus/chain/gen/mining.go deleted file mode 100644 index bc809a8889..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/gen/mining.go +++ /dev/null @@ -1,177 +0,0 @@ -package gen - -import ( - "context" - - bls "github.com/filecoin-project/filecoin-ffi" - amt "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/specs-actors/actors/crypto" - cid "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/stmgr" - 
"github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/chain/wallet" -) - -func MinerCreateBlock(ctx context.Context, sm *stmgr.StateManager, w *wallet.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) { - - pts, err := sm.ChainStore().LoadTipSet(bt.Parents) - if err != nil { - return nil, xerrors.Errorf("failed to load parent tipset: %w", err) - } - - st, recpts, err := sm.TipSetState(ctx, pts) - if err != nil { - return nil, xerrors.Errorf("failed to load tipset state: %w", err) - } - - worker, err := stmgr.GetMinerWorkerRaw(ctx, sm, st, bt.Miner) - if err != nil { - return nil, xerrors.Errorf("failed to get miner worker: %w", err) - } - - next := &types.BlockHeader{ - Miner: bt.Miner, - Parents: bt.Parents.Cids(), - Ticket: bt.Ticket, - ElectionProof: bt.Eproof, - - BeaconEntries: bt.BeaconValues, - Height: bt.Epoch, - Timestamp: bt.Timestamp, - WinPoStProof: bt.WinningPoStProof, - ParentStateRoot: st, - ParentMessageReceipts: recpts, - } - - var blsMessages []*types.Message - var secpkMessages []*types.SignedMessage - - var blsMsgCids, secpkMsgCids []cid.Cid - var blsSigs []crypto.Signature - for _, msg := range bt.Messages { - if msg.Signature.Type == crypto.SigTypeBLS { - blsSigs = append(blsSigs, msg.Signature) - blsMessages = append(blsMessages, &msg.Message) - - c, err := sm.ChainStore().PutMessage(&msg.Message) - if err != nil { - return nil, err - } - - blsMsgCids = append(blsMsgCids, c) - } else { - c, err := sm.ChainStore().PutMessage(msg) - if err != nil { - return nil, err - } - - secpkMsgCids = append(secpkMsgCids, c) - secpkMessages = append(secpkMessages, msg) - - } - } - - bs := cbor.NewCborStore(sm.ChainStore().Blockstore()) - blsmsgroot, err := amt.FromArray(ctx, bs, toIfArr(blsMsgCids)) - if err != nil { - return nil, xerrors.Errorf("building bls amt: %w", err) - } - secpkmsgroot, err := amt.FromArray(ctx, bs, toIfArr(secpkMsgCids)) - if err != nil { - return 
nil, xerrors.Errorf("building secpk amt: %w", err) - } - - mmcid, err := bs.Put(ctx, &types.MsgMeta{ - BlsMessages: blsmsgroot, - SecpkMessages: secpkmsgroot, - }) - if err != nil { - return nil, err - } - next.Messages = mmcid - - aggSig, err := aggregateSignatures(blsSigs) - if err != nil { - return nil, err - } - - next.BLSAggregate = aggSig - pweight, err := sm.ChainStore().Weight(ctx, pts) - if err != nil { - return nil, err - } - next.ParentWeight = pweight - - cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) - tree, err := state.LoadStateTree(cst, st) - if err != nil { - return nil, xerrors.Errorf("failed to load state tree: %w", err) - } - - waddr, err := vm.ResolveToKeyAddr(tree, cst, worker) - if err != nil { - return nil, xerrors.Errorf("failed to resolve miner address to key address: %w", err) - } - - nosigbytes, err := next.SigningBytes() - if err != nil { - return nil, xerrors.Errorf("failed to get signing bytes for block: %w", err) - } - - sig, err := w.Sign(ctx, waddr, nosigbytes) - if err != nil { - return nil, xerrors.Errorf("failed to sign new block: %w", err) - } - - next.BlockSig = sig - - fullBlock := &types.FullBlock{ - Header: next, - BlsMessages: blsMessages, - SecpkMessages: secpkMessages, - } - - return fullBlock, nil -} - -func aggregateSignatures(sigs []crypto.Signature) (*crypto.Signature, error) { - var blsSigs []bls.Signature - for _, s := range sigs { - var bsig bls.Signature - copy(bsig[:], s.Data) - blsSigs = append(blsSigs, bsig) - } - - aggSig := bls.Aggregate(blsSigs) - if aggSig == nil { - if len(sigs) > 0 { - return nil, xerrors.Errorf("bls.Aggregate returned nil with %d signatures", len(sigs)) - } - - return &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: new(bls.Signature)[:], - }, nil - } - - return &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: aggSig[:], - }, nil -} - -func toIfArr(cids []cid.Cid) []cbg.CBORMarshaler { - out := make([]cbg.CBORMarshaler, 0, len(cids)) - for _, c := range cids { - oc := 
cbg.CborCid(c) - out = append(out, &oc) - } - return out -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/market/fundmgr.go b/vendor/github.com/filecoin-project/lotus/chain/market/fundmgr.go deleted file mode 100644 index 42ad50b2b0..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/market/fundmgr.go +++ /dev/null @@ -1,82 +0,0 @@ -package market - -import ( - "context" - "sync" - - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl/full" -) - -var log = logging.Logger("market_adapter") - -type FundMgr struct { - sm *stmgr.StateManager - mpool full.MpoolAPI - - lk sync.Mutex - available map[address.Address]types.BigInt -} - -func NewFundMgr(sm *stmgr.StateManager, mpool full.MpoolAPI) *FundMgr { - return &FundMgr{ - sm: sm, - mpool: mpool, - - available: map[address.Address]types.BigInt{}, - } -} - -func (fm *FundMgr) EnsureAvailable(ctx context.Context, addr, wallet address.Address, amt types.BigInt) (cid.Cid, error) { - fm.lk.Lock() - avail, ok := fm.available[addr] - if !ok { - bal, err := fm.sm.MarketBalance(ctx, addr, nil) - if err != nil { - fm.lk.Unlock() - return cid.Undef, err - } - - avail = types.BigSub(bal.Escrow, bal.Locked) - } - - toAdd := types.NewInt(0) - avail = types.BigSub(avail, amt) - if avail.LessThan(types.NewInt(0)) { - // TODO: some rules around adding more to avoid doing stuff on-chain - // all the time - toAdd = types.BigSub(toAdd, avail) - avail = types.NewInt(0) - } - fm.available[addr] = avail - - fm.lk.Unlock() - - var err error - params, err := actors.SerializeParams(&addr) - if err != nil { - return cid.Undef, err - } - - smsg, err := fm.mpool.MpoolPushMessage(ctx, &types.Message{ - To: 
builtin.StorageMarketActorAddr, - From: wallet, - Value: toAdd, - GasPrice: types.NewInt(0), - GasLimit: 1000000, - Method: builtin.MethodsMarket.AddBalance, - Params: params, - }) - if err != nil { - return cid.Undef, err - } - - return smsg.Cid(), nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/messagepool/messagepool.go b/vendor/github.com/filecoin-project/lotus/chain/messagepool/messagepool.go deleted file mode 100644 index b8ac55c590..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/messagepool/messagepool.go +++ /dev/null @@ -1,884 +0,0 @@ -package messagepool - -import ( - "bytes" - "context" - "errors" - "fmt" - "math" - "sort" - "sync" - "time" - - "github.com/filecoin-project/specs-actors/actors/crypto" - lru "github.com/hashicorp/golang-lru" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" - "github.com/ipfs/go-datastore/query" - logging "github.com/ipfs/go-log/v2" - pubsub "github.com/libp2p/go-libp2p-pubsub" - lps "github.com/whyrusleeping/pubsub" - "go.uber.org/multierr" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -var log = logging.Logger("messagepool") - -const futureDebug = false - -const ReplaceByFeeRatio = 1.25 - -var ( - rbfNum = types.NewInt(uint64((ReplaceByFeeRatio - 1) * 256)) - rbfDenom = types.NewInt(256) -) - -var ( - ErrMessageTooBig = errors.New("message too big") - - ErrMessageValueTooHigh = errors.New("cannot send more filecoin than will ever exist") - - ErrNonceTooLow = errors.New("message nonce too low") - - ErrNotEnoughFunds = errors.New("not enough funds to execute transaction") - - ErrInvalidToAddr = errors.New("message had 
invalid to address") - - ErrBroadcastAnyway = errors.New("broadcasting message despite validation fail") -) - -const ( - localMsgsDs = "/mpool/local" - - localUpdates = "update" -) - -type MessagePool struct { - lk sync.Mutex - - closer chan struct{} - repubTk *time.Ticker - - localAddrs map[address.Address]struct{} - - pending map[address.Address]*msgSet - - curTsLk sync.Mutex // DO NOT LOCK INSIDE lk - curTs *types.TipSet - - api Provider - - minGasPrice types.BigInt - - maxTxPoolSize int - - blsSigCache *lru.TwoQueueCache - - changes *lps.PubSub - - localMsgs datastore.Datastore - - netName dtypes.NetworkName - - sigValCache *lru.TwoQueueCache -} - -type msgSet struct { - msgs map[uint64]*types.SignedMessage - nextNonce uint64 -} - -func newMsgSet() *msgSet { - return &msgSet{ - msgs: make(map[uint64]*types.SignedMessage), - } -} - -func (ms *msgSet) add(m *types.SignedMessage) error { - if len(ms.msgs) == 0 || m.Message.Nonce >= ms.nextNonce { - ms.nextNonce = m.Message.Nonce + 1 - } - exms, has := ms.msgs[m.Message.Nonce] - if has { - if m.Cid() != exms.Cid() { - // check if RBF passes - minPrice := exms.Message.GasPrice - minPrice = types.BigAdd(minPrice, types.BigDiv(types.BigMul(minPrice, rbfNum), rbfDenom)) - minPrice = types.BigAdd(minPrice, types.NewInt(1)) - if types.BigCmp(m.Message.GasPrice, minPrice) > 0 { - log.Infow("add with RBF", "oldprice", exms.Message.GasPrice, - "newprice", m.Message.GasPrice, "addr", m.Message.From, "nonce", m.Message.Nonce) - } else { - log.Info("add with duplicate nonce") - return xerrors.Errorf("message to %s with nonce %d already in mpool", m.Message.To, m.Message.Nonce) - } - } - } - ms.msgs[m.Message.Nonce] = m - - return nil -} - -type Provider interface { - SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet - PutMessage(m types.ChainMsg) (cid.Cid, error) - PubSubPublish(string, []byte) error - StateGetActor(address.Address, *types.TipSet) (*types.Actor, error) - StateAccountKey(context.Context, 
address.Address, *types.TipSet) (address.Address, error) - MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) - MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) - LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) -} - -type mpoolProvider struct { - sm *stmgr.StateManager - ps *pubsub.PubSub -} - -func NewProvider(sm *stmgr.StateManager, ps *pubsub.PubSub) Provider { - return &mpoolProvider{sm, ps} -} - -func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { - mpp.sm.ChainStore().SubscribeHeadChanges(cb) - return mpp.sm.ChainStore().GetHeaviestTipSet() -} - -func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) { - return mpp.sm.ChainStore().PutMessage(m) -} - -func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { - return mpp.ps.Publish(k, v) -} - -func (mpp *mpoolProvider) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { - return mpp.sm.GetActor(addr, ts) -} - -func (mpp *mpoolProvider) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - return mpp.sm.ResolveToKeyAddress(ctx, addr, ts) -} - -func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - return mpp.sm.ChainStore().MessagesForBlock(h) -} - -func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - return mpp.sm.ChainStore().MessagesForTipset(ts) -} - -func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { - return mpp.sm.ChainStore().LoadTipSet(tsk) -} - -func New(api Provider, ds dtypes.MetadataDS, netName dtypes.NetworkName) (*MessagePool, error) { - cache, _ := lru.New2Q(build.BlsSignatureCacheSize) - verifcache, _ := lru.New2Q(build.VerifSigCacheSize) - - mp := &MessagePool{ - closer: make(chan struct{}), - repubTk: time.NewTicker(time.Duration(build.BlockDelaySecs) * 
10 * time.Second), - localAddrs: make(map[address.Address]struct{}), - pending: make(map[address.Address]*msgSet), - minGasPrice: types.NewInt(0), - maxTxPoolSize: 5000, - blsSigCache: cache, - sigValCache: verifcache, - changes: lps.New(50), - localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)), - api: api, - netName: netName, - } - - if err := mp.loadLocal(); err != nil { - log.Errorf("loading local messages: %+v", err) - } - - go mp.repubLocal() - - mp.curTs = api.SubscribeHeadChanges(func(rev, app []*types.TipSet) error { - err := mp.HeadChange(rev, app) - if err != nil { - log.Errorf("mpool head notif handler error: %+v", err) - } - return err - }) - - return mp, nil -} - -func (mp *MessagePool) Close() error { - close(mp.closer) - return nil -} - -func (mp *MessagePool) repubLocal() { - for { - select { - case <-mp.repubTk.C: - mp.lk.Lock() - - msgsForAddr := make(map[address.Address][]*types.SignedMessage) - for a := range mp.localAddrs { - msgsForAddr[a] = mp.pendingFor(a) - } - - mp.lk.Unlock() - - var errout error - outputMsgs := []*types.SignedMessage{} - - for a, msgs := range msgsForAddr { - a, err := mp.api.StateGetActor(a, nil) - if err != nil { - errout = multierr.Append(errout, xerrors.Errorf("could not get actor state: %w", err)) - continue - } - - curNonce := a.Nonce - for _, m := range msgs { - if m.Message.Nonce < curNonce { - continue - } - if m.Message.Nonce != curNonce { - break - } - outputMsgs = append(outputMsgs, m) - curNonce++ - } - - } - - if len(outputMsgs) != 0 { - log.Infow("republishing local messages", "n", len(outputMsgs)) - } - - for _, msg := range outputMsgs { - msgb, err := msg.Serialize() - if err != nil { - errout = multierr.Append(errout, xerrors.Errorf("could not serialize: %w", err)) - continue - } - - err = mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) - if err != nil { - errout = multierr.Append(errout, xerrors.Errorf("could not publish: %w", err)) - continue - } - } - - if errout != nil { - 
log.Errorf("errors while republishing: %+v", errout) - } - case <-mp.closer: - mp.repubTk.Stop() - return - } - } - -} - -func (mp *MessagePool) addLocal(m *types.SignedMessage, msgb []byte) error { - mp.localAddrs[m.Message.From] = struct{}{} - - if err := mp.localMsgs.Put(datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { - return xerrors.Errorf("persisting local message: %w", err) - } - - return nil -} - -func (mp *MessagePool) Push(m *types.SignedMessage) (cid.Cid, error) { - msgb, err := m.Serialize() - if err != nil { - return cid.Undef, err - } - - if err := mp.Add(m); err != nil { - return cid.Undef, err - } - - mp.lk.Lock() - if err := mp.addLocal(m, msgb); err != nil { - mp.lk.Unlock() - return cid.Undef, err - } - mp.lk.Unlock() - - return m.Cid(), mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) -} - -func (mp *MessagePool) Add(m *types.SignedMessage) error { - // big messages are bad, anti DOS - if m.Size() > 32*1024 { - return xerrors.Errorf("mpool message too large (%dB): %w", m.Size(), ErrMessageTooBig) - } - - if m.Message.To == address.Undef { - return ErrInvalidToAddr - } - - if !m.Message.Value.LessThan(types.TotalFilecoinInt) { - return ErrMessageValueTooHigh - } - - if err := mp.VerifyMsgSig(m); err != nil { - log.Warnf("mpooladd signature verification failed: %s", err) - return err - } - - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - return mp.addTs(m, mp.curTs) -} - -func sigCacheKey(m *types.SignedMessage) (string, error) { - switch m.Signature.Type { - case crypto.SigTypeBLS: - if len(m.Signature.Data) < 90 { - return "", fmt.Errorf("bls signature too short") - } - - return string(m.Cid().Bytes()) + string(m.Signature.Data[64:]), nil - case crypto.SigTypeSecp256k1: - return string(m.Cid().Bytes()), nil - default: - return "", xerrors.Errorf("unrecognized signature type: %d", m.Signature.Type) - } -} - -func (mp *MessagePool) VerifyMsgSig(m *types.SignedMessage) error { - sck, err := sigCacheKey(m) - if err != nil 
{ - return err - } - - _, ok := mp.sigValCache.Get(sck) - if ok { - // already validated, great - return nil - } - - if err := sigs.Verify(&m.Signature, m.Message.From, m.Message.Cid().Bytes()); err != nil { - return err - } - - mp.sigValCache.Add(sck, struct{}{}) - - return nil -} - -func (mp *MessagePool) addTs(m *types.SignedMessage, curTs *types.TipSet) error { - snonce, err := mp.getStateNonce(m.Message.From, curTs) - if err != nil { - return xerrors.Errorf("failed to look up actor state nonce: %s: %w", err, ErrBroadcastAnyway) - } - - if snonce > m.Message.Nonce { - return xerrors.Errorf("minimum expected nonce is %d: %w", snonce, ErrNonceTooLow) - } - - balance, err := mp.getStateBalance(m.Message.From, curTs) - if err != nil { - return xerrors.Errorf("failed to check sender balance: %s: %w", err, ErrBroadcastAnyway) - } - - if balance.LessThan(m.Message.RequiredFunds()) { - return xerrors.Errorf("not enough funds (required: %s, balance: %s): %w", types.FIL(m.Message.RequiredFunds()), types.FIL(balance), ErrNotEnoughFunds) - } - - mp.lk.Lock() - defer mp.lk.Unlock() - - return mp.addLocked(m) -} - -func (mp *MessagePool) addSkipChecks(m *types.SignedMessage) error { - mp.lk.Lock() - defer mp.lk.Unlock() - - return mp.addLocked(m) -} - -func (mp *MessagePool) addLocked(m *types.SignedMessage) error { - log.Debugf("mpooladd: %s %d", m.Message.From, m.Message.Nonce) - if m.Signature.Type == crypto.SigTypeBLS { - mp.blsSigCache.Add(m.Cid(), m.Signature) - } - - if m.Message.GasLimit > build.BlockGasLimit { - return xerrors.Errorf("given message has too high of a gas limit") - } - - if _, err := mp.api.PutMessage(m); err != nil { - log.Warnf("mpooladd cs.PutMessage failed: %s", err) - return err - } - - if _, err := mp.api.PutMessage(&m.Message); err != nil { - log.Warnf("mpooladd cs.PutMessage failed: %s", err) - return err - } - - mset, ok := mp.pending[m.Message.From] - if !ok { - mset = newMsgSet() - mp.pending[m.Message.From] = mset - } - - if err := 
mset.add(m); err != nil { - log.Info(err) - } - - mp.changes.Pub(api.MpoolUpdate{ - Type: api.MpoolAdd, - Message: m, - }, localUpdates) - return nil -} - -func (mp *MessagePool) GetNonce(addr address.Address) (uint64, error) { - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - - mp.lk.Lock() - defer mp.lk.Unlock() - - return mp.getNonceLocked(addr, mp.curTs) -} - -func (mp *MessagePool) getNonceLocked(addr address.Address, curTs *types.TipSet) (uint64, error) { - stateNonce, err := mp.getStateNonce(addr, curTs) // sanity check - if err != nil { - return 0, err - } - - mset, ok := mp.pending[addr] - if ok { - if stateNonce > mset.nextNonce { - log.Errorf("state nonce was larger than mset.nextNonce (%d > %d)", stateNonce, mset.nextNonce) - - return stateNonce, nil - } - - return mset.nextNonce, nil - } - - return stateNonce, nil -} - -func (mp *MessagePool) getStateNonce(addr address.Address, curTs *types.TipSet) (uint64, error) { - // TODO: this method probably should be cached - - act, err := mp.api.StateGetActor(addr, curTs) - if err != nil { - return 0, err - } - - baseNonce := act.Nonce - - // TODO: the correct thing to do here is probably to set curTs to chain.head - // but since we have an accurate view of the world until a head change occurs, - // this should be fine - if curTs == nil { - return baseNonce, nil - } - - msgs, err := mp.api.MessagesForTipset(curTs) - if err != nil { - return 0, xerrors.Errorf("failed to check messages for tipset: %w", err) - } - - for _, m := range msgs { - msg := m.VMMessage() - if msg.From == addr { - if msg.Nonce != baseNonce { - return 0, xerrors.Errorf("tipset %s has bad nonce ordering (%d != %d)", curTs.Cids(), msg.Nonce, baseNonce) - } - baseNonce++ - } - } - - return baseNonce, nil -} - -func (mp *MessagePool) getStateBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) { - act, err := mp.api.StateGetActor(addr, ts) - if err != nil { - return types.EmptyInt, err - } - - return act.Balance, nil -} - -func 
(mp *MessagePool) PushWithNonce(ctx context.Context, addr address.Address, cb func(address.Address, uint64) (*types.SignedMessage, error)) (*types.SignedMessage, error) { - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - - mp.lk.Lock() - defer mp.lk.Unlock() - - fromKey := addr - if fromKey.Protocol() == address.ID { - var err error - fromKey, err = mp.api.StateAccountKey(ctx, fromKey, mp.curTs) - if err != nil { - return nil, xerrors.Errorf("resolving sender key: %w", err) - } - } - - nonce, err := mp.getNonceLocked(fromKey, mp.curTs) - if err != nil { - return nil, xerrors.Errorf("get nonce locked failed: %w", err) - } - - msg, err := cb(fromKey, nonce) - if err != nil { - return nil, err - } - - msgb, err := msg.Serialize() - if err != nil { - return nil, err - } - - if err := mp.addLocked(msg); err != nil { - return nil, xerrors.Errorf("add locked failed: %w", err) - } - if err := mp.addLocal(msg, msgb); err != nil { - log.Errorf("addLocal failed: %+v", err) - } - - return msg, mp.api.PubSubPublish(build.MessagesTopic(mp.netName), msgb) -} - -func (mp *MessagePool) Remove(from address.Address, nonce uint64) { - mp.lk.Lock() - defer mp.lk.Unlock() - - mset, ok := mp.pending[from] - if !ok { - return - } - - if m, ok := mset.msgs[nonce]; ok { - mp.changes.Pub(api.MpoolUpdate{ - Type: api.MpoolRemove, - Message: m, - }, localUpdates) - } - - // NB: This deletes any message with the given nonce. 
This makes sense - // as two messages with the same sender cannot have the same nonce - delete(mset.msgs, nonce) - - if len(mset.msgs) == 0 { - delete(mp.pending, from) - } else { - var max uint64 - for nonce := range mset.msgs { - if max < nonce { - max = nonce - } - } - if max < nonce { - max = nonce // we could have not seen the removed message before - } - - mset.nextNonce = max + 1 - } -} - -func (mp *MessagePool) Pending() ([]*types.SignedMessage, *types.TipSet) { - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - - mp.lk.Lock() - defer mp.lk.Unlock() - - out := make([]*types.SignedMessage, 0) - for a := range mp.pending { - out = append(out, mp.pendingFor(a)...) - } - - return out, mp.curTs -} - -func (mp *MessagePool) pendingFor(a address.Address) []*types.SignedMessage { - mset := mp.pending[a] - if mset == nil || len(mset.msgs) == 0 { - return nil - } - - set := make([]*types.SignedMessage, 0, len(mset.msgs)) - - for _, m := range mset.msgs { - set = append(set, m) - } - - sort.Slice(set, func(i, j int) bool { - return set[i].Message.Nonce < set[j].Message.Nonce - }) - - return set -} - -func (mp *MessagePool) HeadChange(revert []*types.TipSet, apply []*types.TipSet) error { - mp.curTsLk.Lock() - defer mp.curTsLk.Unlock() - - rmsgs := make(map[address.Address]map[uint64]*types.SignedMessage) - add := func(m *types.SignedMessage) { - s, ok := rmsgs[m.Message.From] - if !ok { - s = make(map[uint64]*types.SignedMessage) - rmsgs[m.Message.From] = s - } - s[m.Message.Nonce] = m - } - rm := func(from address.Address, nonce uint64) { - s, ok := rmsgs[from] - if !ok { - mp.Remove(from, nonce) - return - } - - if _, ok := s[nonce]; ok { - delete(s, nonce) - return - } - - mp.Remove(from, nonce) - } - - for _, ts := range revert { - pts, err := mp.api.LoadTipSet(ts.Parents()) - if err != nil { - return err - } - - msgs, err := mp.MessagesForBlocks(ts.Blocks()) - if err != nil { - return err - } - - mp.curTs = pts - - for _, msg := range msgs { - add(msg) - } - } - - 
for _, ts := range apply { - for _, b := range ts.Blocks() { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) - if err != nil { - return xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) - } - for _, msg := range smsgs { - rm(msg.Message.From, msg.Message.Nonce) - } - - for _, msg := range bmsgs { - rm(msg.From, msg.Nonce) - } - } - - mp.curTs = ts - } - - for _, s := range rmsgs { - for _, msg := range s { - if err := mp.addSkipChecks(msg); err != nil { - log.Errorf("Failed to readd message from reorg to mpool: %s", err) - } - } - } - - if len(revert) > 0 && futureDebug { - msgs, ts := mp.Pending() - - buckets := map[address.Address]*statBucket{} - - for _, v := range msgs { - bkt, ok := buckets[v.Message.From] - if !ok { - bkt = &statBucket{ - msgs: map[uint64]*types.SignedMessage{}, - } - buckets[v.Message.From] = bkt - } - - bkt.msgs[v.Message.Nonce] = v - } - - for a, bkt := range buckets { - act, err := mp.api.StateGetActor(a, ts) - if err != nil { - log.Debugf("%s, err: %s\n", a, err) - continue - } - - var cmsg *types.SignedMessage - var ok bool - - cur := act.Nonce - for { - cmsg, ok = bkt.msgs[cur] - if !ok { - break - } - cur++ - } - - ff := uint64(math.MaxUint64) - for k := range bkt.msgs { - if k > cur && k < ff { - ff = k - } - } - - if ff != math.MaxUint64 { - m := bkt.msgs[ff] - - // cmsg can be nil if no messages from the current nonce are in the mpool - ccid := "nil" - if cmsg != nil { - ccid = cmsg.Cid().String() - } - - log.Debugw("Nonce gap", - "actor", a, - "future_cid", m.Cid(), - "future_nonce", ff, - "current_cid", ccid, - "current_nonce", cur, - "revert_tipset", revert[0].Key(), - "new_head", ts.Key(), - ) - } - } - } - - return nil -} - -type statBucket struct { - msgs map[uint64]*types.SignedMessage -} - -func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.SignedMessage, error) { - out := make([]*types.SignedMessage, 0) - - for _, b := 
range blks { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) - if err != nil { - return nil, xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) - } - out = append(out, smsgs...) - - for _, msg := range bmsgs { - smsg := mp.RecoverSig(msg) - if smsg != nil { - out = append(out, smsg) - } else { - log.Warnf("could not recover signature for bls message %s", msg.Cid()) - } - } - } - - return out, nil -} - -func (mp *MessagePool) RecoverSig(msg *types.Message) *types.SignedMessage { - val, ok := mp.blsSigCache.Get(msg.Cid()) - if !ok { - return nil - } - sig, ok := val.(crypto.Signature) - if !ok { - log.Errorf("value in signature cache was not a signature (got %T)", val) - return nil - } - - return &types.SignedMessage{ - Message: *msg, - Signature: sig, - } -} - -func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, error) { - out := make(chan api.MpoolUpdate, 20) - sub := mp.changes.Sub(localUpdates) - - go func() { - defer mp.changes.Unsub(sub, localUpdates) - - for { - select { - case u := <-sub: - select { - case out <- u.(api.MpoolUpdate): - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() - - return out, nil -} - -func (mp *MessagePool) loadLocal() error { - res, err := mp.localMsgs.Query(query.Query{}) - if err != nil { - return xerrors.Errorf("query local messages: %w", err) - } - - for r := range res.Next() { - if r.Error != nil { - return xerrors.Errorf("r.Error: %w", r.Error) - } - - var sm types.SignedMessage - if err := sm.UnmarshalCBOR(bytes.NewReader(r.Value)); err != nil { - return xerrors.Errorf("unmarshaling local message: %w", err) - } - - if err := mp.Add(&sm); err != nil { - if xerrors.Is(err, ErrNonceTooLow) { - continue // todo: drop the message from local cache (if above certain confidence threshold) - } - - log.Errorf("adding local message: %+v", err) - } - } - - return nil -} - -const MinGasPrice = 0 - -func (mp 
*MessagePool) EstimateGasPrice(ctx context.Context, nblocksincl uint64, sender address.Address, gaslimit int64, tsk types.TipSetKey) (types.BigInt, error) { - // TODO: something smarter obviously - switch nblocksincl { - case 0: - return types.NewInt(MinGasPrice + 2), nil - case 1: - return types.NewInt(MinGasPrice + 1), nil - default: - return types.NewInt(MinGasPrice), nil - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/messagepool/messagepool_test.go b/vendor/github.com/filecoin-project/lotus/chain/messagepool/messagepool_test.go deleted file mode 100644 index d1dedd5342..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/messagepool/messagepool_test.go +++ /dev/null @@ -1,235 +0,0 @@ -package messagepool - -import ( - "context" - "fmt" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/lotus/chain/wallet" - _ "github.com/filecoin-project/lotus/lib/sigs/bls" - _ "github.com/filecoin-project/lotus/lib/sigs/secp" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" -) - -type testMpoolAPI struct { - cb func(rev, app []*types.TipSet) error - - bmsgs map[cid.Cid][]*types.SignedMessage - statenonce map[address.Address]uint64 - - tipsets []*types.TipSet -} - -func newTestMpoolAPI() *testMpoolAPI { - return &testMpoolAPI{ - bmsgs: make(map[cid.Cid][]*types.SignedMessage), - statenonce: make(map[address.Address]uint64), - } -} - -func (tma *testMpoolAPI) applyBlock(t *testing.T, b *types.BlockHeader) { - t.Helper() - if err := tma.cb(nil, []*types.TipSet{mock.TipSet(b)}); err != nil { - t.Fatal(err) - } -} - -func (tma *testMpoolAPI) revertBlock(t *testing.T, b *types.BlockHeader) { - t.Helper() - if err := tma.cb([]*types.TipSet{mock.TipSet(b)}, nil); err != nil { - t.Fatal(err) - } -} - -func (tma *testMpoolAPI) setStateNonce(addr 
address.Address, v uint64) { - tma.statenonce[addr] = v -} - -func (tma *testMpoolAPI) setBlockMessages(h *types.BlockHeader, msgs ...*types.SignedMessage) { - tma.bmsgs[h.Cid()] = msgs - tma.tipsets = append(tma.tipsets, mock.TipSet(h)) -} - -func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) error) *types.TipSet { - tma.cb = cb - return nil -} - -func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { - return cid.Undef, nil -} - -func (tma *testMpoolAPI) PubSubPublish(string, []byte) error { - return nil -} - -func (tma *testMpoolAPI) StateGetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { - return &types.Actor{ - Nonce: tma.statenonce[addr], - Balance: types.NewInt(90000000), - }, nil -} - -func (tma *testMpoolAPI) StateAccountKey(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - if addr.Protocol() != address.BLS && addr.Protocol() != address.SECP256K1 { - return address.Undef, fmt.Errorf("given address was not a key addr") - } - return addr, nil -} - -func (tma *testMpoolAPI) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - return nil, tma.bmsgs[h.Cid()], nil -} - -func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - if len(ts.Blocks()) != 1 { - panic("cant deal with multiblock tipsets in this test") - } - - bm, sm, err := tma.MessagesForBlock(ts.Blocks()[0]) - if err != nil { - return nil, err - } - - var out []types.ChainMsg - for _, m := range bm { - out = append(out, m) - } - - for _, m := range sm { - out = append(out, m) - } - - return out, nil -} - -func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { - for _, ts := range tma.tipsets { - if types.CidArrsEqual(tsk.Cids(), ts.Cids()) { - return ts, nil - } - } - - return nil, fmt.Errorf("tipset not found") -} - -func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val 
uint64) { - t.Helper() - n, err := mp.GetNonce(addr) - if err != nil { - t.Fatal(err) - } - - if n != val { - t.Fatalf("expected nonce of %d, got %d", val, n) - } -} - -func mustAdd(t *testing.T, mp *MessagePool, msg *types.SignedMessage) { - t.Helper() - if err := mp.Add(msg); err != nil { - t.Fatal(err) - } -} - -func TestMessagePool(t *testing.T) { - tma := newTestMpoolAPI() - - w, err := wallet.NewWallet(wallet.NewMemKeyStore()) - if err != nil { - t.Fatal(err) - } - - ds := datastore.NewMapDatastore() - - mp, err := New(tma, ds, "mptest") - if err != nil { - t.Fatal(err) - } - - a := mock.MkBlock(nil, 1, 1) - - sender, err := w.GenerateKey(crypto.SigTypeBLS) - if err != nil { - t.Fatal(err) - } - target := mock.Address(1001) - - var msgs []*types.SignedMessage - for i := 0; i < 5; i++ { - msgs = append(msgs, mock.MkMessage(sender, target, uint64(i), w)) - } - - tma.setStateNonce(sender, 0) - assertNonce(t, mp, sender, 0) - mustAdd(t, mp, msgs[0]) - assertNonce(t, mp, sender, 1) - mustAdd(t, mp, msgs[1]) - assertNonce(t, mp, sender, 2) - - tma.setBlockMessages(a, msgs[0], msgs[1]) - tma.applyBlock(t, a) - - assertNonce(t, mp, sender, 2) -} - -func TestRevertMessages(t *testing.T) { - tma := newTestMpoolAPI() - - w, err := wallet.NewWallet(wallet.NewMemKeyStore()) - if err != nil { - t.Fatal(err) - } - - ds := datastore.NewMapDatastore() - - mp, err := New(tma, ds, "mptest") - if err != nil { - t.Fatal(err) - } - - a := mock.MkBlock(nil, 1, 1) - b := mock.MkBlock(mock.TipSet(a), 1, 1) - - sender, err := w.GenerateKey(crypto.SigTypeBLS) - if err != nil { - t.Fatal(err) - } - target := mock.Address(1001) - - var msgs []*types.SignedMessage - for i := 0; i < 5; i++ { - msgs = append(msgs, mock.MkMessage(sender, target, uint64(i), w)) - } - - tma.setBlockMessages(a, msgs[0]) - tma.setBlockMessages(b, msgs[1], msgs[2], msgs[3]) - - mustAdd(t, mp, msgs[0]) - mustAdd(t, mp, msgs[1]) - mustAdd(t, mp, msgs[2]) - mustAdd(t, mp, msgs[3]) - - tma.setStateNonce(sender, 0) - 
tma.applyBlock(t, a) - assertNonce(t, mp, sender, 4) - - tma.setStateNonce(sender, 1) - tma.applyBlock(t, b) - assertNonce(t, mp, sender, 4) - tma.setStateNonce(sender, 0) - tma.revertBlock(t, b) - - assertNonce(t, mp, sender, 4) - - p, _ := mp.Pending() - if len(p) != 3 { - t.Fatal("expected three messages in mempool") - } - -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/metrics/consensus.go b/vendor/github.com/filecoin-project/lotus/chain/metrics/consensus.go deleted file mode 100644 index bc7e019d21..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/metrics/consensus.go +++ /dev/null @@ -1,128 +0,0 @@ -package metrics - -import ( - "context" - "encoding/json" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - pubsub "github.com/libp2p/go-libp2p-pubsub" - "go.uber.org/fx" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/impl/full" - "github.com/filecoin-project/lotus/node/modules/helpers" -) - -var log = logging.Logger("metrics") - -const baseTopic = "/fil/headnotifs/" - -type Update struct { - Type string -} - -func SendHeadNotifs(nickname string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, chain full.ChainAPI) error { - return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ps *pubsub.PubSub, chain full.ChainAPI) error { - ctx := helpers.LifecycleCtx(mctx, lc) - - lc.Append(fx.Hook{ - OnStart: func(_ context.Context) error { - gen, err := chain.Chain.GetGenesis() - if err != nil { - return err - } - - topic := baseTopic + gen.Cid().String() - - go func() { - if err := sendHeadNotifs(ctx, ps, topic, chain, nickname); err != nil { - log.Error("consensus metrics error", err) - return - } - }() - go func() { - sub, err := ps.Subscribe(topic) - if err != nil { - return - } - defer sub.Cancel() - - for { - if _, err := sub.Next(ctx); err != nil { - return - } - } - - }() - return nil - }, - 
}) - - return nil - } -} - -type message struct { - // TipSet - Cids []cid.Cid - Blocks []*types.BlockHeader - Height abi.ChainEpoch - Weight types.BigInt - Time uint64 - Nonce uint64 - - // Meta - - NodeName string -} - -func sendHeadNotifs(ctx context.Context, ps *pubsub.PubSub, topic string, chain full.ChainAPI, nickname string) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - notifs, err := chain.ChainNotify(ctx) - if err != nil { - return err - } - - // using unix nano time makes very sure we pick a nonce higher than previous restart - nonce := uint64(time.Now().UnixNano()) - - for { - select { - case notif := <-notifs: - n := notif[len(notif)-1] - - w, err := chain.ChainTipSetWeight(ctx, n.Val.Key()) - if err != nil { - return err - } - - m := message{ - Cids: n.Val.Cids(), - Blocks: n.Val.Blocks(), - Height: n.Val.Height(), - Weight: w, - NodeName: nickname, - Time: uint64(time.Now().UnixNano() / 1000_000), - Nonce: nonce, - } - - b, err := json.Marshal(m) - if err != nil { - return err - } - - if err := ps.Publish(topic, b); err != nil { - return err - } - case <-ctx.Done(): - return nil - } - - nonce++ - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/state/statetree.go b/vendor/github.com/filecoin-project/lotus/chain/state/statetree.go deleted file mode 100644 index 0245248351..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/state/statetree.go +++ /dev/null @@ -1,341 +0,0 @@ -package state - -import ( - "context" - "fmt" - - "github.com/filecoin-project/specs-actors/actors/builtin" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" -) - 
-var log = logging.Logger("statetree") - -// StateTree stores actors state by their ID. -type StateTree struct { - root *hamt.Node - Store cbor.IpldStore - - snaps *stateSnaps -} - -type stateSnaps struct { - layers []*stateSnapLayer -} - -type stateSnapLayer struct { - actors map[address.Address]streeOp - resolveCache map[address.Address]address.Address -} - -func newStateSnapLayer() *stateSnapLayer { - return &stateSnapLayer{ - actors: make(map[address.Address]streeOp), - resolveCache: make(map[address.Address]address.Address), - } -} - -type streeOp struct { - Act types.Actor - Delete bool -} - -func newStateSnaps() *stateSnaps { - ss := &stateSnaps{} - ss.addLayer() - return ss -} - -func (ss *stateSnaps) addLayer() { - ss.layers = append(ss.layers, newStateSnapLayer()) -} - -func (ss *stateSnaps) dropLayer() { - ss.layers[len(ss.layers)-1] = nil // allow it to be GCed - ss.layers = ss.layers[:len(ss.layers)-1] -} - -func (ss *stateSnaps) mergeLastLayer() { - last := ss.layers[len(ss.layers)-1] - nextLast := ss.layers[len(ss.layers)-2] - - for k, v := range last.actors { - nextLast.actors[k] = v - } - - for k, v := range last.resolveCache { - nextLast.resolveCache[k] = v - } - - ss.dropLayer() -} - -func (ss *stateSnaps) resolveAddress(addr address.Address) (address.Address, bool) { - for i := len(ss.layers) - 1; i >= 0; i-- { - resa, ok := ss.layers[i].resolveCache[addr] - if ok { - return resa, true - } - } - return address.Undef, false -} - -func (ss *stateSnaps) cacheResolveAddress(addr, resa address.Address) { - ss.layers[len(ss.layers)-1].resolveCache[addr] = resa -} - -func (ss *stateSnaps) getActor(addr address.Address) (*types.Actor, error) { - for i := len(ss.layers) - 1; i >= 0; i-- { - act, ok := ss.layers[i].actors[addr] - if ok { - if act.Delete { - return nil, types.ErrActorNotFound - } - - return &act.Act, nil - } - } - return nil, nil -} - -func (ss *stateSnaps) setActor(addr address.Address, act *types.Actor) { - 
ss.layers[len(ss.layers)-1].actors[addr] = streeOp{Act: *act} -} - -func (ss *stateSnaps) deleteActor(addr address.Address) { - ss.layers[len(ss.layers)-1].actors[addr] = streeOp{Delete: true} -} - -func NewStateTree(cst cbor.IpldStore) (*StateTree, error) { - return &StateTree{ - root: hamt.NewNode(cst, hamt.UseTreeBitWidth(5)), - Store: cst, - snaps: newStateSnaps(), - }, nil -} - -func LoadStateTree(cst cbor.IpldStore, c cid.Cid) (*StateTree, error) { - nd, err := hamt.LoadNode(context.Background(), cst, c, hamt.UseTreeBitWidth(5)) - if err != nil { - log.Errorf("loading hamt node %s failed: %s", c, err) - return nil, err - } - - return &StateTree{ - root: nd, - Store: cst, - snaps: newStateSnaps(), - }, nil -} - -func (st *StateTree) SetActor(addr address.Address, act *types.Actor) error { - iaddr, err := st.LookupID(addr) - if err != nil { - return xerrors.Errorf("ID lookup failed: %w", err) - } - addr = iaddr - - st.snaps.setActor(addr, act) - return nil -} - -// LookupID gets the ID address of this actor's `addr` stored in the `InitActor`. -func (st *StateTree) LookupID(addr address.Address) (address.Address, error) { - if addr.Protocol() == address.ID { - return addr, nil - } - - resa, ok := st.snaps.resolveAddress(addr) - if ok { - return resa, nil - } - - act, err := st.GetActor(builtin.InitActorAddr) - if err != nil { - return address.Undef, xerrors.Errorf("getting init actor: %w", err) - } - - var ias init_.State - if err := st.Store.Get(context.TODO(), act.Head, &ias); err != nil { - return address.Undef, xerrors.Errorf("loading init actor state: %w", err) - } - - a, err := ias.ResolveAddress(&AdtStore{st.Store}, addr) - if err != nil { - return address.Undef, xerrors.Errorf("resolve address %s: %w", addr, err) - } - - st.snaps.cacheResolveAddress(addr, a) - - return a, nil -} - -// GetActor returns the actor from any type of `addr` provided. 
-func (st *StateTree) GetActor(addr address.Address) (*types.Actor, error) { - if addr == address.Undef { - return nil, fmt.Errorf("GetActor called on undefined address") - } - - // Transform `addr` to its ID format. - iaddr, err := st.LookupID(addr) - if err != nil { - if xerrors.Is(err, init_.ErrAddressNotFound) { - return nil, xerrors.Errorf("resolution lookup failed (%s): %w", addr, err) - } - return nil, xerrors.Errorf("address resolution: %w", err) - } - addr = iaddr - - snapAct, err := st.snaps.getActor(addr) - if err != nil { - return nil, err - } - - if snapAct != nil { - return snapAct, nil - } - - var act types.Actor - err = st.root.Find(context.TODO(), string(addr.Bytes()), &act) - if err != nil { - if err == hamt.ErrNotFound { - return nil, types.ErrActorNotFound - } - return nil, xerrors.Errorf("hamt find failed: %w", err) - } - - st.snaps.setActor(addr, &act) - - return &act, nil -} - -func (st *StateTree) DeleteActor(addr address.Address) error { - if addr == address.Undef { - return xerrors.Errorf("DeleteActor called on undefined address") - } - - iaddr, err := st.LookupID(addr) - if err != nil { - if xerrors.Is(err, init_.ErrAddressNotFound) { - return xerrors.Errorf("resolution lookup failed (%s): %w", addr, err) - } - return xerrors.Errorf("address resolution: %w", err) - } - - addr = iaddr - - _, err = st.GetActor(addr) - if err != nil { - return err - } - - st.snaps.deleteActor(addr) - - return nil -} - -func (st *StateTree) Flush(ctx context.Context) (cid.Cid, error) { - ctx, span := trace.StartSpan(ctx, "stateTree.Flush") - defer span.End() - if len(st.snaps.layers) != 1 { - return cid.Undef, xerrors.Errorf("tried to flush state tree with snapshots on the stack") - } - - for addr, sto := range st.snaps.layers[0].actors { - if sto.Delete { - if err := st.root.Delete(ctx, string(addr.Bytes())); err != nil { - return cid.Undef, err - } - } else { - if err := st.root.Set(ctx, string(addr.Bytes()), &sto.Act); err != nil { - return cid.Undef, err 
- } - } - } - - if err := st.root.Flush(ctx); err != nil { - return cid.Undef, err - } - - return st.Store.Put(ctx, st.root) -} - -func (st *StateTree) Snapshot(ctx context.Context) error { - ctx, span := trace.StartSpan(ctx, "stateTree.SnapShot") - defer span.End() - - st.snaps.addLayer() - - return nil -} - -func (st *StateTree) ClearSnapshot() { - st.snaps.mergeLastLayer() -} - -func (st *StateTree) RegisterNewAddress(addr address.Address) (address.Address, error) { - var out address.Address - err := st.MutateActor(builtin.InitActorAddr, func(initact *types.Actor) error { - var ias init_.State - if err := st.Store.Get(context.TODO(), initact.Head, &ias); err != nil { - return err - } - - oaddr, err := ias.MapAddressToNewID(&AdtStore{st.Store}, addr) - if err != nil { - return err - } - out = oaddr - - ncid, err := st.Store.Put(context.TODO(), &ias) - if err != nil { - return err - } - - initact.Head = ncid - return nil - }) - if err != nil { - return address.Undef, err - } - - return out, nil -} - -type AdtStore struct{ cbor.IpldStore } - -func (a *AdtStore) Context() context.Context { - return context.TODO() -} - -var _ adt.Store = (*AdtStore)(nil) - -func (st *StateTree) Revert() error { - st.snaps.dropLayer() - st.snaps.addLayer() - - return nil -} - -func (st *StateTree) MutateActor(addr address.Address, f func(*types.Actor) error) error { - act, err := st.GetActor(addr) - if err != nil { - return err - } - - if err := f(act); err != nil { - return err - } - - return st.SetActor(addr, act) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/state/statetree_test.go b/vendor/github.com/filecoin-project/lotus/chain/state/statetree_test.go deleted file mode 100644 index 3c832f7cc0..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/state/statetree_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package state - -import ( - "context" - "fmt" - "testing" - - "github.com/filecoin-project/specs-actors/actors/builtin" - - address 
"github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" -) - -func BenchmarkStateTreeSet(b *testing.B) { - cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - a, err := address.NewIDAddress(uint64(i)) - if err != nil { - b.Fatal(err) - } - err = st.SetActor(a, &types.Actor{ - Balance: types.NewInt(1258812523), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, - Nonce: uint64(i), - }) - if err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkStateTreeSetFlush(b *testing.B) { - cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - a, err := address.NewIDAddress(uint64(i)) - if err != nil { - b.Fatal(err) - } - err = st.SetActor(a, &types.Actor{ - Balance: types.NewInt(1258812523), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, - Nonce: uint64(i), - }) - if err != nil { - b.Fatal(err) - } - if _, err := st.Flush(context.TODO()); err != nil { - b.Fatal(err) - } - } -} - -func BenchmarkStateTree10kGetActor(b *testing.B) { - cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst) - if err != nil { - b.Fatal(err) - } - for i := 0; i < 10000; i++ { - a, err := address.NewIDAddress(uint64(i)) - if err != nil { - b.Fatal(err) - } - err = st.SetActor(a, &types.Actor{ - Balance: types.NewInt(1258812523 + uint64(i)), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, - Nonce: uint64(i), - }) - if err != nil { - b.Fatal(err) - } - } - - if _, err := st.Flush(context.TODO()); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - a, err := address.NewIDAddress(uint64(i % 10000)) - if err != nil { - b.Fatal(err) - } - 
- _, err = st.GetActor(a) - if err != nil { - b.Fatal(err) - } - } -} - -func TestSetCache(t *testing.T) { - cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst) - if err != nil { - t.Fatal(err) - } - - a, err := address.NewIDAddress(uint64(222)) - if err != nil { - t.Fatal(err) - } - - act := &types.Actor{ - Balance: types.NewInt(0), - Code: builtin.StorageMinerActorCodeID, - Head: builtin.AccountActorCodeID, - Nonce: 0, - } - - err = st.SetActor(a, act) - if err != nil { - t.Fatal(err) - } - - act.Nonce = 1 - - outact, err := st.GetActor(a) - if err != nil { - t.Fatal(err) - } - - if outact.Nonce == 1 { - t.Error("nonce should not have updated") - } -} - -func TestSnapshots(t *testing.T) { - ctx := context.Background() - cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst) - if err != nil { - t.Fatal(err) - } - - var addrs []address.Address - //for _, a := range []string{"t15ocrptbu4i5qucjvvwecihd7fqqgzb27pz5l5zy", "t1dpyvgavvl3f4ujlk6odedss54z6rt5gyuknsuva", "t1feiejbkcvozy7iltt2pxzuoq4d2kpbsusugan7a", "t3rgjfqybjx7bahuhfv7nwfg3tlm4i4zyvldfirjvzm5z5xwjoqbj3rfi2mpmlxpqwxxxafgpkjilqzpg7cefa"} { - for _, a := range []string{"t0100", "t0101", "t0102", "t0103"} { - addr, err := address.NewFromString(a) - if err != nil { - t.Fatal(err) - } - addrs = append(addrs, addr) - } - - if err := st.Snapshot(ctx); err != nil { - t.Fatal(err) - } - - if err := st.SetActor(addrs[0], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(55)}); err != nil { - t.Fatal(err) - } - - { // sub call that will fail - if err := st.Snapshot(ctx); err != nil { - t.Fatal(err) - } - - if err := st.SetActor(addrs[1], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(77)}); err != nil { - t.Fatal(err) - } - - if err := st.Revert(); err != nil { - t.Fatal(err) - } - st.ClearSnapshot() - } - - // more operations in top level call... 
- if err := st.SetActor(addrs[2], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(123)}); err != nil { - t.Fatal(err) - } - - { // sub call that succeeds - if err := st.Snapshot(ctx); err != nil { - t.Fatal(err) - } - - if err := st.SetActor(addrs[3], &types.Actor{Code: builtin.AccountActorCodeID, Head: builtin.AccountActorCodeID, Balance: types.NewInt(5)}); err != nil { - t.Fatal(err) - } - - st.ClearSnapshot() - } - - st.ClearSnapshot() - - if _, err := st.Flush(ctx); err != nil { - t.Fatal(err) - } - - assertHas(t, st, addrs[0]) - assertNotHas(t, st, addrs[1]) - assertHas(t, st, addrs[2]) - assertHas(t, st, addrs[3]) -} - -func assertHas(t *testing.T, st *StateTree, addr address.Address) { - _, err := st.GetActor(addr) - if err != nil { - t.Fatal(err) - } -} - -func assertNotHas(t *testing.T, st *StateTree, addr address.Address) { - _, err := st.GetActor(addr) - if err == nil { - t.Fatal("shouldnt have found actor", addr) - } -} - -func TestStateTreeConsistency(t *testing.T) { - cst := cbor.NewMemCborStore() - st, err := NewStateTree(cst) - if err != nil { - t.Fatal(err) - } - - var addrs []address.Address - for i := 100; i < 150; i++ { - a, err := address.NewIDAddress(uint64(i)) - if err != nil { - t.Fatal(err) - } - - addrs = append(addrs, a) - } - - randomCid, err := cid.Decode("bafy2bzacecu7n7wbtogznrtuuvf73dsz7wasgyneqasksdblxupnyovmtwxxu") - if err != nil { - t.Fatal(err) - } - - for i, a := range addrs { - err := st.SetActor(a, &types.Actor{ - Code: randomCid, - Head: randomCid, - Balance: types.NewInt(uint64(10000 + i)), - Nonce: uint64(1000 - i), - }) - if err != nil { - t.Fatalf("while setting actor: %+v", err) - } - } - - root, err := st.Flush(context.TODO()) - if err != nil { - t.Fatal(err) - } - - fmt.Println("root is: ", root) - if root.String() != "bafy2bzaceadyjnrv3sbjvowfl3jr4pdn5p2bf3exjjie2f3shg4oy5sub7h34" { - t.Fatal("MISMATCH!") - } -} diff --git 
a/vendor/github.com/filecoin-project/lotus/chain/stmgr/call.go b/vendor/github.com/filecoin-project/lotus/chain/stmgr/call.go deleted file mode 100644 index 32e502e953..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/stmgr/call.go +++ /dev/null @@ -1,109 +0,0 @@ -package stmgr - -import ( - "context" - "fmt" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" -) - -func (sm *StateManager) CallRaw(ctx context.Context, msg *types.Message, bstate cid.Cid, r vm.Rand, bheight abi.ChainEpoch) (*api.InvocResult, error) { - ctx, span := trace.StartSpan(ctx, "statemanager.CallRaw") - defer span.End() - - vmi, err := vm.NewVM(bstate, bheight, r, sm.cs.Blockstore(), sm.cs.VMSys()) - if err != nil { - return nil, xerrors.Errorf("failed to set up vm: %w", err) - } - - if msg.GasLimit == 0 { - msg.GasLimit = 10000000000 - } - if msg.GasPrice == types.EmptyInt { - msg.GasPrice = types.NewInt(0) - } - if msg.Value == types.EmptyInt { - msg.Value = types.NewInt(0) - } - - if span.IsRecordingEvents() { - span.AddAttributes( - trace.Int64Attribute("gas_limit", msg.GasLimit), - trace.Int64Attribute("gas_price", int64(msg.GasPrice.Uint64())), - trace.StringAttribute("value", msg.Value.String()), - ) - } - - fromActor, err := vmi.StateTree().GetActor(msg.From) - if err != nil { - return nil, xerrors.Errorf("call raw get actor: %s", err) - } - - msg.Nonce = fromActor.Nonce - - // TODO: maybe just use the invoker directly? 
- ret, err := vmi.ApplyImplicitMessage(ctx, msg) - if err != nil { - return nil, xerrors.Errorf("apply message failed: %w", err) - } - - var errs string - if ret.ActorErr != nil { - errs = ret.ActorErr.Error() - log.Warnf("chain call failed: %s", ret.ActorErr) - } - - return &api.InvocResult{ - Msg: msg, - MsgRct: &ret.MessageReceipt, - ExecutionTrace: ret.ExecutionTrace, - Error: errs, - Duration: ret.Duration, - }, nil - -} - -func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*api.InvocResult, error) { - if ts == nil { - ts = sm.cs.GetHeaviestTipSet() - } - - state := ts.ParentState() - - r := store.NewChainRand(sm.cs, ts.Cids(), ts.Height()) - - return sm.CallRaw(ctx, msg, state, r, ts.Height()) -} - -var errHaltExecution = fmt.Errorf("halt") - -func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.Cid) (*types.Message, *vm.ApplyRet, error) { - var outm *types.Message - var outr *vm.ApplyRet - - _, _, err := sm.computeTipSetState(ctx, ts.Blocks(), func(c cid.Cid, m *types.Message, ret *vm.ApplyRet) error { - if c == mcid { - outm = m - outr = ret - return errHaltExecution - } - return nil - }) - if err != nil && err != errHaltExecution { - return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err) - } - - if outr == nil { - return nil, nil, xerrors.Errorf("given message not found in tipset") - } - - return outm, outr, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/stmgr/forks.go b/vendor/github.com/filecoin-project/lotus/chain/stmgr/forks.go deleted file mode 100644 index 7f8dc579b7..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/stmgr/forks.go +++ /dev/null @@ -1,25 +0,0 @@ -package stmgr - -import ( - "context" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/ipfs/go-cid" -) - -var ForksAtHeight = map[abi.ChainEpoch]func(context.Context, *StateManager, cid.Cid) (cid.Cid, error){} - -func (sm *StateManager) 
handleStateForks(ctx context.Context, pstate cid.Cid, height, parentH abi.ChainEpoch) (_ cid.Cid, err error) { - for i := parentH; i < height; i++ { - f, ok := ForksAtHeight[i] - if ok { - nstate, err := f(ctx, sm, pstate) - if err != nil { - return cid.Undef, err - } - pstate = nstate - } - } - - return pstate, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/stmgr/forks_test.go b/vendor/github.com/filecoin-project/lotus/chain/stmgr/forks_test.go deleted file mode 100644 index 2fbcbbc99f..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/stmgr/forks_test.go +++ /dev/null @@ -1,233 +0,0 @@ -package stmgr_test - -import ( - "context" - "fmt" - "io" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/stmgr" - . 
"github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - _ "github.com/filecoin-project/lotus/lib/sigs/bls" - _ "github.com/filecoin-project/lotus/lib/sigs/secp" - - "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log" - cbg "github.com/whyrusleeping/cbor-gen" -) - -func init() { - miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ - abi.RegisteredSealProof_StackedDrg2KiBV1: {}, - } - power.ConsensusMinerMinPower = big.NewInt(2048) - verifreg.MinVerifiedDealSize = big.NewInt(256) -} - -const testForkHeight = 40 - -type testActor struct { -} - -type testActorState struct { - HasUpgraded uint64 -} - -func (tas *testActorState) MarshalCBOR(w io.Writer) error { - return cbg.CborWriteHeader(w, cbg.MajUnsignedInt, tas.HasUpgraded) -} - -func (tas *testActorState) UnmarshalCBOR(r io.Reader) error { - t, v, err := cbg.CborReadHeader(r) - if err != nil { - return err - } - if t != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type in test actor state (got %d)", t) - } - tas.HasUpgraded = v - return nil -} - -func (ta *testActor) Exports() []interface{} { - return []interface{}{ - 1: ta.Constructor, - 2: ta.TestMethod, - } -} - -func (ta *testActor) Constructor(rt runtime.Runtime, params *adt.EmptyValue) *adt.EmptyValue { - rt.ValidateImmediateCallerAcceptAny() - rt.State().Create(&testActorState{11}) - fmt.Println("NEW ACTOR ADDRESS IS: ", rt.Message().Receiver()) - - return adt.Empty -} - -func (ta *testActor) TestMethod(rt runtime.Runtime, params *adt.EmptyValue) *adt.EmptyValue { - rt.ValidateImmediateCallerAcceptAny() - var st testActorState - rt.State().Readonly(&st) - - if rt.CurrEpoch() > testForkHeight { - if st.HasUpgraded != 55 { - panic(aerrors.Fatal("fork updating applied in wrong order")) - } - } else { - if st.HasUpgraded != 11 { - panic(aerrors.Fatal("fork 
updating happened too early")) - } - } - - return adt.Empty -} - -func TestForkHeightTriggers(t *testing.T) { - logging.SetAllLoggers(logging.LevelInfo) - - ctx := context.TODO() - - cg, err := gen.NewGenerator() - if err != nil { - t.Fatal(err) - } - - sm := NewStateManager(cg.ChainStore()) - - inv := vm.NewInvoker() - - // predicting the address here... may break if other assumptions change - taddr, err := address.NewIDAddress(1002) - if err != nil { - t.Fatal(err) - } - - stmgr.ForksAtHeight[testForkHeight] = func(ctx context.Context, sm *StateManager, pstate cid.Cid) (cid.Cid, error) { - cst := cbor.NewCborStore(sm.ChainStore().Blockstore()) - st, err := state.LoadStateTree(cst, pstate) - if err != nil { - return cid.Undef, err - } - - act, err := st.GetActor(taddr) - if err != nil { - return cid.Undef, err - } - - var tas testActorState - if err := cst.Get(ctx, act.Head, &tas); err != nil { - return cid.Undef, xerrors.Errorf("in fork handler, failed to run get: %w", err) - } - - tas.HasUpgraded = 55 - - ns, err := cst.Put(ctx, &tas) - if err != nil { - return cid.Undef, err - } - - act.Head = ns - - if err := st.SetActor(taddr, act); err != nil { - return cid.Undef, err - } - - return st.Flush(ctx) - } - - inv.Register(builtin.PaymentChannelActorCodeID, &testActor{}, &testActorState{}) - sm.SetVMConstructor(func(c cid.Cid, h abi.ChainEpoch, r vm.Rand, b blockstore.Blockstore, s runtime.Syscalls) (*vm.VM, error) { - nvm, err := vm.NewVM(c, h, r, b, s) - if err != nil { - return nil, err - } - nvm.SetInvoker(inv) - return nvm, nil - }) - - cg.SetStateManager(sm) - - var msgs []*types.SignedMessage - - enc, err := actors.SerializeParams(&init_.ExecParams{CodeCID: builtin.PaymentChannelActorCodeID}) - if err != nil { - t.Fatal(err) - } - - m := &types.Message{ - From: cg.Banker(), - To: builtin.InitActorAddr, - Method: builtin.MethodsInit.Exec, - Params: enc, - GasLimit: 10000, - GasPrice: types.NewInt(0), - } - sig, err := cg.Wallet().Sign(ctx, cg.Banker(), 
m.Cid().Bytes()) - if err != nil { - t.Fatal(err) - } - msgs = append(msgs, &types.SignedMessage{ - Signature: *sig, - Message: *m, - }) - - nonce := uint64(1) - cg.GetMessages = func(cg *gen.ChainGen) ([]*types.SignedMessage, error) { - if len(msgs) > 0 { - fmt.Println("added construct method") - m := msgs - msgs = nil - return m, nil - } - - m := &types.Message{ - From: cg.Banker(), - To: taddr, - Method: 2, - Params: nil, - Nonce: nonce, - GasLimit: 10000, - GasPrice: types.NewInt(0), - } - nonce++ - - sig, err := cg.Wallet().Sign(ctx, cg.Banker(), m.Cid().Bytes()) - if err != nil { - return nil, err - } - - return []*types.SignedMessage{ - { - Signature: *sig, - Message: *m, - }, - }, nil - } - - for i := 0; i < 50; i++ { - _, err = cg.NextTipSet() - if err != nil { - t.Fatal(err) - } - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/stmgr/stmgr.go b/vendor/github.com/filecoin-project/lotus/chain/stmgr/stmgr.go deleted file mode 100644 index 917b5ca26d..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/stmgr/stmgr.go +++ /dev/null @@ -1,820 +0,0 @@ -package stmgr - -import ( - "context" - "fmt" - "sync" - - "github.com/filecoin-project/go-address" - amt "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/util/adt" - cbg 
"github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/trace" -) - -var log = logging.Logger("statemgr") - -type StateManager struct { - cs *store.ChainStore - - stCache map[string][]cid.Cid - compWait map[string]chan struct{} - stlk sync.Mutex - newVM func(cid.Cid, abi.ChainEpoch, vm.Rand, blockstore.Blockstore, runtime.Syscalls) (*vm.VM, error) -} - -func NewStateManager(cs *store.ChainStore) *StateManager { - return &StateManager{ - newVM: vm.NewVM, - cs: cs, - stCache: make(map[string][]cid.Cid), - compWait: make(map[string]chan struct{}), - } -} - -func cidsToKey(cids []cid.Cid) string { - var out string - for _, c := range cids { - out += c.KeyString() - } - return out -} - -func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st cid.Cid, rec cid.Cid, err error) { - ctx, span := trace.StartSpan(ctx, "tipSetState") - defer span.End() - if span.IsRecordingEvents() { - span.AddAttributes(trace.StringAttribute("tipset", fmt.Sprint(ts.Cids()))) - } - - ck := cidsToKey(ts.Cids()) - sm.stlk.Lock() - cw, cwok := sm.compWait[ck] - if cwok { - sm.stlk.Unlock() - span.AddAttributes(trace.BoolAttribute("waited", true)) - select { - case <-cw: - sm.stlk.Lock() - case <-ctx.Done(): - return cid.Undef, cid.Undef, ctx.Err() - } - } - cached, ok := sm.stCache[ck] - if ok { - sm.stlk.Unlock() - span.AddAttributes(trace.BoolAttribute("cache", true)) - return cached[0], cached[1], nil - } - ch := make(chan struct{}) - sm.compWait[ck] = ch - - defer func() { - sm.stlk.Lock() - delete(sm.compWait, ck) - if st != cid.Undef { - sm.stCache[ck] = []cid.Cid{st, rec} - } - sm.stlk.Unlock() - close(ch) - }() - - sm.stlk.Unlock() - - if ts.Height() == 0 { - // NB: This is here because the process that 
executes blocks requires that the - // block miner reference a valid miner in the state tree. Unless we create some - // magical genesis miner, this won't work properly, so we short circuit here - // This avoids the question of 'who gets paid the genesis block reward' - return ts.Blocks()[0].ParentStateRoot, ts.Blocks()[0].ParentMessageReceipts, nil - } - - st, rec, err = sm.computeTipSetState(ctx, ts.Blocks(), nil) - if err != nil { - return cid.Undef, cid.Undef, err - } - - return st, rec, nil -} - -func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { - var trace []*api.InvocResult - st, _, err := sm.computeTipSetState(ctx, ts.Blocks(), func(mcid cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - ir := &api.InvocResult{ - Msg: msg, - MsgRct: &ret.MessageReceipt, - ExecutionTrace: ret.ExecutionTrace, - Duration: ret.Duration, - } - if ret.ActorErr != nil { - ir.Error = ret.ActorErr.Error() - } - trace = append(trace, ir) - return nil - }) - if err != nil { - return cid.Undef, nil, err - } - - return st, trace, nil -} - -type BlockMessages struct { - Miner address.Address - BlsMessages []types.ChainMsg - SecpkMessages []types.ChainMsg - TicketCount int64 -} - -type ExecCallback func(cid.Cid, *types.Message, *vm.ApplyRet) error - -func (sm *StateManager) ApplyBlocks(ctx context.Context, pstate cid.Cid, bms []BlockMessages, epoch abi.ChainEpoch, r vm.Rand, cb ExecCallback) (cid.Cid, cid.Cid, error) { - vmi, err := sm.newVM(pstate, epoch, r, sm.cs.Blockstore(), sm.cs.VMSys()) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("instantiating VM failed: %w", err) - } - - var receipts []cbg.CBORMarshaler - processedMsgs := map[cid.Cid]bool{} - for _, b := range bms { - penalty := types.NewInt(0) - gasReward := big.Zero() - - for _, cm := range append(b.BlsMessages, b.SecpkMessages...) 
{ - m := cm.VMMessage() - if _, found := processedMsgs[m.Cid()]; found { - continue - } - r, err := vmi.ApplyMessage(ctx, cm) - if err != nil { - return cid.Undef, cid.Undef, err - } - - receipts = append(receipts, &r.MessageReceipt) - gasReward = big.Add(gasReward, big.Mul(m.GasPrice, big.NewInt(r.GasUsed))) - penalty = big.Add(penalty, r.Penalty) - - if cb != nil { - if err := cb(cm.Cid(), m, r); err != nil { - return cid.Undef, cid.Undef, err - } - } - processedMsgs[m.Cid()] = true - } - - var err error - params, err := actors.SerializeParams(&reward.AwardBlockRewardParams{ - Miner: b.Miner, - Penalty: penalty, - GasReward: gasReward, - }) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to serialize award params: %w", err) - } - - sysAct, err := vmi.StateTree().GetActor(builtin.SystemActorAddr) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get system actor: %w", err) - } - - rwMsg := &types.Message{ - From: builtin.SystemActorAddr, - To: builtin.RewardActorAddr, - Nonce: sysAct.Nonce, - Value: types.NewInt(0), - GasPrice: types.NewInt(0), - GasLimit: 1 << 30, - Method: builtin.MethodsReward.AwardBlockReward, - Params: params, - } - ret, err := vmi.ApplyImplicitMessage(ctx, rwMsg) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, err) - } - if cb != nil { - if err := cb(rwMsg.Cid(), rwMsg, ret); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err) - } - } - - if ret.ExitCode != 0 { - return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) - } - - } - - // TODO: this nonce-getting is a tiny bit ugly - ca, err := vmi.StateTree().GetActor(builtin.SystemActorAddr) - if err != nil { - return cid.Undef, cid.Undef, err - } - - cronMsg := &types.Message{ - To: builtin.CronActorAddr, - From: builtin.SystemActorAddr, - Nonce: 
ca.Nonce, - Value: types.NewInt(0), - GasPrice: types.NewInt(0), - GasLimit: 1 << 30, // Make super sure this is never too little - Method: builtin.MethodsCron.EpochTick, - Params: nil, - } - ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg) - if err != nil { - return cid.Undef, cid.Undef, err - } - if cb != nil { - if err := cb(cronMsg.Cid(), cronMsg, ret); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on cron message: %w", err) - } - } - if ret.ExitCode != 0 { - return cid.Undef, cid.Undef, xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode) - } - - bs := cbor.NewCborStore(sm.cs.Blockstore()) - rectroot, err := amt.FromArray(ctx, bs, receipts) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) - } - - st, err := vmi.Flush(ctx) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err) - } - - return st, rectroot, nil -} - -func (sm *StateManager) computeTipSetState(ctx context.Context, blks []*types.BlockHeader, cb ExecCallback) (cid.Cid, cid.Cid, error) { - ctx, span := trace.StartSpan(ctx, "computeTipSetState") - defer span.End() - - for i := 0; i < len(blks); i++ { - for j := i + 1; j < len(blks); j++ { - if blks[i].Miner == blks[j].Miner { - return cid.Undef, cid.Undef, - xerrors.Errorf("duplicate miner in a tipset (%s %s)", - blks[i].Miner, blks[j].Miner) - } - } - } - - pstate := blks[0].ParentStateRoot - if len(blks[0].Parents) > 0 { // don't support forks on genesis - parent, err := sm.cs.GetBlock(blks[0].Parents[0]) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) - } - - pstate, err = sm.handleStateForks(ctx, blks[0].ParentStateRoot, blks[0].Height, parent.Height) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err) - } - } - - cids := make([]cid.Cid, len(blks)) - for i, v := range blks { - cids[i] = v.Cid() - } - - r 
:= store.NewChainRand(sm.cs, cids, blks[0].Height) - - var blkmsgs []BlockMessages - for _, b := range blks { - bms, sms, err := sm.cs.MessagesForBlock(b) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to get messages for block: %w", err) - } - - bm := BlockMessages{ - Miner: b.Miner, - BlsMessages: make([]types.ChainMsg, 0, len(bms)), - SecpkMessages: make([]types.ChainMsg, 0, len(sms)), - TicketCount: 1, //int64(len(b.EPostProof.Proofs)), // TODO fix this - } - - for _, m := range bms { - bm.BlsMessages = append(bm.BlsMessages, m) - } - - for _, m := range sms { - bm.SecpkMessages = append(bm.SecpkMessages, m) - } - - blkmsgs = append(blkmsgs, bm) - } - - return sm.ApplyBlocks(ctx, pstate, blkmsgs, blks[0].Height, r, cb) -} - -func (sm *StateManager) parentState(ts *types.TipSet) cid.Cid { - if ts == nil { - ts = sm.cs.GetHeaviestTipSet() - } - - return ts.ParentState() -} - -func (sm *StateManager) GetActor(addr address.Address, ts *types.TipSet) (*types.Actor, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) - state, err := state.LoadStateTree(cst, sm.parentState(ts)) - if err != nil { - return nil, xerrors.Errorf("load state tree: %w", err) - } - - return state.GetActor(addr) -} - -func (sm *StateManager) getActorRaw(addr address.Address, st cid.Cid) (*types.Actor, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) - state, err := state.LoadStateTree(cst, st) - if err != nil { - return nil, xerrors.Errorf("load state tree: %w", err) - } - - return state.GetActor(addr) -} - -func (sm *StateManager) GetBalance(addr address.Address, ts *types.TipSet) (types.BigInt, error) { - act, err := sm.GetActor(addr, ts) - if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { - return types.NewInt(0), nil - } - return types.EmptyInt, xerrors.Errorf("get actor: %w", err) - } - - return act.Balance, nil -} - -func (sm *StateManager) ChainStore() *store.ChainStore { - return sm.cs -} - -func (sm *StateManager) LoadActorState(ctx 
context.Context, a address.Address, out interface{}, ts *types.TipSet) (*types.Actor, error) { - act, err := sm.GetActor(a, ts) - if err != nil { - return nil, err - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - if err := cst.Get(ctx, act.Head, out); err != nil { - var r cbg.Deferred - _ = cst.Get(ctx, act.Head, &r) - log.Errorw("bad actor head", "error", err, "raw", r.Raw, "address", a) - - return nil, err - } - - return act, nil -} - -func (sm *StateManager) LoadActorStateRaw(ctx context.Context, a address.Address, out interface{}, st cid.Cid) (*types.Actor, error) { - act, err := sm.getActorRaw(a, st) - if err != nil { - return nil, err - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - if err := cst.Get(ctx, act.Head, out); err != nil { - return nil, err - } - - return act, nil -} - -// ResolveToKeyAddress is similar to `vm.ResolveToKeyAddr` but does not allow `Actor` type of addresses. -// Uses the `TipSet` `ts` to generate the VM state. -func (sm *StateManager) ResolveToKeyAddress(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - switch addr.Protocol() { - case address.BLS, address.SECP256K1: - return addr, nil - case address.Actor: - return address.Undef, xerrors.New("cannot resolve actor address to key address") - default: - } - - if ts == nil { - ts = sm.cs.GetHeaviestTipSet() - } - - st, _, err := sm.TipSetState(ctx, ts) - if err != nil { - return address.Undef, xerrors.Errorf("resolve address failed to get tipset state: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - tree, err := state.LoadStateTree(cst, st) - if err != nil { - return address.Undef, xerrors.Errorf("failed to load state tree") - } - - return vm.ResolveToKeyAddr(tree, cst, addr) -} - -func (sm *StateManager) GetBlsPublicKey(ctx context.Context, addr address.Address, ts *types.TipSet) (pubk bls.PublicKey, err error) { - kaddr, err := sm.ResolveToKeyAddress(ctx, addr, ts) - if err != nil { - return pubk, 
xerrors.Errorf("failed to resolve address to key address: %w", err) - } - - if kaddr.Protocol() != address.BLS { - return pubk, xerrors.Errorf("address must be BLS address to load bls public key") - } - - copy(pubk[:], kaddr.Payload()) - return pubk, nil -} - -func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts *types.TipSet) (address.Address, error) { - cst := cbor.NewCborStore(sm.cs.Blockstore()) - state, err := state.LoadStateTree(cst, sm.parentState(ts)) - if err != nil { - return address.Undef, xerrors.Errorf("load state tree: %w", err) - } - return state.LookupID(addr) -} - -func (sm *StateManager) GetReceipt(ctx context.Context, msg cid.Cid, ts *types.TipSet) (*types.MessageReceipt, error) { - m, err := sm.cs.GetCMessage(msg) - if err != nil { - return nil, fmt.Errorf("failed to load message: %w", err) - } - - r, err := sm.tipsetExecutedMessage(ts, msg, m.VMMessage()) - if err != nil { - return nil, err - } - - if r != nil { - return r, nil - } - - _, r, err = sm.searchBackForMsg(ctx, ts, m) - if err != nil { - return nil, fmt.Errorf("failed to look back through chain for message: %w", err) - } - - return r, nil -} - -// WaitForMessage blocks until a message appears on chain. It looks backwards in the chain to see if this has already -// happened. It guarantees that the message has been on chain for at least confidence epochs without being reverted -// before returning. 
-func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confidence uint64) (*types.TipSet, *types.MessageReceipt, error) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - msg, err := sm.cs.GetCMessage(mcid) - if err != nil { - return nil, nil, fmt.Errorf("failed to load message: %w", err) - } - - tsub := sm.cs.SubHeadChanges(ctx) - - head, ok := <-tsub - if !ok { - return nil, nil, fmt.Errorf("SubHeadChanges stream was invalid") - } - - if len(head) != 1 { - return nil, nil, fmt.Errorf("SubHeadChanges first entry should have been one item") - } - - if head[0].Type != store.HCCurrent { - return nil, nil, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) - } - - r, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage()) - if err != nil { - return nil, nil, err - } - - if r != nil { - return head[0].Val, r, nil - } - - var backTs *types.TipSet - var backRcp *types.MessageReceipt - backSearchWait := make(chan struct{}) - go func() { - fts, r, err := sm.searchBackForMsg(ctx, head[0].Val, msg) - if err != nil { - log.Warnf("failed to look back through chain for message: %w", err) - return - } - - backTs = fts - backRcp = r - close(backSearchWait) - }() - - var candidateTs *types.TipSet - var candidateRcp *types.MessageReceipt - heightOfHead := head[0].Val.Height() - reverts := map[types.TipSetKey]bool{} - - for { - select { - case notif, ok := <-tsub: - if !ok { - return nil, nil, ctx.Err() - } - for _, val := range notif { - switch val.Type { - case store.HCRevert: - if val.Val.Equals(candidateTs) { - candidateTs = nil - candidateRcp = nil - } - if backSearchWait != nil { - reverts[val.Val.Key()] = true - } - case store.HCApply: - if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) { - return candidateTs, candidateRcp, nil - } - r, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage()) - if err != nil { - return nil, nil, err - } - if r != nil { - if 
confidence == 0 { - return val.Val, r, err - } - candidateTs = val.Val - candidateRcp = r - } - heightOfHead = val.Val.Height() - } - } - case <-backSearchWait: - // check if we found the message in the chain and that is hasn't been reverted since we started searching - if backTs != nil && !reverts[backTs.Key()] { - // if head is at or past confidence interval, return immediately - if heightOfHead >= backTs.Height()+abi.ChainEpoch(confidence) { - return backTs, backRcp, nil - } - - // wait for confidence interval - candidateTs = backTs - candidateRcp = backRcp - } - reverts = nil - backSearchWait = nil - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - } -} - -func (sm *StateManager) SearchForMessage(ctx context.Context, mcid cid.Cid) (*types.TipSet, *types.MessageReceipt, error) { - msg, err := sm.cs.GetCMessage(mcid) - if err != nil { - return nil, nil, fmt.Errorf("failed to load message: %w", err) - } - - head := sm.cs.GetHeaviestTipSet() - - r, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage()) - if err != nil { - return nil, nil, err - } - - if r != nil { - return head, r, nil - } - - fts, r, err := sm.searchBackForMsg(ctx, head, msg) - - if err != nil { - log.Warnf("failed to look back through chain for message %s", mcid) - return nil, nil, err - } - - if fts == nil { - return nil, nil, nil - } - - return fts, r, nil -} - -func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet, m types.ChainMsg) (*types.TipSet, *types.MessageReceipt, error) { - - cur := from - for { - if cur.Height() == 0 { - // it ain't here! 
- return nil, nil, nil - } - - select { - case <-ctx.Done(): - return nil, nil, nil - default: - } - - act, err := sm.GetActor(m.VMMessage().From, cur) - if err != nil { - return nil, nil, err - } - - // we either have no messages from the sender, or the latest message we found has a lower nonce than the one being searched for, - // either way, no reason to lookback, it ain't there - if act.Nonce == 0 || act.Nonce < m.VMMessage().Nonce { - return nil, nil, nil - } - - ts, err := sm.cs.LoadTipSet(cur.Parents()) - if err != nil { - return nil, nil, fmt.Errorf("failed to load tipset during msg wait searchback: %w", err) - } - - r, err := sm.tipsetExecutedMessage(ts, m.Cid(), m.VMMessage()) - if err != nil { - return nil, nil, fmt.Errorf("checking for message execution during lookback: %w", err) - } - - if r != nil { - return ts, r, nil - } - - cur = ts - } -} - -func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message) (*types.MessageReceipt, error) { - // The genesis block did not execute any messages - if ts.Height() == 0 { - return nil, nil - } - - pts, err := sm.cs.LoadTipSet(ts.Parents()) - if err != nil { - return nil, err - } - - cm, err := sm.cs.MessagesForTipset(pts) - if err != nil { - return nil, err - } - - for ii := range cm { - // iterate in reverse because we going backwards through the chain - i := len(cm) - ii - 1 - m := cm[i] - - if m.VMMessage().From == vmm.From { // cheaper to just check origin first - if m.VMMessage().Nonce == vmm.Nonce { - if m.Cid() == msg { - return sm.cs.GetParentReceipt(ts.Blocks()[0], i) - } - - // this should be that message - return nil, xerrors.Errorf("found message with equal nonce as the one we are looking for (F:%s n %d, TS: %s n%d)", - msg, vmm.Nonce, m.Cid(), m.VMMessage().Nonce) - } - if m.VMMessage().Nonce < vmm.Nonce { - return nil, nil // don't bother looking further - } - } - } - - return nil, nil -} - -func (sm *StateManager) ListAllActors(ctx context.Context, ts 
*types.TipSet) ([]address.Address, error) { - if ts == nil { - ts = sm.cs.GetHeaviestTipSet() - } - st, _, err := sm.TipSetState(ctx, ts) - if err != nil { - return nil, err - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - r, err := hamt.LoadNode(ctx, cst, st, hamt.UseTreeBitWidth(5)) - if err != nil { - return nil, err - } - - var out []address.Address - err = r.ForEach(ctx, func(k string, val interface{}) error { - addr, err := address.NewFromBytes([]byte(k)) - if err != nil { - return xerrors.Errorf("address in state tree was not valid: %w", err) - } - out = append(out, addr) - return nil - }) - if err != nil { - return nil, err - } - - return out, nil -} - -func (sm *StateManager) MarketBalance(ctx context.Context, addr address.Address, ts *types.TipSet) (api.MarketBalance, error) { - var state market.State - _, err := sm.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts) - if err != nil { - return api.MarketBalance{}, err - } - - addr, err = sm.LookupID(ctx, addr, ts) - if err != nil { - return api.MarketBalance{}, err - } - - var out api.MarketBalance - - et, err := adt.AsBalanceTable(sm.cs.Store(ctx), state.EscrowTable) - if err != nil { - return api.MarketBalance{}, err - } - ehas, err := et.Has(addr) - if err != nil { - return api.MarketBalance{}, err - } - if ehas { - out.Escrow, err = et.Get(addr) - if err != nil { - return api.MarketBalance{}, xerrors.Errorf("getting escrow balance: %w", err) - } - } else { - out.Escrow = big.Zero() - } - - lt, err := adt.AsBalanceTable(sm.cs.Store(ctx), state.LockedTable) - if err != nil { - return api.MarketBalance{}, err - } - lhas, err := lt.Has(addr) - if err != nil { - return api.MarketBalance{}, err - } - if lhas { - out.Locked, err = lt.Get(addr) - if err != nil { - return api.MarketBalance{}, xerrors.Errorf("getting locked balance: %w", err) - } - } else { - out.Locked = big.Zero() - } - - return out, nil -} - -func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) error 
{ - tschain := []*types.TipSet{ts} - for ts.Height() != 0 { - next, err := sm.cs.LoadTipSet(ts.Parents()) - if err != nil { - return err - } - - tschain = append(tschain, next) - ts = next - } - - lastState := tschain[len(tschain)-1].ParentState() - for i := len(tschain) - 1; i >= 0; i-- { - cur := tschain[i] - log.Infof("computing state (height: %d, ts=%s)", cur.Height(), cur.Cids()) - if cur.ParentState() != lastState { - return xerrors.Errorf("tipset chain had state mismatch at height %d", cur.Height()) - } - st, _, err := sm.TipSetState(ctx, cur) - if err != nil { - return err - } - lastState = st - } - - return nil -} - -func (sm *StateManager) SetVMConstructor(nvm func(cid.Cid, abi.ChainEpoch, vm.Rand, blockstore.Blockstore, runtime.Syscalls) (*vm.VM, error)) { - sm.newVM = nvm -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/stmgr/utils.go b/vendor/github.com/filecoin-project/lotus/chain/stmgr/utils.go deleted file mode 100644 index 8c9cfc792b..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/stmgr/utils.go +++ /dev/null @@ -1,644 +0,0 @@ -package stmgr - -import ( - "bytes" - "context" - "os" - "reflect" - - cid "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - amt "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/go-bitfield" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - 
"github.com/filecoin-project/specs-actors/actors/builtin/multisig" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/node/modules/dtypes" -) - -func GetNetworkName(ctx context.Context, sm *StateManager, st cid.Cid) (dtypes.NetworkName, error) { - var state init_.State - _, err := sm.LoadActorStateRaw(ctx, builtin.InitActorAddr, &state, st) - if err != nil { - return "", xerrors.Errorf("(get sset) failed to load init actor state: %w", err) - } - - return dtypes.NetworkName(state.NetworkName), nil -} - -func GetMinerWorkerRaw(ctx context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (address.Address, error) { - var mas miner.State - _, err := sm.LoadActorStateRaw(ctx, maddr, &mas, st) - if err != nil { - return address.Undef, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - state, err := state.LoadStateTree(cst, st) - if err != nil { - return address.Undef, xerrors.Errorf("load state tree: %w", err) - } - - return vm.ResolveToKeyAddr(state, cst, mas.Info.Worker) -} - -func GetPower(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (power.Claim, power.Claim, error) { - return GetPowerRaw(ctx, sm, ts.ParentState(), maddr) -} - -func GetPowerRaw(ctx 
context.Context, sm *StateManager, st cid.Cid, maddr address.Address) (power.Claim, power.Claim, error) { - var ps power.State - _, err := sm.LoadActorStateRaw(ctx, builtin.StoragePowerActorAddr, &ps, st) - if err != nil { - return power.Claim{}, power.Claim{}, xerrors.Errorf("(get sset) failed to load power actor state: %w", err) - } - - var mpow power.Claim - if maddr != address.Undef { - cm, err := adt.AsMap(sm.cs.Store(ctx), ps.Claims) - if err != nil { - return power.Claim{}, power.Claim{}, err - } - - var claim power.Claim - if _, err := cm.Get(adt.AddrKey(maddr), &claim); err != nil { - return power.Claim{}, power.Claim{}, err - } - - mpow = claim - } - - return mpow, power.Claim{ - RawBytePower: ps.TotalRawBytePower, - QualityAdjPower: ps.TotalQualityAdjPower, - }, nil -} - -func SectorSetSizes(ctx context.Context, sm *StateManager, maddr address.Address, ts *types.TipSet) (api.MinerSectors, error) { - var mas miner.State - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return api.MinerSectors{}, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) - } - - notProving, err := abi.BitFieldUnion(mas.Faults, mas.Recoveries) - if err != nil { - return api.MinerSectors{}, err - } - - npc, err := notProving.Count() - if err != nil { - return api.MinerSectors{}, err - } - - blks := cbor.NewCborStore(sm.ChainStore().Blockstore()) - ss, err := amt.LoadAMT(ctx, blks, mas.Sectors) - if err != nil { - return api.MinerSectors{}, err - } - - return api.MinerSectors{ - Sset: ss.Count, - Pset: ss.Count - npc, - }, nil -} - -func PreCommitInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (miner.SectorPreCommitOnChainInfo, error) { - var mas miner.State - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) - } - - i, ok, err := 
mas.GetPrecommittedSector(sm.cs.Store(ctx), sid) - if err != nil { - return miner.SectorPreCommitOnChainInfo{}, err - } - if !ok { - return miner.SectorPreCommitOnChainInfo{}, xerrors.New("precommit not found") - } - - return *i, nil -} - -func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Address, sid abi.SectorNumber, ts *types.TipSet) (*miner.SectorOnChainInfo, error) { - var mas miner.State - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) - } - - sectorInfo, ok, err := mas.GetSector(sm.cs.Store(ctx), sid) - if err != nil { - return nil, err - } - if !ok { - return nil, xerrors.New("sector not found") - } - - return sectorInfo, nil -} - -func GetMinerSectorSet(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) { - var mas miner.State - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return nil, xerrors.Errorf("(get sset) failed to load miner actor state: %w", err) - } - - return LoadSectorsFromSet(ctx, sm.ChainStore().Blockstore(), mas.Sectors, filter, filterOut) -} - -func GetSectorsForWinningPoSt(ctx context.Context, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]abi.SectorInfo, error) { - var mas miner.State - _, err := sm.LoadActorStateRaw(ctx, maddr, &mas, st) - if err != nil { - return nil, xerrors.Errorf("(get sectors) failed to load miner actor state: %w", err) - } - - cst := cbor.NewCborStore(sm.cs.Blockstore()) - var deadlines miner.Deadlines - if err := cst.Get(ctx, mas.Deadlines, &deadlines); err != nil { - return nil, xerrors.Errorf("failed to load deadlines: %w", err) - } - - notProving, err := abi.BitFieldUnion(mas.Faults, mas.Recoveries) - if err != nil { - return nil, xerrors.Errorf("failed to union faults and recoveries: %w", err) - } - - 
allSectors, err := bitfield.MultiMerge(append(deadlines.Due[:], mas.NewSectors)...) - if err != nil { - return nil, xerrors.Errorf("merging deadline bitfields failed: %w", err) - } - - provingSectors, err := bitfield.SubtractBitField(allSectors, notProving) - if err != nil { - return nil, xerrors.Errorf("failed to subtract non-proving sectors from set: %w", err) - } - - numProvSect, err := provingSectors.Count() - if err != nil { - return nil, xerrors.Errorf("failed to count bits: %w", err) - } - - // TODO(review): is this right? feels fishy to me - if numProvSect == 0 { - return nil, nil - } - - spt, err := ffiwrapper.SealProofTypeFromSectorSize(mas.Info.SectorSize) - if err != nil { - return nil, xerrors.Errorf("getting seal proof type: %w", err) - } - - wpt, err := spt.RegisteredWinningPoStProof() - if err != nil { - return nil, xerrors.Errorf("getting window proof type: %w", err) - } - - mid, err := address.IDFromAddress(maddr) - if err != nil { - return nil, xerrors.Errorf("getting miner ID: %w", err) - } - - ids, err := pv.GenerateWinningPoStSectorChallenge(ctx, wpt, abi.ActorID(mid), rand, numProvSect) - if err != nil { - return nil, xerrors.Errorf("generating winning post challenges: %w", err) - } - - sectors, err := provingSectors.All(miner.SectorsMax) - if err != nil { - return nil, xerrors.Errorf("failed to enumerate all sector IDs: %w", err) - } - - sectorAmt, err := amt.LoadAMT(ctx, cst, mas.Sectors) - if err != nil { - return nil, xerrors.Errorf("failed to load sectors amt: %w", err) - } - - out := make([]abi.SectorInfo, len(ids)) - for i, n := range ids { - sid := sectors[n] - - var sinfo miner.SectorOnChainInfo - if err := sectorAmt.Get(ctx, sid, &sinfo); err != nil { - return nil, xerrors.Errorf("failed to get sector %d: %w", sid, err) - } - - out[i] = abi.SectorInfo{ - SealProof: spt, - SectorNumber: sinfo.Info.SectorNumber, - SealedCID: sinfo.Info.SealedCID, - } - } - - return out, nil -} - -func StateMinerInfo(ctx context.Context, sm 
*StateManager, ts *types.TipSet, maddr address.Address) (miner.MinerInfo, error) { - var mas miner.State - _, err := sm.LoadActorStateRaw(ctx, maddr, &mas, ts.ParentState()) - if err != nil { - return miner.MinerInfo{}, xerrors.Errorf("(get ssize) failed to load miner actor state: %w", err) - } - - return mas.Info, nil -} - -func GetMinerSlashed(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (bool, error) { - var mas miner.State - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return false, xerrors.Errorf("(get miner slashed) failed to load miner actor state") - } - - var spas power.State - _, err = sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &spas, ts) - if err != nil { - return false, xerrors.Errorf("(get miner slashed) failed to load power actor state") - } - - store := sm.cs.Store(ctx) - - claims, err := adt.AsMap(store, spas.Claims) - if err != nil { - return false, err - } - - ok, err := claims.Get(power.AddrKey(maddr), nil) - if err != nil { - return false, err - } - if !ok { - return true, nil - } - - return false, nil -} - -func GetMinerDeadlines(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*miner.Deadlines, error) { - var mas miner.State - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return nil, xerrors.Errorf("(get ssize) failed to load miner actor state: %w", err) - } - - return mas.LoadDeadlines(sm.cs.Store(ctx)) -} - -func GetMinerFaults(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*abi.BitField, error) { - var mas miner.State - _, err := sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return nil, xerrors.Errorf("(get faults) failed to load miner actor state: %w", err) - } - - return mas.Faults, nil -} - -func GetMinerRecoveries(ctx context.Context, sm *StateManager, ts *types.TipSet, maddr address.Address) (*abi.BitField, error) { - var mas miner.State - _, err := 
sm.LoadActorState(ctx, maddr, &mas, ts) - if err != nil { - return nil, xerrors.Errorf("(get recoveries) failed to load miner actor state: %w", err) - } - - return mas.Recoveries, nil -} - -func GetStorageDeal(ctx context.Context, sm *StateManager, dealID abi.DealID, ts *types.TipSet) (*api.MarketDeal, error) { - var state market.State - if _, err := sm.LoadActorState(ctx, builtin.StorageMarketActorAddr, &state, ts); err != nil { - return nil, err - } - - da, err := amt.LoadAMT(ctx, cbor.NewCborStore(sm.ChainStore().Blockstore()), state.Proposals) - if err != nil { - return nil, err - } - - var dp market.DealProposal - if err := da.Get(ctx, uint64(dealID), &dp); err != nil { - return nil, err - } - - sa, err := market.AsDealStateArray(sm.ChainStore().Store(ctx), state.States) - if err != nil { - return nil, err - } - - st, found, err := sa.Get(dealID) - if err != nil { - return nil, err - } - - if !found { - st = &market.DealState{ - SectorStartEpoch: -1, - LastUpdatedEpoch: -1, - SlashEpoch: -1, - } - } - - return &api.MarketDeal{ - Proposal: dp, - State: *st, - }, nil -} - -func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([]address.Address, error) { - var state power.State - if _, err := sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &state, ts); err != nil { - return nil, err - } - - m, err := adt.AsMap(sm.cs.Store(ctx), state.Claims) - if err != nil { - return nil, err - } - - var miners []address.Address - err = m.ForEach(nil, func(k string) error { - a, err := address.NewFromBytes([]byte(k)) - if err != nil { - return err - } - miners = append(miners, a) - return nil - }) - if err != nil { - return nil, err - } - - return miners, nil -} - -func LoadSectorsFromSet(ctx context.Context, bs blockstore.Blockstore, ssc cid.Cid, filter *abi.BitField, filterOut bool) ([]*api.ChainSectorInfo, error) { - a, err := amt.LoadAMT(ctx, cbor.NewCborStore(bs), ssc) - if err != nil { - return nil, err - } - - var sset []*api.ChainSectorInfo 
- if err := a.ForEach(ctx, func(i uint64, v *cbg.Deferred) error { - if filter != nil { - set, err := filter.IsSet(i) - if err != nil { - return xerrors.Errorf("filter check error: %w", err) - } - if set == filterOut { - return nil - } - } - - var oci miner.SectorOnChainInfo - if err := cbor.DecodeInto(v.Raw, &oci); err != nil { - return err - } - sset = append(sset, &api.ChainSectorInfo{ - Info: oci, - ID: abi.SectorNumber(i), - }) - return nil - }); err != nil { - return nil, err - } - - return sset, nil -} - -func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, msgs []*types.Message, ts *types.TipSet) (cid.Cid, []*api.InvocResult, error) { - if ts == nil { - ts = sm.cs.GetHeaviestTipSet() - } - - base, trace, err := sm.ExecutionTrace(ctx, ts) - if err != nil { - return cid.Undef, nil, err - } - - fstate, err := sm.handleStateForks(ctx, base, height, ts.Height()) - if err != nil { - return cid.Undef, nil, err - } - - r := store.NewChainRand(sm.cs, ts.Cids(), height) - vmi, err := vm.NewVM(fstate, height, r, sm.cs.Blockstore(), sm.cs.VMSys()) - if err != nil { - return cid.Undef, nil, err - } - - for i, msg := range msgs { - // TODO: Use the signed message length for secp messages - ret, err := vmi.ApplyMessage(ctx, msg) - if err != nil { - return cid.Undef, nil, xerrors.Errorf("applying message %s: %w", msg.Cid(), err) - } - if ret.ExitCode != 0 { - log.Infof("compute state apply message %d failed (exit: %d): %s", i, ret.ExitCode, ret.ActorErr) - } - } - - root, err := vmi.Flush(ctx) - if err != nil { - return cid.Undef, nil, err - } - - return root, trace, nil -} - -func GetProvingSetRaw(ctx context.Context, sm *StateManager, mas miner.State) ([]*api.ChainSectorInfo, error) { - notProving, err := abi.BitFieldUnion(mas.Faults, mas.Recoveries) - if err != nil { - return nil, err - } - - provset, err := LoadSectorsFromSet(ctx, sm.cs.Blockstore(), mas.Sectors, notProving, true) - if err != nil { - return nil, xerrors.Errorf("failed to get 
proving set: %w", err) - } - - return provset, nil -} - -func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, error) { - var lbr abi.ChainEpoch - if round > build.WinningPoStSectorSetLookback { - lbr = round - build.WinningPoStSectorSetLookback - } - - // more null blocks than our lookback - if lbr > ts.Height() { - return ts, nil - } - - lbts, err := sm.ChainStore().GetTipsetByHeight(ctx, lbr, ts, true) - if err != nil { - return nil, xerrors.Errorf("failed to get lookback tipset: %w", err) - } - - return lbts, nil -} - -func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcn beacon.RandomBeacon, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) { - ts, err := sm.ChainStore().LoadTipSet(tsk) - if err != nil { - return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err) - } - - prev, err := sm.ChainStore().GetLatestBeaconEntry(ts) - if err != nil { - if os.Getenv("LOTUS_IGNORE_DRAND") != "_yes_" { - return nil, xerrors.Errorf("failed to get latest beacon entry: %w", err) - } - - prev = &types.BeaconEntry{} - } - - entries, err := beacon.BeaconEntriesForBlock(ctx, bcn, round, *prev) - if err != nil { - return nil, err - } - - rbase := *prev - if len(entries) > 0 { - rbase = entries[len(entries)-1] - } - - lbts, err := GetLookbackTipSetForRound(ctx, sm, ts, round) - if err != nil { - return nil, xerrors.Errorf("getting lookback miner actor state: %w", err) - } - - lbst, _, err := sm.TipSetState(ctx, lbts) - if err != nil { - return nil, err - } - - var mas miner.State - if _, err := sm.LoadActorStateRaw(ctx, maddr, &mas, lbst); err != nil { - return nil, err - } - - buf := new(bytes.Buffer) - if err := maddr.MarshalCBOR(buf); err != nil { - return nil, xerrors.Errorf("failed to marshal miner address: %w", err) - } - - prand, err := store.DrawRandomness(rbase.Data, 
crypto.DomainSeparationTag_WinningPoStChallengeSeed, round, buf.Bytes()) - if err != nil { - return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err) - } - - sectors, err := GetSectorsForWinningPoSt(ctx, pv, sm, lbst, maddr, prand) - if err != nil { - return nil, xerrors.Errorf("getting wpost proving set: %w", err) - } - - if len(sectors) == 0 { - return nil, nil - } - - mpow, tpow, err := GetPowerRaw(ctx, sm, lbst, maddr) - if err != nil { - return nil, xerrors.Errorf("failed to get power: %w", err) - } - - worker, err := sm.ResolveToKeyAddress(ctx, mas.GetWorker(), ts) - if err != nil { - return nil, xerrors.Errorf("resolving worker address: %w", err) - } - - return &api.MiningBaseInfo{ - MinerPower: mpow.QualityAdjPower, - NetworkPower: tpow.QualityAdjPower, - Sectors: sectors, - WorkerKey: worker, - SectorSize: mas.Info.SectorSize, - PrevBeaconEntry: *prev, - BeaconEntries: entries, - }, nil -} - -type methodMeta struct { - Name string - - Params reflect.Type - Ret reflect.Type -} - -var MethodsMap = map[cid.Cid][]methodMeta{} - -func init() { - cidToMethods := map[cid.Cid][2]interface{}{ - // builtin.SystemActorCodeID: {builtin.MethodsSystem, system.Actor{} }- apparently it doesn't have methods - builtin.InitActorCodeID: {builtin.MethodsInit, init_.Actor{}}, - builtin.CronActorCodeID: {builtin.MethodsCron, cron.Actor{}}, - builtin.AccountActorCodeID: {builtin.MethodsAccount, account.Actor{}}, - builtin.StoragePowerActorCodeID: {builtin.MethodsPower, power.Actor{}}, - builtin.StorageMinerActorCodeID: {builtin.MethodsMiner, miner.Actor{}}, - builtin.StorageMarketActorCodeID: {builtin.MethodsMarket, market.Actor{}}, - builtin.PaymentChannelActorCodeID: {builtin.MethodsPaych, paych.Actor{}}, - builtin.MultisigActorCodeID: {builtin.MethodsMultisig, multisig.Actor{}}, - builtin.RewardActorCodeID: {builtin.MethodsReward, reward.Actor{}}, - builtin.VerifiedRegistryActorCodeID: {builtin.MethodsVerifiedRegistry, verifreg.Actor{}}, - } - - for c, 
m := range cidToMethods { - rt := reflect.TypeOf(m[0]) - nf := rt.NumField() - - MethodsMap[c] = append(MethodsMap[c], methodMeta{ - Name: "Send", - Params: reflect.TypeOf(new(adt.EmptyValue)), - Ret: reflect.TypeOf(new(adt.EmptyValue)), - }) - - exports := m[1].(abi.Invokee).Exports() - for i := 0; i < nf; i++ { - export := reflect.TypeOf(exports[i+1]) - - MethodsMap[c] = append(MethodsMap[c], methodMeta{ - Name: rt.Field(i).Name, - Params: export.In(1), - Ret: export.Out(0), - }) - } - } -} - -func GetReturnType(ctx context.Context, sm *StateManager, to address.Address, method abi.MethodNum, ts *types.TipSet) (cbg.CBORUnmarshaler, error) { - act, err := sm.GetActor(to, ts) - if err != nil { - return nil, err - } - - m := MethodsMap[act.Code][method] - return reflect.New(m.Ret.Elem()).Interface().(cbg.CBORUnmarshaler), nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/store/fts.go b/vendor/github.com/filecoin-project/lotus/chain/store/fts.go deleted file mode 100644 index 0324938d7b..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/store/fts.go +++ /dev/null @@ -1,54 +0,0 @@ -package store - -import ( - "github.com/filecoin-project/lotus/chain/types" - "github.com/ipfs/go-cid" -) - -// FullTipSet is an expanded version of the TipSet that contains all the blocks and messages -type FullTipSet struct { - Blocks []*types.FullBlock - tipset *types.TipSet - cids []cid.Cid -} - -func NewFullTipSet(blks []*types.FullBlock) *FullTipSet { - return &FullTipSet{ - Blocks: blks, - } -} - -func (fts *FullTipSet) Cids() []cid.Cid { - if fts.cids != nil { - return fts.cids - } - - var cids []cid.Cid - for _, b := range fts.Blocks { - cids = append(cids, b.Cid()) - } - fts.cids = cids - - return cids -} - -// TipSet returns a narrower view of this FullTipSet elliding the block -// messages. -func (fts *FullTipSet) TipSet() *types.TipSet { - if fts.tipset != nil { - // FIXME: fts.tipset is actually never set. Should it memoize? 
- return fts.tipset - } - - var headers []*types.BlockHeader - for _, b := range fts.Blocks { - headers = append(headers, b.Header) - } - - ts, err := types.NewTipSet(headers) - if err != nil { - panic(err) - } - - return ts -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/store/index.go b/vendor/github.com/filecoin-project/lotus/chain/store/index.go deleted file mode 100644 index 7edbf251fb..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/store/index.go +++ /dev/null @@ -1,161 +0,0 @@ -package store - -import ( - "context" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/abi" - lru "github.com/hashicorp/golang-lru" - "golang.org/x/xerrors" -) - -type ChainIndex struct { - skipCache *lru.ARCCache - - loadTipSet loadTipSetFunc - - skipLength abi.ChainEpoch -} -type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error) - -func NewChainIndex(lts loadTipSetFunc) *ChainIndex { - sc, _ := lru.NewARC(8192) - return &ChainIndex{ - skipCache: sc, - loadTipSet: lts, - skipLength: 20, - } -} - -type lbEntry struct { - ts *types.TipSet - parentHeight abi.ChainEpoch - targetHeight abi.ChainEpoch - target types.TipSetKey -} - -func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { - if from.Height()-to <= ci.skipLength { - return ci.walkBack(from, to) - } - - rounded, err := ci.roundDown(from) - if err != nil { - return nil, err - } - - cur := rounded.Key() - for { - cval, ok := ci.skipCache.Get(cur) - if !ok { - fc, err := ci.fillCache(cur) - if err != nil { - return nil, err - } - cval = fc - } - - lbe := cval.(*lbEntry) - if lbe.ts.Height() == to || lbe.parentHeight < to { - return lbe.ts, nil - } else if to > lbe.targetHeight { - return ci.walkBack(lbe.ts, to) - } - - cur = lbe.target - } -} - -func (ci *ChainIndex) GetTipsetByHeightWithoutCache(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { - return 
ci.walkBack(from, to) -} - -func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { - ts, err := ci.loadTipSet(tsk) - if err != nil { - return nil, err - } - - if ts.Height() == 0 { - return &lbEntry{ - ts: ts, - parentHeight: 0, - }, nil - } - - // will either be equal to ts.Height, or at least > ts.Parent.Height() - rheight := ci.roundHeight(ts.Height()) - - parent, err := ci.loadTipSet(ts.Parents()) - if err != nil { - return nil, err - } - - rheight -= ci.skipLength - - var skipTarget *types.TipSet - if parent.Height() < rheight { - skipTarget = parent - } else { - skipTarget, err = ci.walkBack(parent, rheight) - if err != nil { - return nil, xerrors.Errorf("fillCache walkback: %w", err) - } - } - - lbe := &lbEntry{ - ts: ts, - parentHeight: parent.Height(), - targetHeight: skipTarget.Height(), - target: skipTarget.Key(), - } - ci.skipCache.Add(tsk, lbe) - - return lbe, nil -} - -// floors to nearest skipLength multiple -func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch { - return (h / ci.skipLength) * ci.skipLength -} - -func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { - target := ci.roundHeight(ts.Height()) - - rounded, err := ci.walkBack(ts, target) - if err != nil { - return nil, err - } - - return rounded, nil -} - -func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { - if to > from.Height() { - return nil, xerrors.Errorf("looking for tipset with height greater than start point") - } - - if to == from.Height() { - return from, nil - } - - ts := from - - for { - pts, err := ci.loadTipSet(ts.Parents()) - if err != nil { - return nil, err - } - - if to > pts.Height() { - // in case pts is lower than the epoch we're looking for (null blocks) - // return a tipset above that height - return ts, nil - } - if to == pts.Height() { - return pts, nil - } - - ts = pts - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/store/index_test.go 
b/vendor/github.com/filecoin-project/lotus/chain/store/index_test.go deleted file mode 100644 index 73f4901f0c..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/store/index_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package store_test - -import ( - "bytes" - "context" - "testing" - - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/specs-actors/actors/abi" - datastore "github.com/ipfs/go-datastore" - syncds "github.com/ipfs/go-datastore/sync" - blockstore "github.com/ipfs/go-ipfs-blockstore" - "github.com/stretchr/testify/assert" -) - -func TestIndexSeeks(t *testing.T) { - cg, err := gen.NewGenerator() - if err != nil { - t.Fatal(err) - } - - gencar, err := cg.GenesisCar() - if err != nil { - t.Fatal(err) - } - - gen := cg.Genesis() - - ctx := context.TODO() - - nbs := blockstore.NewBlockstore(syncds.MutexWrap(datastore.NewMapDatastore())) - cs := store.NewChainStore(nbs, syncds.MutexWrap(datastore.NewMapDatastore()), nil) - - _, err = cs.Import(bytes.NewReader(gencar)) - if err != nil { - t.Fatal(err) - } - - cur := mock.TipSet(gen) - if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil { - t.Fatal(err) - } - cs.SetGenesis(gen) - - // Put 113 blocks from genesis - for i := 0; i < 113; i++ { - nextts := mock.TipSet(mock.MkBlock(cur, 1, 1)) - - if err := cs.PutTipSet(ctx, nextts); err != nil { - t.Fatal(err) - } - cur = nextts - } - - // Put 50 null epochs + 1 block - skip := mock.MkBlock(cur, 1, 1) - skip.Height += 50 - - skipts := mock.TipSet(skip) - - if err := cs.PutTipSet(ctx, skipts); err != nil { - t.Fatal(err) - } - - ts, err := cs.GetTipsetByHeight(ctx, skip.Height-10, skipts, false) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, abi.ChainEpoch(164), ts.Height()) - - for i := 0; i <= 113; i++ { - ts3, err := cs.GetTipsetByHeight(ctx, abi.ChainEpoch(i), skipts, false) - if err != nil { - t.Fatal(err) - 
} - assert.Equal(t, abi.ChainEpoch(i), ts3.Height()) - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/store/store.go b/vendor/github.com/filecoin-project/lotus/chain/store/store.go deleted file mode 100644 index 4dabb96f7e..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/store/store.go +++ /dev/null @@ -1,1209 +0,0 @@ -package store - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/json" - "io" - "os" - "sync" - - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/minio/blake2b-simd" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/metrics" - "go.opencensus.io/stats" - "go.opencensus.io/trace" - "go.uber.org/multierr" - - amt "github.com/filecoin-project/go-amt-ipld/v2" - - "github.com/filecoin-project/lotus/chain/types" - - lru "github.com/hashicorp/golang-lru" - block "github.com/ipfs/go-block-format" - "github.com/ipfs/go-cid" - dstore "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - car "github.com/ipld/go-car" - carutil "github.com/ipld/go-car/util" - cbg "github.com/whyrusleeping/cbor-gen" - pubsub "github.com/whyrusleeping/pubsub" - "golang.org/x/xerrors" -) - -var log = logging.Logger("chainstore") - -var chainHeadKey = dstore.NewKey("head") -var blockValidationCacheKeyPrefix = dstore.NewKey("blockValidation") - -// ReorgNotifee represents a callback that gets called upon reorgs. 
-type ReorgNotifee func(rev, app []*types.TipSet) error - -// ChainStore is the main point of access to chain data. -// -// Raw chain data is stored in the Blockstore, with relevant markers (genesis, -// latest head tipset references) being tracked in the Datastore (key-value -// store). -// -// To alleviate disk access, the ChainStore has two ARC caches: -// 1. a tipset cache -// 2. a block => messages references cache. -type ChainStore struct { - bs bstore.Blockstore - ds dstore.Datastore - - heaviestLk sync.Mutex - heaviest *types.TipSet - - bestTips *pubsub.PubSub - pubLk sync.Mutex - - tstLk sync.Mutex - tipsets map[abi.ChainEpoch][]cid.Cid - - cindex *ChainIndex - - reorgCh chan<- reorg - reorgNotifeeCh chan ReorgNotifee - - mmCache *lru.ARCCache - tsCache *lru.ARCCache - - vmcalls runtime.Syscalls -} - -func NewChainStore(bs bstore.Blockstore, ds dstore.Batching, vmcalls runtime.Syscalls) *ChainStore { - c, _ := lru.NewARC(2048) - tsc, _ := lru.NewARC(4096) - cs := &ChainStore{ - bs: bs, - ds: ds, - bestTips: pubsub.New(64), - tipsets: make(map[abi.ChainEpoch][]cid.Cid), - mmCache: c, - tsCache: tsc, - vmcalls: vmcalls, - } - - ci := NewChainIndex(cs.LoadTipSet) - - cs.cindex = ci - - hcnf := func(rev, app []*types.TipSet) error { - cs.pubLk.Lock() - defer cs.pubLk.Unlock() - - notif := make([]*api.HeadChange, len(rev)+len(app)) - - for i, r := range rev { - notif[i] = &api.HeadChange{ - Type: HCRevert, - Val: r, - } - } - for i, r := range app { - notif[i+len(rev)] = &api.HeadChange{ - Type: HCApply, - Val: r, - } - } - - cs.bestTips.Pub(notif, "headchange") - return nil - } - - hcmetric := func(rev, app []*types.TipSet) error { - ctx := context.Background() - for _, r := range app { - stats.Record(ctx, metrics.ChainNodeHeight.M(int64(r.Height()))) - } - return nil - } - - cs.reorgNotifeeCh = make(chan ReorgNotifee) - cs.reorgCh = cs.reorgWorker(context.TODO(), []ReorgNotifee{hcnf, hcmetric}) - - return cs -} - -func (cs *ChainStore) Load() error { - head, 
err := cs.ds.Get(chainHeadKey) - if err == dstore.ErrNotFound { - log.Warn("no previous chain state found") - return nil - } - if err != nil { - return xerrors.Errorf("failed to load chain state from datastore: %w", err) - } - - var tscids []cid.Cid - if err := json.Unmarshal(head, &tscids); err != nil { - return xerrors.Errorf("failed to unmarshal stored chain head: %w", err) - } - - ts, err := cs.LoadTipSet(types.NewTipSetKey(tscids...)) - if err != nil { - return xerrors.Errorf("loading tipset: %w", err) - } - - cs.heaviest = ts - - return nil -} - -func (cs *ChainStore) writeHead(ts *types.TipSet) error { - data, err := json.Marshal(ts.Cids()) - if err != nil { - return xerrors.Errorf("failed to marshal tipset: %w", err) - } - - if err := cs.ds.Put(chainHeadKey, data); err != nil { - return xerrors.Errorf("failed to write chain head to datastore: %w", err) - } - - return nil -} - -const ( - HCRevert = "revert" - HCApply = "apply" - HCCurrent = "current" -) - -func (cs *ChainStore) SubHeadChanges(ctx context.Context) chan []*api.HeadChange { - cs.pubLk.Lock() - subch := cs.bestTips.Sub("headchange") - head := cs.GetHeaviestTipSet() - cs.pubLk.Unlock() - - out := make(chan []*api.HeadChange, 16) - out <- []*api.HeadChange{{ - Type: HCCurrent, - Val: head, - }} - - go func() { - defer close(out) - var unsubOnce sync.Once - - for { - select { - case val, ok := <-subch: - if !ok { - log.Warn("chain head sub exit loop") - return - } - if len(out) > 0 { - log.Warnf("head change sub is slow, has %d buffered entries", len(out)) - } - select { - case out <- val.([]*api.HeadChange): - case <-ctx.Done(): - } - case <-ctx.Done(): - unsubOnce.Do(func() { - go cs.bestTips.Unsub(subch) - }) - } - } - }() - return out -} - -func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { - cs.reorgNotifeeCh <- f -} - -func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { - key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - - return 
cs.ds.Has(key) -} - -func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { - key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - - if err := cs.ds.Put(key, []byte{0}); err != nil { - return xerrors.Errorf("cache block validation: %w", err) - } - - return nil -} - -func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { - ts, err := types.NewTipSet([]*types.BlockHeader{b}) - if err != nil { - return err - } - - if err := cs.PutTipSet(context.TODO(), ts); err != nil { - return err - } - - return cs.ds.Put(dstore.NewKey("0"), b.Cid().Bytes()) -} - -func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { - for _, b := range ts.Blocks() { - if err := cs.PersistBlockHeaders(b); err != nil { - return err - } - } - - expanded, err := cs.expandTipset(ts.Blocks()[0]) - if err != nil { - return xerrors.Errorf("errored while expanding tipset: %w", err) - } - log.Debugf("expanded %s into %s\n", ts.Cids(), expanded.Cids()) - - if err := cs.MaybeTakeHeavierTipSet(ctx, expanded); err != nil { - return xerrors.Errorf("MaybeTakeHeavierTipSet failed in PutTipSet: %w", err) - } - return nil -} - -// MaybeTakeHeavierTipSet evaluates the incoming tipset and locks it in our -// internal state as our new head, if and only if it is heavier than the current -// head. -func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipSet) error { - cs.heaviestLk.Lock() - defer cs.heaviestLk.Unlock() - w, err := cs.Weight(ctx, ts) - if err != nil { - return err - } - heaviestW, err := cs.Weight(ctx, cs.heaviest) - if err != nil { - return err - } - - if w.GreaterThan(heaviestW) { - // TODO: don't do this for initial sync. Now that we don't have a - // difference between 'bootstrap sync' and 'caught up' sync, we need - // some other heuristic. 
- return cs.takeHeaviestTipSet(ctx, ts) - } - return nil -} - -type reorg struct { - old *types.TipSet - new *types.TipSet -} - -func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNotifee) chan<- reorg { - out := make(chan reorg, 32) - notifees := make([]ReorgNotifee, len(initialNotifees)) - copy(notifees, initialNotifees) - - go func() { - defer log.Warn("reorgWorker quit") - - for { - select { - case n := <-cs.reorgNotifeeCh: - notifees = append(notifees, n) - - case r := <-out: - revert, apply, err := cs.ReorgOps(r.old, r.new) - if err != nil { - log.Error("computing reorg ops failed: ", err) - continue - } - - // reverse the apply array - for i := len(apply)/2 - 1; i >= 0; i-- { - opp := len(apply) - 1 - i - apply[i], apply[opp] = apply[opp], apply[i] - } - - for _, hcf := range notifees { - if err := hcf(revert, apply); err != nil { - log.Error("head change func errored (BAD): ", err) - } - } - case <-ctx.Done(): - return - } - } - }() - return out -} - -// takeHeaviestTipSet actually sets the incoming tipset as our head both in -// memory and in the ChainStore. It also sends a notification to deliver to -// ReorgNotifees. -func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) error { - _, span := trace.StartSpan(ctx, "takeHeaviestTipSet") - defer span.End() - - if cs.heaviest != nil { // buf - if len(cs.reorgCh) > 0 { - log.Warnf("Reorg channel running behind, %d reorgs buffered", len(cs.reorgCh)) - } - cs.reorgCh <- reorg{ - old: cs.heaviest, - new: ts, - } - } else { - log.Warnf("no heaviest tipset found, using %s", ts.Cids()) - } - - span.AddAttributes(trace.BoolAttribute("newHead", true)) - - log.Infof("New heaviest tipset! %s (height=%d)", ts.Cids(), ts.Height()) - cs.heaviest = ts - - if err := cs.writeHead(ts); err != nil { - log.Errorf("failed to write chain head: %s", err) - return nil - } - - return nil -} - -// SetHead sets the chainstores current 'best' head node. 
-// This should only be called if something is broken and needs fixing -func (cs *ChainStore) SetHead(ts *types.TipSet) error { - cs.heaviestLk.Lock() - defer cs.heaviestLk.Unlock() - return cs.takeHeaviestTipSet(context.TODO(), ts) -} - -// Contains returns whether our BlockStore has all blocks in the supplied TipSet. -func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { - for _, c := range ts.Cids() { - has, err := cs.bs.Has(c) - if err != nil { - return false, err - } - - if !has { - return false, nil - } - } - return true, nil -} - -// GetBlock fetches a BlockHeader with the supplied CID. It returns -// blockstore.ErrNotFound if the block was not found in the BlockStore. -func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { - sb, err := cs.bs.Get(c) - if err != nil { - return nil, err - } - - return types.DecodeBlock(sb.RawData()) -} - -func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { - v, ok := cs.tsCache.Get(tsk) - if ok { - return v.(*types.TipSet), nil - } - - var blks []*types.BlockHeader - for _, c := range tsk.Cids() { - b, err := cs.GetBlock(c) - if err != nil { - return nil, xerrors.Errorf("get block %s: %w", c, err) - } - - blks = append(blks, b) - } - - ts, err := types.NewTipSet(blks) - if err != nil { - return nil, err - } - - cs.tsCache.Add(tsk, ts) - - return ts, nil -} - -// IsAncestorOf returns true if 'a' is an ancestor of 'b' -func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { - if b.Height() <= a.Height() { - return false, nil - } - - cur := b - for !a.Equals(cur) && cur.Height() > a.Height() { - next, err := cs.LoadTipSet(b.Parents()) - if err != nil { - return false, err - } - - cur = next - } - - return cur.Equals(a), nil -} - -func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, error) { - l, _, err := cs.ReorgOps(a, b) - if err != nil { - return nil, err - } - - return cs.LoadTipSet(l[len(l)-1].Parents()) -} - -func (cs 
*ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { - left := a - right := b - - var leftChain, rightChain []*types.TipSet - for !left.Equals(right) { - if left.Height() > right.Height() { - leftChain = append(leftChain, left) - par, err := cs.LoadTipSet(left.Parents()) - if err != nil { - return nil, nil, err - } - - left = par - } else { - rightChain = append(rightChain, right) - par, err := cs.LoadTipSet(right.Parents()) - if err != nil { - log.Infof("failed to fetch right.Parents: %s", err) - return nil, nil, err - } - - right = par - } - } - - return leftChain, rightChain, nil -} - -// GetHeaviestTipSet returns the current heaviest tipset known (i.e. our head). -func (cs *ChainStore) GetHeaviestTipSet() *types.TipSet { - cs.heaviestLk.Lock() - defer cs.heaviestLk.Unlock() - return cs.heaviest -} - -func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { - cs.tstLk.Lock() - defer cs.tstLk.Unlock() - - tss := cs.tipsets[b.Height] - for _, oc := range tss { - if oc == b.Cid() { - log.Debug("tried to add block to tipset tracker that was already there") - return nil - } - } - - cs.tipsets[b.Height] = append(tss, b.Cid()) - - // TODO: do we want to look for slashable submissions here? might as well... 
- - return nil -} - -func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { - sbs := make([]block.Block, len(b)) - - for i, header := range b { - var err error - sbs[i], err = header.ToStorageBlock() - if err != nil { - return err - } - } - - batchSize := 256 - calls := len(b) / batchSize - - var err error - for i := 0; i <= calls; i++ { - start := batchSize * i - end := start + batchSize - if end > len(b) { - end = len(b) - } - - err = multierr.Append(err, cs.bs.PutMany(sbs[start:end])) - } - - return err -} - -type storable interface { - ToStorageBlock() (block.Block, error) -} - -func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) { - b, err := m.ToStorageBlock() - if err != nil { - return cid.Undef, err - } - - if err := bs.Put(b); err != nil { - return cid.Undef, err - } - - return b.Cid(), nil -} - -func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) { - return PutMessage(cs.bs, m) -} - -func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) { - // Hold lock for the whole function for now, if it becomes a problem we can - // fix pretty easily - cs.tstLk.Lock() - defer cs.tstLk.Unlock() - - all := []*types.BlockHeader{b} - - tsets, ok := cs.tipsets[b.Height] - if !ok { - return types.NewTipSet(all) - } - - inclMiners := map[address.Address]bool{b.Miner: true} - for _, bhc := range tsets { - if bhc == b.Cid() { - continue - } - - h, err := cs.GetBlock(bhc) - if err != nil { - return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err) - } - - if inclMiners[h.Miner] { - log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache", h.Miner, h.Height) - continue - } - - if types.CidArrsEqual(h.Parents, b.Parents) { - all = append(all, h) - inclMiners[h.Miner] = true - } - } - - // TODO: other validation...? 
- - return types.NewTipSet(all) -} - -func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error { - if err := cs.PersistBlockHeaders(b); err != nil { - return err - } - - ts, err := cs.expandTipset(b) - if err != nil { - return err - } - - if err := cs.MaybeTakeHeavierTipSet(ctx, ts); err != nil { - return xerrors.Errorf("MaybeTakeHeavierTipSet failed: %w", err) - } - - return nil -} - -func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { - data, err := cs.ds.Get(dstore.NewKey("0")) - if err != nil { - return nil, err - } - - c, err := cid.Cast(data) - if err != nil { - return nil, err - } - - genb, err := cs.bs.Get(c) - if err != nil { - return nil, err - } - - return types.DecodeBlock(genb.RawData()) -} - -func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { - m, err := cs.GetMessage(c) - if err == nil { - return m, nil - } - if err != bstore.ErrNotFound { - log.Warn("GetCMessage: unexpected error getting unsigned message: %s", err) - } - - return cs.GetSignedMessage(c) -} - -func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { - sb, err := cs.bs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err - } - - return types.DecodeMessage(sb.RawData()) -} - -func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) { - sb, err := cs.bs.Get(c) - if err != nil { - log.Errorf("get message get failed: %s: %s", c, err) - return nil, err - } - - return types.DecodeSignedMessage(sb.RawData()) -} - -func (cs *ChainStore) readAMTCids(root cid.Cid) ([]cid.Cid, error) { - ctx := context.TODO() - bs := cbor.NewCborStore(cs.bs) - a, err := amt.LoadAMT(ctx, bs, root) - if err != nil { - return nil, xerrors.Errorf("amt load: %w", err) - } - - var cids []cid.Cid - for i := uint64(0); i < a.Count; i++ { - var c cbg.CborCid - if err := a.Get(ctx, i, &c); err != nil { - return nil, xerrors.Errorf("failed to load cid from amt: %w", err) - } - - cids = 
append(cids, cid.Cid(c)) - } - - return cids, nil -} - -func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - applied := make(map[address.Address]uint64) - balances := make(map[address.Address]types.BigInt) - - cst := cbor.NewCborStore(cs.bs) - st, err := state.LoadStateTree(cst, ts.Blocks()[0].ParentStateRoot) - if err != nil { - return nil, xerrors.Errorf("failed to load state tree") - } - - preloadAddr := func(a address.Address) error { - if _, ok := applied[a]; !ok { - act, err := st.GetActor(a) - if err != nil { - return err - } - - applied[a] = act.Nonce - balances[a] = act.Balance - } - return nil - } - - var out []types.ChainMsg - for _, b := range ts.Blocks() { - bms, sms, err := cs.MessagesForBlock(b) - if err != nil { - return nil, xerrors.Errorf("failed to get messages for block: %w", err) - } - - cmsgs := make([]types.ChainMsg, 0, len(bms)+len(sms)) - for _, m := range bms { - cmsgs = append(cmsgs, m) - } - for _, sm := range sms { - cmsgs = append(cmsgs, sm) - } - - for _, cm := range cmsgs { - m := cm.VMMessage() - if err := preloadAddr(m.From); err != nil { - return nil, err - } - - if applied[m.From] != m.Nonce { - continue - } - applied[m.From]++ - - if balances[m.From].LessThan(m.RequiredFunds()) { - continue - } - balances[m.From] = types.BigSub(balances[m.From], m.RequiredFunds()) - - out = append(out, cm) - } - } - - return out, nil -} - -type mmCids struct { - bls []cid.Cid - secpk []cid.Cid -} - -func (cs *ChainStore) readMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { - o, ok := cs.mmCache.Get(mmc) - if ok { - mmcids := o.(*mmCids) - return mmcids.bls, mmcids.secpk, nil - } - - cst := cbor.NewCborStore(cs.bs) - var msgmeta types.MsgMeta - if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { - return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) - } - - blscids, err := cs.readAMTCids(msgmeta.BlsMessages) - if err != nil { - return nil, nil, xerrors.Errorf("loading 
bls message cids for block: %w", err) - } - - secpkcids, err := cs.readAMTCids(msgmeta.SecpkMessages) - if err != nil { - return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err) - } - - cs.mmCache.Add(mmc, &mmCids{ - bls: blscids, - secpk: secpkcids, - }) - - return blscids, secpkcids, nil -} - -func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { - fts, err := cs.LoadTipSet(from) - if err != nil { - return nil, xerrors.Errorf("loading from tipset %s: %w", from, err) - } - tts, err := cs.LoadTipSet(to) - if err != nil { - return nil, xerrors.Errorf("loading to tipset %s: %w", to, err) - } - revert, apply, err := cs.ReorgOps(fts, tts) - if err != nil { - return nil, xerrors.Errorf("error getting tipset branches: %w", err) - } - - path := make([]*api.HeadChange, len(revert)+len(apply)) - for i, r := range revert { - path[i] = &api.HeadChange{Type: HCRevert, Val: r} - } - for j, i := 0, len(apply)-1; i >= 0; j, i = j+1, i-1 { - path[j+len(revert)] = &api.HeadChange{Type: HCApply, Val: apply[i]} - } - return path, nil -} - -func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - blscids, secpkcids, err := cs.readMsgMetaCids(b.Messages) - if err != nil { - return nil, nil, err - } - - blsmsgs, err := cs.LoadMessagesFromCids(blscids) - if err != nil { - return nil, nil, xerrors.Errorf("loading bls messages for block: %w", err) - } - - secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) - if err != nil { - return nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err) - } - - return blsmsgs, secpkmsgs, nil -} - -func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { - ctx := context.TODO() - bs := cbor.NewCborStore(cs.bs) - a, err := amt.LoadAMT(ctx, bs, b.ParentMessageReceipts) - if err != nil { - return nil, xerrors.Errorf("amt load: %w", err) - } - - 
var r types.MessageReceipt - if err := a.Get(ctx, uint64(i), &r); err != nil { - return nil, err - } - - return &r, nil -} - -func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, error) { - msgs := make([]*types.Message, 0, len(cids)) - for i, c := range cids { - m, err := cs.GetMessage(c) - if err != nil { - return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i) - } - - msgs = append(msgs, m) - } - - return msgs, nil -} - -func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) { - msgs := make([]*types.SignedMessage, 0, len(cids)) - for i, c := range cids { - m, err := cs.GetSignedMessage(c) - if err != nil { - return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", err, c, i) - } - - msgs = append(msgs, m) - } - - return msgs, nil -} - -func (cs *ChainStore) Blockstore() bstore.Blockstore { - return cs.bs -} - -func ActorStore(ctx context.Context, bs blockstore.Blockstore) adt.Store { - return &astore{ - cst: cbor.NewCborStore(bs), - ctx: ctx, - } -} - -type astore struct { - cst cbor.IpldStore - ctx context.Context -} - -func (a *astore) Context() context.Context { - return a.ctx -} - -func (a *astore) Get(ctx context.Context, c cid.Cid, out interface{}) error { - return a.cst.Get(ctx, c, out) -} - -func (a *astore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { - return a.cst.Put(ctx, v) -} - -func (cs *ChainStore) Store(ctx context.Context) adt.Store { - return ActorStore(ctx, cs.bs) -} - -func (cs *ChainStore) VMSys() runtime.Syscalls { - return cs.vmcalls -} - -func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { - var out []*types.FullBlock - - for _, b := range ts.Blocks() { - bmsgs, smsgs, err := cs.MessagesForBlock(b) - if err != nil { - // TODO: check for 'not found' errors, and only return nil if this - // is actually a 'not found' error - return nil, nil - } - - fb := &types.FullBlock{ - Header: b, - BlsMessages: bmsgs, 
- SecpkMessages: smsgs, - } - - out = append(out, fb) - } - return NewFullTipSet(out), nil -} - -func DrawRandomness(rbase []byte, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - h := blake2b.New256() - if err := binary.Write(h, binary.BigEndian, int64(pers)); err != nil { - return nil, xerrors.Errorf("deriving randomness: %w", err) - } - VRFDigest := blake2b.Sum256(rbase) - _, err := h.Write(VRFDigest[:]) - if err != nil { - return nil, xerrors.Errorf("hashing VRFDigest: %w", err) - } - if err := binary.Write(h, binary.BigEndian, round); err != nil { - return nil, xerrors.Errorf("deriving randomness: %w", err) - } - _, err = h.Write(entropy) - if err != nil { - return nil, xerrors.Errorf("hashing entropy: %w", err) - } - - return h.Sum(nil), nil -} - -func (cs *ChainStore) GetRandomness(ctx context.Context, blks []cid.Cid, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - _, span := trace.StartSpan(ctx, "store.GetRandomness") - defer span.End() - span.AddAttributes(trace.Int64Attribute("round", int64(round))) - - ts, err := cs.LoadTipSet(types.NewTipSetKey(blks...)) - if err != nil { - return nil, err - } - - if round > ts.Height() { - return nil, xerrors.Errorf("cannot draw randomness from the future") - } - - searchHeight := round - if searchHeight < 0 { - searchHeight = 0 - } - - randTs, err := cs.GetTipsetByHeight(ctx, searchHeight, ts, true) - if err != nil { - return nil, err - } - - mtb := randTs.MinTicketBlock() - - // if at (or just past -- for null epochs) appropriate epoch - // or at genesis (works for negative epochs) - return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy) -} - -// GetTipsetByHeight returns the tipset on the chain behind 'ts' at the given -// height. In the case that the given height is a null round, the 'prev' flag -// selects the tipset before the null round if true, and the tipset following -// the null round if false. 
-func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, ts *types.TipSet, prev bool) (*types.TipSet, error) { - if ts == nil { - ts = cs.GetHeaviestTipSet() - } - - if h > ts.Height() { - return nil, xerrors.Errorf("looking for tipset with height greater than start point") - } - - if h == ts.Height() { - return ts, nil - } - - lbts, err := cs.cindex.GetTipsetByHeight(ctx, ts, h) - if err != nil { - return nil, err - } - - if lbts.Height() < h { - log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h) - lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h) - if err != nil { - return nil, err - } - } - - if lbts.Height() == h || !prev { - return lbts, nil - } - - return cs.LoadTipSet(lbts.Parents()) -} - -func recurseLinks(bs blockstore.Blockstore, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { - if root.Prefix().Codec != cid.DagCBOR { - return in, nil - } - - data, err := bs.Get(root) - if err != nil { - return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err) - } - - top, err := cbg.ScanForLinks(bytes.NewReader(data.RawData())) - if err != nil { - return nil, xerrors.Errorf("scanning for links failed: %w", err) - } - - in = append(in, top...) 
- for _, c := range top { - var err error - in, err = recurseLinks(bs, c, in) - if err != nil { - return nil, err - } - } - - return in, nil -} - -func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, w io.Writer) error { - if ts == nil { - ts = cs.GetHeaviestTipSet() - } - - seen := cid.NewSet() - - h := &car.CarHeader{ - Roots: ts.Cids(), - Version: 1, - } - - if err := car.WriteHeader(h, w); err != nil { - return xerrors.Errorf("failed to write car header: %s", err) - } - - blocksToWalk := ts.Cids() - - walkChain := func(blk cid.Cid) error { - if !seen.Visit(blk) { - return nil - } - - data, err := cs.bs.Get(blk) - if err != nil { - return xerrors.Errorf("getting block: %w", err) - } - - if err := carutil.LdWrite(w, blk.Bytes(), data.RawData()); err != nil { - return xerrors.Errorf("failed to write block to car output: %w", err) - } - - var b types.BlockHeader - if err := b.UnmarshalCBOR(bytes.NewBuffer(data.RawData())); err != nil { - return xerrors.Errorf("unmarshaling block header (cid=%s): %w", blk, err) - } - - for _, p := range b.Parents { - blocksToWalk = append(blocksToWalk, p) - } - - cids, err := recurseLinks(cs.bs, b.Messages, []cid.Cid{b.Messages}) - if err != nil { - return xerrors.Errorf("recursing messages failed: %w", err) - } - - out := cids - - if b.Height == 0 { - cids, err := recurseLinks(cs.bs, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) - if err != nil { - return xerrors.Errorf("recursing genesis state failed: %w", err) - } - - out = append(out, cids...) 
- } - - for _, c := range out { - if seen.Visit(c) { - if c.Prefix().Codec != cid.DagCBOR { - continue - } - data, err := cs.bs.Get(c) - if err != nil { - return xerrors.Errorf("writing object to car (get %s): %w", c, err) - } - - if err := carutil.LdWrite(w, c.Bytes(), data.RawData()); err != nil { - return xerrors.Errorf("failed to write out car object: %w", err) - } - } - } - - return nil - } - - for len(blocksToWalk) > 0 { - next := blocksToWalk[0] - blocksToWalk = blocksToWalk[1:] - if err := walkChain(next); err != nil { - return xerrors.Errorf("walk chain failed: %w", err) - } - } - - return nil -} - -func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { - header, err := car.LoadCar(cs.Blockstore(), r) - if err != nil { - return nil, xerrors.Errorf("loadcar failed: %w", err) - } - - root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...)) - if err != nil { - return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) - } - - return root, nil -} - -func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) { - cur := ts - for i := 0; i < 20; i++ { - cbe := cur.Blocks()[0].BeaconEntries - if len(cbe) > 0 { - return &cbe[len(cbe)-1], nil - } - - if cur.Height() == 0 { - return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") - } - - next, err := cs.LoadTipSet(cur.Parents()) - if err != nil { - return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) - } - cur = next - } - - if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" { - return &types.BeaconEntry{ - Data: []byte{9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9}, - }, nil - } - - return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset") -} - -type chainRand struct { - cs *ChainStore - blks []cid.Cid - bh abi.ChainEpoch -} - -func NewChainRand(cs *ChainStore, blks []cid.Cid, bheight abi.ChainEpoch) vm.Rand { - return &chainRand{ 
- cs: cs, - blks: blks, - bh: bheight, - } -} - -func (cr *chainRand) GetRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return cr.cs.GetRandomness(ctx, cr.blks, pers, round, entropy) -} - -func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { - if tsk.IsEmpty() { - return cs.GetHeaviestTipSet(), nil - } - return cs.LoadTipSet(tsk) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/store/store_test.go b/vendor/github.com/filecoin-project/lotus/chain/store/store_test.go deleted file mode 100644 index 939c85d201..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/store/store_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package store_test - -import ( - "bytes" - "context" - "testing" - - datastore "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/crypto" - - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/repo" -) - -func init() { - miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ - abi.RegisteredSealProof_StackedDrg2KiBV1: {}, - } - power.ConsensusMinerMinPower = big.NewInt(2048) - verifreg.MinVerifiedDealSize = big.NewInt(256) -} - -func BenchmarkGetRandomness(b *testing.B) { - cg, err := gen.NewGenerator() - if err != nil { - b.Fatal(err) - } - - var last *types.TipSet - for i := 0; i < 2000; i++ { - ts, err := cg.NextTipSet() - if err != nil { - b.Fatal(err) - } - - last = ts.TipSet.TipSet() 
- } - - r, err := cg.YieldRepo() - if err != nil { - b.Fatal(err) - } - - lr, err := r.Lock(repo.FullNode) - if err != nil { - b.Fatal(err) - } - - bds, err := lr.Datastore("/chain") - if err != nil { - b.Fatal(err) - } - - mds, err := lr.Datastore("/metadata") - if err != nil { - b.Fatal(err) - } - - bs := blockstore.NewBlockstore(bds) - - cs := store.NewChainStore(bs, mds, nil) - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _, err := cs.GetRandomness(context.TODO(), last.Cids(), crypto.DomainSeparationTag_SealRandomness, 500, nil) - if err != nil { - b.Fatal(err) - } - } -} - -func TestChainExportImport(t *testing.T) { - cg, err := gen.NewGenerator() - if err != nil { - t.Fatal(err) - } - - var last *types.TipSet - for i := 0; i < 100; i++ { - ts, err := cg.NextTipSet() - if err != nil { - t.Fatal(err) - } - - last = ts.TipSet.TipSet() - } - - buf := new(bytes.Buffer) - if err := cg.ChainStore().Export(context.TODO(), last, buf); err != nil { - t.Fatal(err) - } - - nbs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - cs := store.NewChainStore(nbs, datastore.NewMapDatastore(), nil) - - root, err := cs.Import(buf) - if err != nil { - t.Fatal(err) - } - - if !root.Equals(last) { - t.Fatal("imported chain differed from exported chain") - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/store/weight.go b/vendor/github.com/filecoin-project/lotus/chain/store/weight.go deleted file mode 100644 index c92f1a74da..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/store/weight.go +++ /dev/null @@ -1,108 +0,0 @@ -package store - -import ( - "context" - "math/big" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - big2 "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - 
"github.com/filecoin-project/specs-actors/actors/builtin/power" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" -) - -var zero = types.NewInt(0) - -func (cs *ChainStore) Weight(ctx context.Context, ts *types.TipSet) (types.BigInt, error) { - if ts == nil { - return types.NewInt(0), nil - } - // >>> w[r] <<< + wFunction(totalPowerAtTipset(ts)) * 2^8 + (wFunction(totalPowerAtTipset(ts)) * len(ts.blocks) * wRatio_num * 2^8) / (e * wRatio_den) - - var out = new(big.Int).Set(ts.Blocks()[0].ParentWeight.Int) - - // >>> wFunction(totalPowerAtTipset(ts)) * 2^8 <<< + (wFunction(totalPowerAtTipset(ts)) * len(ts.blocks) * wRatio_num * 2^8) / (e * wRatio_den) - - tpow := big2.Zero() - { - cst := cbor.NewCborStore(cs.Blockstore()) - state, err := state.LoadStateTree(cst, ts.ParentState()) - if err != nil { - return types.NewInt(0), xerrors.Errorf("load state tree: %w", err) - } - - act, err := state.GetActor(builtin.StoragePowerActorAddr) - if err != nil { - return types.NewInt(0), xerrors.Errorf("get power actor: %w", err) - } - - var st power.State - if err := cst.Get(ctx, act.Head, &st); err != nil { - return types.NewInt(0), xerrors.Errorf("get power actor head: %w", err) - } - tpow = st.TotalQualityAdjPower // TODO: REVIEW: Is this correct? - } - - log2P := int64(0) - if tpow.GreaterThan(zero) { - log2P = int64(tpow.BitLen() - 1) - } else { - // Not really expect to be here ... - return types.EmptyInt, xerrors.Errorf("All power in the net is gone. 
You network might be disconnected, or the net is dead!") - } - - out.Add(out, big.NewInt(log2P<<8)) - - // (wFunction(totalPowerAtTipset(ts)) * len(ts.blocks) * wRatio_num * 2^8) / (e * wRatio_den) - - eWeight := big.NewInt((log2P * int64(len(ts.Blocks())) * build.WRatioNum) << 8) - eWeight.Div(eWeight, big.NewInt(int64(build.BlocksPerEpoch*build.WRatioDen))) - out.Add(out, eWeight) - - return types.BigInt{Int: out}, nil -} - -// todo: dedupe with state manager -func (cs *ChainStore) call(ctx context.Context, msg *types.Message, ts *types.TipSet) (*types.MessageReceipt, error) { - bstate := ts.ParentState() - - r := NewChainRand(cs, ts.Cids(), ts.Height()) - - vmi, err := vm.NewVM(bstate, ts.Height(), r, cs.bs, cs.vmcalls) - if err != nil { - return nil, xerrors.Errorf("failed to set up vm: %w", err) - } - - if msg.GasLimit == 0 { - msg.GasLimit = 10000000000 - } - if msg.GasPrice == types.EmptyInt { - msg.GasPrice = types.NewInt(0) - } - if msg.Value == types.EmptyInt { - msg.Value = types.NewInt(0) - } - - fromActor, err := vmi.StateTree().GetActor(msg.From) - if err != nil { - return nil, xerrors.Errorf("call raw get actor: %s", err) - } - - msg.Nonce = fromActor.Nonce - - // TODO: maybe just use the invoker directly? 
- // TODO: use signed message length for secp messages - ret, err := vmi.ApplyMessage(ctx, msg) - if err != nil { - return nil, xerrors.Errorf("apply message failed: %w", err) - } - - if ret.ActorErr != nil { - log.Warnf("chain call failed: %s", ret.ActorErr) - } - return &ret.MessageReceipt, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/sub/incoming.go b/vendor/github.com/filecoin-project/lotus/chain/sub/incoming.go deleted file mode 100644 index ba61dc5c56..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/sub/incoming.go +++ /dev/null @@ -1,400 +0,0 @@ -package sub - -import ( - "bytes" - "context" - "fmt" - "sync" - "time" - - "golang.org/x/xerrors" - - address "github.com/filecoin-project/go-address" - amt "github.com/filecoin-project/go-amt-ipld/v2" - miner "github.com/filecoin-project/specs-actors/actors/builtin/miner" - lru "github.com/hashicorp/golang-lru" - "github.com/ipfs/go-cid" - dstore "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - connmgr "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/peer" - pubsub "github.com/libp2p/go-libp2p-pubsub" - cbg "github.com/whyrusleeping/cbor-gen" - "go.opencensus.io/stats" - "go.opencensus.io/tag" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/messagepool" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/bufbstore" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/metrics" -) - -var log = logging.Logger("sub") - -func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *chain.Syncer, cmgr connmgr.ConnManager) { - for { - 
msg, err := bsub.Next(ctx) - if err != nil { - if ctx.Err() != nil { - log.Warn("quitting HandleIncomingBlocks loop") - return - } - log.Error("error from block subscription: ", err) - continue - } - - blk, ok := msg.ValidatorData.(*types.BlockMsg) - if !ok { - log.Warnf("pubsub block validator passed on wrong type: %#v", msg.ValidatorData) - return - } - - src := msg.GetFrom() - - go func() { - start := time.Now() - log.Debug("about to fetch messages for block from pubsub") - bmsgs, err := s.Bsync.FetchMessagesByCids(context.TODO(), blk.BlsMessages) - if err != nil { - log.Errorf("failed to fetch all bls messages for block received over pubusb: %s; source: %s", err, src) - return - } - - smsgs, err := s.Bsync.FetchSignedMessagesByCids(context.TODO(), blk.SecpkMessages) - if err != nil { - log.Errorf("failed to fetch all secpk messages for block received over pubusb: %s; source: %s", err, src) - return - } - - took := time.Since(start) - log.Infow("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) - if delay := time.Now().Unix() - int64(blk.Header.Timestamp); delay > 5 { - log.Warnf("Received block with large delay %d from miner %s", delay, blk.Header.Miner) - } - - if s.InformNewBlock(msg.ReceivedFrom, &types.FullBlock{ - Header: blk.Header, - BlsMessages: bmsgs, - SecpkMessages: smsgs, - }) { - cmgr.TagPeer(msg.ReceivedFrom, "blkprop", 5) - } - }() - } -} - -type BlockValidator struct { - peers *lru.TwoQueueCache - - killThresh int - - recvBlocks *blockReceiptCache - - blacklist func(peer.ID) - - // necessary for block validation - chain *store.ChainStore - stmgr *stmgr.StateManager - - mx sync.Mutex - keycache map[string]address.Address -} - -func NewBlockValidator(chain *store.ChainStore, stmgr *stmgr.StateManager, blacklist func(peer.ID)) *BlockValidator { - p, _ := lru.New2Q(4096) - return &BlockValidator{ - peers: p, - killThresh: 10, - blacklist: blacklist, - recvBlocks: newBlockReceiptCache(), - chain: chain, - 
stmgr: stmgr, - keycache: make(map[string]address.Address), - } -} - -func (bv *BlockValidator) flagPeer(p peer.ID) { - v, ok := bv.peers.Get(p) - if !ok { - bv.peers.Add(p, int(1)) - return - } - - val := v.(int) - - if val >= bv.killThresh { - log.Warnf("blacklisting peer %s", p) - bv.blacklist(p) - return - } - - bv.peers.Add(p, v.(int)+1) -} - -func (bv *BlockValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - // track validation time - begin := time.Now() - defer func() { - log.Debugf("block validation time: %s", time.Since(begin)) - }() - - stats.Record(ctx, metrics.BlockReceived.M(1)) - - recordFailure := func(what string) { - ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, what)) - stats.Record(ctx, metrics.BlockValidationFailure.M(1)) - bv.flagPeer(pid) - } - - // make sure the block can be decoded - blk, err := types.DecodeBlockMsg(msg.GetData()) - if err != nil { - log.Error("got invalid block over pubsub: ", err) - recordFailure("invalid") - return pubsub.ValidationReject - } - - // check the message limit constraints - if len(blk.BlsMessages)+len(blk.SecpkMessages) > build.BlockMessageLimit { - log.Warnf("received block with too many messages over pubsub") - recordFailure("too_many_messages") - return pubsub.ValidationReject - } - - // make sure we have a signature - if blk.Header.BlockSig == nil { - log.Warnf("received block without a signature over pubsub") - recordFailure("missing_signature") - return pubsub.ValidationReject - } - - // validate the block meta: the Message CID in the header must match the included messages - err = bv.validateMsgMeta(ctx, blk) - if err != nil { - log.Warnf("error validating message metadata: %s", err) - recordFailure("invalid_block_meta") - return pubsub.ValidationReject - } - - // we want to ensure that it is a block from a known miner; we reject blocks from unknown miners - // to prevent spam attacks. 
- // the logic works as follows: we lookup the miner in the chain for its key. - // if we can find it then it's a known miner and we can validate the signature. - // if we can't find it, we check whether we are (near) synced in the chain. - // if we are not synced we cannot validate the block and we must ignore it. - // if we are synced and the miner is unknown, then the block is rejcected. - key, err := bv.getMinerWorkerKey(ctx, blk) - if err != nil { - if bv.isChainNearSynced() { - log.Warnf("received block message from unknown miner over pubsub; rejecting message") - recordFailure("unknown_miner") - return pubsub.ValidationReject - } else { - log.Warnf("cannot validate block message; unknown miner in unsynced chain") - return pubsub.ValidationIgnore - } - } - - err = sigs.CheckBlockSignature(ctx, blk.Header, key) - if err != nil { - log.Errorf("block signature verification failed: %s", err) - recordFailure("signature_verification_failed") - return pubsub.ValidationReject - } - - // it's a good block! 
make sure we've only seen it once - if bv.recvBlocks.add(blk.Header.Cid()) > 0 { - // TODO: once these changes propagate to the network, we can consider - // dropping peers who send us the same block multiple times - return pubsub.ValidationIgnore - } - - // all good, accept the block - msg.ValidatorData = blk - stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) - return pubsub.ValidationAccept -} - -func (bv *BlockValidator) isChainNearSynced() bool { - ts := bv.chain.GetHeaviestTipSet() - timestamp := ts.MinTimestamp() - now := time.Now().UnixNano() - cutoff := uint64(now) - uint64(6*time.Hour) - return timestamp > cutoff -} - -func (bv *BlockValidator) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { - var bcids, scids []cbg.CBORMarshaler - for _, m := range msg.BlsMessages { - c := cbg.CborCid(m) - bcids = append(bcids, &c) - } - - for _, m := range msg.SecpkMessages { - c := cbg.CborCid(m) - scids = append(scids, &c) - } - - // TODO there has to be a simpler way to do this without the blockstore dance - bs := cbor.NewCborStore(bstore.NewBlockstore(dstore.NewMapDatastore())) - - bmroot, err := amt.FromArray(ctx, bs, bcids) - if err != nil { - return err - } - - smroot, err := amt.FromArray(ctx, bs, scids) - if err != nil { - return err - } - - mrcid, err := bs.Put(ctx, &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - - if err != nil { - return err - } - - if msg.Header.Messages != mrcid { - return fmt.Errorf("messages didn't match root cid in header") - } - - return nil -} - -func (bv *BlockValidator) getMinerWorkerKey(ctx context.Context, msg *types.BlockMsg) (address.Address, error) { - addr := msg.Header.Miner - - bv.mx.Lock() - key, ok := bv.keycache[addr.String()] - bv.mx.Unlock() - if ok { - return key, nil - } - - // TODO I have a feeling all this can be simplified by cleverer DI to use the API - ts := bv.chain.GetHeaviestTipSet() - st, _, err := bv.stmgr.TipSetState(ctx, ts) - if err != nil { - return 
address.Undef, err - } - buf := bufbstore.NewBufferedBstore(bv.chain.Blockstore()) - cst := cbor.NewCborStore(buf) - state, err := state.LoadStateTree(cst, st) - if err != nil { - return address.Undef, err - } - act, err := state.GetActor(addr) - if err != nil { - return address.Undef, err - } - - blk, err := bv.chain.Blockstore().Get(act.Head) - if err != nil { - return address.Undef, err - } - aso := blk.RawData() - - var mst miner.State - err = mst.UnmarshalCBOR(bytes.NewReader(aso)) - if err != nil { - return address.Undef, err - } - - worker := mst.Info.Worker - key, err = bv.stmgr.ResolveToKeyAddress(ctx, worker, ts) - if err != nil { - return address.Undef, err - } - - bv.mx.Lock() - bv.keycache[addr.String()] = key - bv.mx.Unlock() - - return key, nil -} - -type blockReceiptCache struct { - blocks *lru.TwoQueueCache -} - -func newBlockReceiptCache() *blockReceiptCache { - c, _ := lru.New2Q(8192) - - return &blockReceiptCache{ - blocks: c, - } -} - -func (brc *blockReceiptCache) add(bcid cid.Cid) int { - val, ok := brc.blocks.Get(bcid) - if !ok { - brc.blocks.Add(bcid, int(1)) - return 0 - } - - brc.blocks.Add(bcid, val.(int)+1) - return val.(int) -} - -type MessageValidator struct { - mpool *messagepool.MessagePool -} - -func NewMessageValidator(mp *messagepool.MessagePool) *MessageValidator { - return &MessageValidator{mp} -} - -func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) pubsub.ValidationResult { - stats.Record(ctx, metrics.MessageReceived.M(1)) - m, err := types.DecodeSignedMessage(msg.Message.GetData()) - if err != nil { - log.Warnf("failed to decode incoming message: %s", err) - ctx, _ = tag.New(ctx, tag.Insert(metrics.FailureType, "decode")) - stats.Record(ctx, metrics.MessageValidationFailure.M(1)) - return pubsub.ValidationReject - } - - if err := mv.mpool.Add(m); err != nil { - log.Debugf("failed to add message from network to message pool (From: %s, To: %s, Nonce: %d, Value: %s): %s", m.Message.From, 
m.Message.To, m.Message.Nonce, types.FIL(m.Message.Value), err) - ctx, _ = tag.New( - ctx, - tag.Insert(metrics.FailureType, "add"), - ) - stats.Record(ctx, metrics.MessageValidationFailure.M(1)) - switch { - case xerrors.Is(err, messagepool.ErrBroadcastAnyway): - return pubsub.ValidationIgnore - default: - return pubsub.ValidationReject - } - } - stats.Record(ctx, metrics.MessageValidationSuccess.M(1)) - return pubsub.ValidationAccept -} - -func HandleIncomingMessages(ctx context.Context, mpool *messagepool.MessagePool, msub *pubsub.Subscription) { - for { - _, err := msub.Next(ctx) - if err != nil { - log.Warn("error from message subscription: ", err) - if ctx.Err() != nil { - log.Warn("quitting HandleIncomingMessages loop") - return - } - continue - } - - // Do nothing... everything happens in validate - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/sync.go b/vendor/github.com/filecoin-project/lotus/chain/sync.go deleted file mode 100644 index 5253799dd0..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/sync.go +++ /dev/null @@ -1,1538 +0,0 @@ -package chain - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "sort" - "strings" - "time" - - "github.com/Gurpartap/async" - "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - dstore "github.com/ipfs/go-datastore" - bstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/connmgr" - "github.com/libp2p/go-libp2p-core/peer" - cbg "github.com/whyrusleeping/cbor-gen" - "github.com/whyrusleeping/pubsub" - "go.opencensus.io/stats" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - bls "github.com/filecoin-project/filecoin-ffi" - "github.com/filecoin-project/go-address" - amt "github.com/filecoin-project/go-amt-ipld/v2" - "github.com/filecoin-project/sector-storage/ffiwrapper" - "github.com/filecoin-project/specs-actors/actors/abi" - 
"github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/blocksync" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/lotus/metrics" -) - -// Blocks that are more than MaxHeightDrift epochs above -//the theoretical max height based on systime are quickly rejected -const MaxHeightDrift = 5 - -var log = logging.Logger("chain") - -var LocalIncoming = "incoming" - -// Syncer is in charge of running the chain synchronization logic. As such, it -// is tasked with these functions, amongst others: -// -// * Fast-forwards the chain as it learns of new TipSets from the network via -// the SyncManager. -// * Applies the fork choice rule to select the correct side when confronted -// with a fork in the network. -// * Requests block headers and messages from other peers when not available -// in our BlockStore. -// * Tracks blocks marked as bad in a cache. -// * Keeps the BlockStore and ChainStore consistent with our view of the world, -// the latter of which in turn informs other components when a reorg has been -// committed. -// -// The Syncer does not run workers itself. It's mainly concerned with -// ensuring a consistent state of chain consensus. 
The reactive and network- -// interfacing processes are part of other components, such as the SyncManager -// (which owns the sync scheduler and sync workers), BlockSync, the HELLO -// protocol, and the gossipsub block propagation layer. -// -// {hint/concept} The fork-choice rule as it currently stands is: "pick the -// chain with the heaviest weight, so long as it hasn’t deviated one finality -// threshold from our head (900 epochs, parameter determined by spec-actors)". -type Syncer struct { - // The interface for accessing and putting tipsets into local storage - store *store.ChainStore - - // handle to the random beacon for verification - beacon beacon.RandomBeacon - - // the state manager handles making state queries - sm *stmgr.StateManager - - // The known Genesis tipset - Genesis *types.TipSet - - // TipSets known to be invalid - bad *BadBlockCache - - // handle to the block sync service - Bsync *blocksync.BlockSync - - self peer.ID - - syncmgr *SyncManager - - connmgr connmgr.ConnManager - - incoming *pubsub.PubSub - - receiptTracker *blockReceiptTracker - - verifier ffiwrapper.Verifier -} - -// NewSyncer creates a new Syncer object. 
-func NewSyncer(sm *stmgr.StateManager, bsync *blocksync.BlockSync, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.RandomBeacon, verifier ffiwrapper.Verifier) (*Syncer, error) { - gen, err := sm.ChainStore().GetGenesis() - if err != nil { - return nil, xerrors.Errorf("getting genesis block: %w", err) - } - - gent, err := types.NewTipSet([]*types.BlockHeader{gen}) - if err != nil { - return nil, err - } - - s := &Syncer{ - beacon: beacon, - bad: NewBadBlockCache(), - Genesis: gent, - Bsync: bsync, - store: sm.ChainStore(), - sm: sm, - self: self, - receiptTracker: newBlockReceiptTracker(), - connmgr: connmgr, - verifier: verifier, - - incoming: pubsub.New(50), - } - - s.syncmgr = NewSyncManager(s.Sync) - return s, nil -} - -func (syncer *Syncer) Start() { - syncer.syncmgr.Start() -} - -func (syncer *Syncer) Stop() { - syncer.syncmgr.Stop() -} - -// InformNewHead informs the syncer about a new potential tipset -// This should be called when connecting to new peers, and additionally -// when receiving new blocks from the network -func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { - ctx := context.Background() - if fts == nil { - log.Errorf("got nil tipset in InformNewHead") - return false - } - - if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) { - log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height()) - return false - } - - for _, b := range fts.Blocks { - if reason, ok := syncer.bad.Has(b.Cid()); ok { - log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason) - return false - } - if err := syncer.ValidateMsgMeta(b); err != nil { - log.Warnf("invalid block received: %s", err) - return false - } - } - - syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming) - - if from == syncer.self { - // TODO: this is kindof a hack... 
- log.Debug("got block from ourselves") - - if err := syncer.Sync(ctx, fts.TipSet()); err != nil { - log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err) - return false - } - - return true - } - - // TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of - // the blockstore - if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil { - log.Warn("failed to persist incoming block header: ", err) - return false - } - - syncer.Bsync.AddPeer(from) - - bestPweight := syncer.store.GetHeaviestTipSet().Blocks()[0].ParentWeight - targetWeight := fts.TipSet().Blocks()[0].ParentWeight - if targetWeight.LessThan(bestPweight) { - var miners []string - for _, blk := range fts.TipSet().Blocks() { - miners = append(miners, blk.Miner.String()) - } - log.Infof("incoming tipset from %s does not appear to be better than our best chain, ignoring for now", miners) - return false - } - - syncer.syncmgr.SetPeerHead(ctx, from, fts.TipSet()) - return true -} - -// IncomingBlocks spawns a goroutine that subscribes to the local eventbus to -// receive new block headers as they arrive from the network, and sends them to -// the returned channel. -// -// These blocks have not necessarily been incorporated to our view of the chain. -func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { - sub := syncer.incoming.Sub(LocalIncoming) - out := make(chan *types.BlockHeader, 10) - - go func() { - defer syncer.incoming.Unsub(sub, LocalIncoming) - - for { - select { - case r := <-sub: - hs := r.([]*types.BlockHeader) - for _, h := range hs { - select { - case out <- h: - case <-ctx.Done(): - return - } - } - case <-ctx.Done(): - return - } - } - }() - - return out, nil -} - -// ValidateMsgMeta performs structural and content hash validation of the -// messages within this block. If validation passes, it stores the messages in -// the underlying IPLD block store. 
-func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { - if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit { - return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) - } - - // Collect the CIDs of both types of messages separately: BLS and Secpk. - var bcids, scids []cbg.CBORMarshaler - for _, m := range fblk.BlsMessages { - c := cbg.CborCid(m.Cid()) - bcids = append(bcids, &c) - } - - for _, m := range fblk.SecpkMessages { - c := cbg.CborCid(m.Cid()) - scids = append(scids, &c) - } - - // TODO: IMPORTANT(GARBAGE). These message puts and the msgmeta - // computation need to go into the 'temporary' side of the blockstore when - // we implement that - blockstore := syncer.store.Blockstore() - - bs := cbor.NewCborStore(blockstore) - - // Compute the root CID of the combined message trie. - smroot, err := computeMsgMeta(bs, bcids, scids) - if err != nil { - return xerrors.Errorf("validating msgmeta, compute failed: %w", err) - } - - // Check that the message trie root matches with what's in the block. 
- if fblk.Header.Messages != smroot { - return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot) - } - - for _, m := range fblk.BlsMessages { - _, err := store.PutMessage(blockstore, m) - if err != nil { - return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) - } - } - - for _, m := range fblk.SecpkMessages { - _, err := store.PutMessage(blockstore, m) - if err != nil { - return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) - } - } - - return nil -} - -func (syncer *Syncer) LocalPeer() peer.ID { - return syncer.self -} - -func (syncer *Syncer) ChainStore() *store.ChainStore { - return syncer.store -} - -func (syncer *Syncer) InformNewBlock(from peer.ID, blk *types.FullBlock) bool { - // TODO: search for other blocks that could form a tipset with this block - // and then send that tipset to InformNewHead - - fts := &store.FullTipSet{Blocks: []*types.FullBlock{blk}} - return syncer.InformNewHead(from, fts) -} - -func copyBlockstore(from, to bstore.Blockstore) error { - cids, err := from.AllKeysChan(context.TODO()) - if err != nil { - return err - } - - for c := range cids { - b, err := from.Get(c) - if err != nil { - return err - } - - if err := to.Put(b); err != nil { - return err - } - } - - return nil -} - -// TODO: this function effectively accepts unchecked input from the network, -// either validate it here, or ensure that its validated elsewhere (maybe make -// sure the blocksync code checks it?) -// maybe this code should actually live in blocksync?? 
-func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) { - if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) { - return nil, fmt.Errorf("msgincl length didnt match tipset size") - } - - fts := &store.FullTipSet{} - for bi, b := range ts.Blocks() { - var smsgs []*types.SignedMessage - var smsgCids []cbg.CBORMarshaler - for _, m := range smi[bi] { - smsgs = append(smsgs, allsmsgs[m]) - c := cbg.CborCid(allsmsgs[m].Cid()) - smsgCids = append(smsgCids, &c) - } - - var bmsgs []*types.Message - var bmsgCids []cbg.CBORMarshaler - for _, m := range bmi[bi] { - bmsgs = append(bmsgs, allbmsgs[m]) - c := cbg.CborCid(allbmsgs[m].Cid()) - bmsgCids = append(bmsgCids, &c) - } - - if msgc := len(bmsgCids) + len(smsgCids); msgc > build.BlockMessageLimit { - return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc) - } - - mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids) - if err != nil { - return nil, err - } - - if b.Messages != mrcid { - return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key()) - } - - fb := &types.FullBlock{ - Header: b, - BlsMessages: bmsgs, - SecpkMessages: smsgs, - } - - fts.Blocks = append(fts.Blocks, fb) - } - - return fts, nil -} - -// computeMsgMeta computes the root CID of the combined arrays of message CIDs -// of both types (BLS and Secpk). 
-func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cbg.CBORMarshaler) (cid.Cid, error) { - ctx := context.TODO() - bmroot, err := amt.FromArray(ctx, bs, bmsgCids) - if err != nil { - return cid.Undef, err - } - - smroot, err := amt.FromArray(ctx, bs, smsgCids) - if err != nil { - return cid.Undef, err - } - - mrcid, err := bs.Put(ctx, &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to put msgmeta: %w", err) - } - - return mrcid, nil -} - -// FetchTipSet tries to load the provided tipset from the store, and falls back -// to the network (BlockSync) by querying the supplied peer if not found -// locally. -// -// {hint/usage} This is used from the HELLO protocol, to fetch the greeting -// peer's heaviest tipset if we don't have it. -func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { - if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { - return fts, nil - } - - // fall back to the network. - return syncer.Bsync.GetFullTipSet(ctx, p, tsk) -} - -// tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full -// representation of it containing FullBlocks. If ALL blocks are not found -// locally, it errors entirely with blockstore.ErrNotFound. -func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { - ts, err := syncer.store.LoadTipSet(tsk) - if err != nil { - return nil, err - } - - fts := &store.FullTipSet{} - for _, b := range ts.Blocks() { - bmsgs, smsgs, err := syncer.store.MessagesForBlock(b) - if err != nil { - return nil, err - } - - fb := &types.FullBlock{ - Header: b, - BlsMessages: bmsgs, - SecpkMessages: smsgs, - } - fts.Blocks = append(fts.Blocks, fb) - } - - return fts, nil -} - -// Sync tries to advance our view of the chain to `maybeHead`. 
It does nothing -// if our current head is heavier than the requested tipset, or if we're already -// at the requested head, or if the head is the genesis. -// -// Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the -// godocs on that method for a more detailed view. -func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { - ctx, span := trace.StartSpan(ctx, "chain.Sync") - defer span.End() - - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("tipset", fmt.Sprint(maybeHead.Cids())), - trace.Int64Attribute("height", int64(maybeHead.Height())), - ) - } - - if syncer.store.GetHeaviestTipSet().ParentWeight().GreaterThan(maybeHead.ParentWeight()) { - return nil - } - - if syncer.Genesis.Equals(maybeHead) || syncer.store.GetHeaviestTipSet().Equals(maybeHead) { - return nil - } - - if err := syncer.collectChain(ctx, maybeHead); err != nil { - span.AddAttributes(trace.StringAttribute("col_error", err.Error())) - span.SetStatus(trace.Status{ - Code: 13, - Message: err.Error(), - }) - return xerrors.Errorf("collectChain failed: %w", err) - } - - // At this point we have accepted and synced to the new `maybeHead` - // (`StageSyncComplete`). 
- if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil { - span.AddAttributes(trace.StringAttribute("put_error", err.Error())) - span.SetStatus(trace.Status{ - Code: 13, - Message: err.Error(), - }) - return xerrors.Errorf("failed to put synced tipset to chainstore: %w", err) - } - - peers := syncer.receiptTracker.GetPeers(maybeHead) - if len(peers) > 0 { - syncer.connmgr.TagPeer(peers[0], "new-block", 40) - - for _, p := range peers[1:] { - syncer.connmgr.TagPeer(p, "new-block", 25) - } - } - - return nil -} - -func isPermanent(err error) bool { - return !errors.Is(err, ErrTemporal) -} - -func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet) error { - ctx, span := trace.StartSpan(ctx, "validateTipSet") - defer span.End() - - span.AddAttributes(trace.Int64Attribute("height", int64(fts.TipSet().Height()))) - - ts := fts.TipSet() - if ts.Equals(syncer.Genesis) { - return nil - } - - var futures []async.ErrorFuture - for _, b := range fts.Blocks { - b := b // rebind to a scoped variable - - futures = append(futures, async.Err(func() error { - if err := syncer.ValidateBlock(ctx, b); err != nil { - if isPermanent(err) { - syncer.bad.Add(b.Cid(), err.Error()) - } - return xerrors.Errorf("validating block %s: %w", b.Cid(), err) - } - - if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { - return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) - } - return nil - })) - } - for _, f := range futures { - if err := f.AwaitContext(ctx); err != nil { - return err - } - } - return nil -} - -func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error { - var spast power.State - - _, err := syncer.sm.LoadActorState(ctx, builtin.StoragePowerActorAddr, &spast, baseTs) - if err != nil { - return err - } - - cm, err := adt.AsMap(syncer.store.Store(ctx), spast.Claims) - if err != nil { - return err - } - - var claim power.Claim - exist, err := 
cm.Get(adt.AddrKey(maddr), &claim) - if err != nil { - return err - } - if !exist { - return xerrors.New("miner isn't valid") - } - return nil -} - -var ErrTemporal = errors.New("temporal error") - -func blockSanityChecks(h *types.BlockHeader) error { - if h.ElectionProof == nil { - return xerrors.Errorf("block cannot have nil election proof") - } - - if h.Ticket == nil { - return xerrors.Errorf("block cannot have nil ticket") - } - - if h.BlockSig == nil { - return xerrors.Errorf("block had nil signature") - } - - if h.BLSAggregate == nil { - return xerrors.Errorf("block had nil bls aggregate signature") - } - - return nil -} - -// ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec -func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) { - defer func() { - // b.Cid() could panic for empty blocks that are used in tests. - if rerr := recover(); rerr != nil { - err = xerrors.Errorf("validate block panic: %w", rerr) - return - } - }() - - isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid()) - if err != nil { - return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err) - } - - if isValidated { - return nil - } - - validationStart := time.Now() - defer func() { - dur := time.Since(validationStart) - durMilli := dur.Seconds() * float64(1000) - stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(durMilli)) - log.Infow("block validation", "took", dur, "height", b.Header.Height) - }() - - ctx, span := trace.StartSpan(ctx, "validateBlock") - defer span.End() - - if build.InsecurePoStValidation { - log.Warn("insecure test validation is enabled, if you see this outside of a test, it is a severe bug!") - } - - if err := blockSanityChecks(b.Header); err != nil { - return xerrors.Errorf("incoming header failed basic sanity checks: %w", err) - } - - h := b.Header - - baseTs, err := syncer.store.LoadTipSet(types.NewTipSetKey(h.Parents...)) - if err != nil { - return 
xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) - } - - lbts, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height) - if err != nil { - return xerrors.Errorf("failed to get lookback tipset for block: %w", err) - } - - lbst, _, err := syncer.sm.TipSetState(ctx, lbts) - if err != nil { - return xerrors.Errorf("failed to compute lookback tipset state: %w", err) - } - - prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs) - if err != nil { - return xerrors.Errorf("failed to get latest beacon entry: %w", err) - } - - //nulls := h.Height - (baseTs.Height() + 1) - - // fast checks first - - now := uint64(time.Now().Unix()) - if h.Timestamp > now+build.AllowableClockDriftSecs { - return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal) - } - if h.Timestamp > now { - log.Warn("Got block from the future, but within threshold", h.Timestamp, time.Now().Unix()) - } - - if h.Timestamp < baseTs.MinTimestamp()+(build.BlockDelaySecs*uint64(h.Height-baseTs.Height())) { - log.Warn("timestamp funtimes: ", h.Timestamp, baseTs.MinTimestamp(), h.Height, baseTs.Height()) - diff := (baseTs.MinTimestamp() + (build.BlockDelaySecs * uint64(h.Height-baseTs.Height()))) - h.Timestamp - - return xerrors.Errorf("block was generated too soon (h.ts:%d < base.mints:%d + BLOCK_DELAY:%d * deltaH:%d; diff %d)", h.Timestamp, baseTs.MinTimestamp(), build.BlockDelaySecs, h.Height-baseTs.Height(), diff) - } - - msgsCheck := async.Err(func() error { - if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil { - return xerrors.Errorf("block had invalid messages: %w", err) - } - return nil - }) - - minerCheck := async.Err(func() error { - if err := syncer.minerIsValid(ctx, h.Miner, baseTs); err != nil { - return xerrors.Errorf("minerIsValid failed: %w", err) - } - return nil - }) - - // Stuff that needs stateroot / worker address - stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs) - if err != nil { - 
return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) - } - - if stateroot != h.ParentStateRoot { - msgs, err := syncer.store.MessagesForTipset(baseTs) - if err != nil { - log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) - } else { - log.Warn("Messages for tipset with mismatching state:") - for i, m := range msgs { - mm := m.VMMessage() - log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) - } - } - - return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) - } - - if precp != h.ParentMessageReceipts { - return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) - } - - waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner) - if err != nil { - return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err) - } - - winnerCheck := async.Err(func() error { - rBeacon := *prevBeacon - if len(h.BeaconEntries) != 0 { - rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1] - } - buf := new(bytes.Buffer) - if err := h.Miner.MarshalCBOR(buf); err != nil { - return xerrors.Errorf("failed to marshal miner address to cbor: %w", err) - } - - //TODO: DST from spec actors when it is there - vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes()) - if err != nil { - return xerrors.Errorf("could not draw randomness: %w", err) - } - - if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil { - return xerrors.Errorf("validating block election proof failed: %w", err) - } - - slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner) - if err != nil { - return xerrors.Errorf("failed to check if block miner was slashed: %w", err) - } - - if slashed { - return xerrors.Errorf("received block was from slashed or invalid miner") - } - - 
mpow, tpow, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner) - if err != nil { - return xerrors.Errorf("failed getting power: %w", err) - } - - if !types.IsTicketWinner(h.ElectionProof.VRFProof, mpow.QualityAdjPower, tpow.QualityAdjPower) { - return xerrors.Errorf("miner created a block but was not a winner") - } - - return nil - }) - - blockSigCheck := async.Err(func() error { - if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil { - return xerrors.Errorf("check block signature failed: %w", err) - } - return nil - }) - - beaconValuesCheck := async.Err(func() error { - if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" { - return nil - } - - if err := beacon.ValidateBlockValues(syncer.beacon, h, *prevBeacon); err != nil { - return xerrors.Errorf("failed to validate blocks random beacon values: %w", err) - } - return nil - }) - - tktsCheck := async.Err(func() error { - buf := new(bytes.Buffer) - if err := h.Miner.MarshalCBOR(buf); err != nil { - return xerrors.Errorf("failed to marshal miner address to cbor: %w", err) - } - - beaconBase := *prevBeacon - if len(h.BeaconEntries) == 0 { - buf.Write(baseTs.MinTicket().VRFProof) - } else { - beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1] - } - - vrfBase, err := store.DrawRandomness(beaconBase.Data, crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes()) - if err != nil { - return xerrors.Errorf("failed to compute vrf base for ticket: %w", err) - } - - err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof) - if err != nil { - return xerrors.Errorf("validating block tickets failed: %w", err) - } - return nil - }) - - wproofCheck := async.Err(func() error { - if err := syncer.VerifyWinningPoStProof(ctx, h, *prevBeacon, lbst, waddr); err != nil { - return xerrors.Errorf("invalid election post: %w", err) - } - return nil - }) - - await := []async.ErrorFuture{ - minerCheck, - tktsCheck, - blockSigCheck, - beaconValuesCheck, - wproofCheck, - winnerCheck, 
- msgsCheck, - } - - var merr error - for _, fut := range await { - if err := fut.AwaitContext(ctx); err != nil { - merr = multierror.Append(merr, err) - } - } - if merr != nil { - mulErr := merr.(*multierror.Error) - mulErr.ErrorFormat = func(es []error) string { - if len(es) == 1 { - return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0]) - } - - points := make([]string, len(es)) - for i, err := range es { - points[i] = fmt.Sprintf("* %+v", err) - } - - return fmt.Sprintf( - "%d errors occurred:\n\t%s\n\n", - len(es), strings.Join(points, "\n\t")) - } - } - - if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil { - return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err) - } - - return nil -} - -func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { - if build.InsecurePoStValidation { - if len(h.WinPoStProof) == 0 { - return xerrors.Errorf("[TESTING] No winning post proof given") - } - - if string(h.WinPoStProof[0].ProofBytes) == "valid proof" { - return nil - } - return xerrors.Errorf("[TESTING] winning post was invalid") - } - - buf := new(bytes.Buffer) - if err := h.Miner.MarshalCBOR(buf); err != nil { - return xerrors.Errorf("failed to marshal miner address: %w", err) - } - - rbase := prevBeacon - if len(h.BeaconEntries) > 0 { - rbase = h.BeaconEntries[len(h.BeaconEntries)-1] - } - - rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes()) - if err != nil { - return xerrors.Errorf("failed to get randomness for verifying winningPost proof: %w", err) - } - - mid, err := address.IDFromAddress(h.Miner) - if err != nil { - return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err) - } - - sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, syncer.verifier, syncer.sm, lbst, h.Miner, rand) - if err != nil { - return xerrors.Errorf("getting 
winning post sector set: %w", err) - } - - ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, abi.WinningPoStVerifyInfo{ - Randomness: rand, - Proofs: h.WinPoStProof, - ChallengedSectors: sectors, - Prover: abi.ActorID(mid), - }) - if err != nil { - return xerrors.Errorf("failed to verify election post: %w", err) - } - - if !ok { - log.Errorf("invalid winning post (%x; %v)", rand, sectors) - return xerrors.Errorf("winning post was invalid") - } - - return nil -} - -// TODO: We should extract this somewhere else and make the message pool and miner use the same logic -func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error { - { - var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type - var pubks []bls.PublicKey - - for _, m := range b.BlsMessages { - sigCids = append(sigCids, m.Cid()) - - pubk, err := syncer.sm.GetBlsPublicKey(ctx, m.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to load bls public to validate block: %w", err) - } - - pubks = append(pubks, pubk) - } - - if err := syncer.verifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil { - return xerrors.Errorf("bls aggregate signature was invalid: %w", err) - } - } - - nonces := make(map[address.Address]uint64) - - stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs) - if err != nil { - return err - } - - cst := cbor.NewCborStore(syncer.store.Blockstore()) - st, err := state.LoadStateTree(cst, stateroot) - if err != nil { - return xerrors.Errorf("failed to load base state tree: %w", err) - } - - checkMsg := func(msg types.ChainMsg) error { - m := msg.VMMessage() - - // Phase 1: syntactic validation, as defined in the spec - minGas := vm.PricelistByEpoch(baseTs.Height()).OnChainMessage(msg.ChainLength()) - if err := m.ValidForBlockInclusion(minGas.Total()); err != nil { - return err - } - - // Phase 2: (Partial) semantic validation: - // the sender exists and 
is an account actor, and the nonces make sense - if _, ok := nonces[m.From]; !ok { - // `GetActor` does not validate that this is an account actor. - act, err := st.GetActor(m.From) - if err != nil { - return xerrors.Errorf("failed to get actor: %w", err) - } - - // redundant check - if !act.IsAccountActor() { - return xerrors.New("Sender must be an account actor") - } - nonces[m.From] = act.Nonce - } - - if nonces[m.From] != m.Nonce { - return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[m.From], m.Nonce) - } - nonces[m.From]++ - - return nil - } - - var blsCids []cbg.CBORMarshaler - - for i, m := range b.BlsMessages { - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) - } - - c := cbg.CborCid(m.Cid()) - blsCids = append(blsCids, &c) - } - - var secpkCids []cbg.CBORMarshaler - for i, m := range b.SecpkMessages { - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) - } - - // `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call - // in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`). 
- kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to resolve key addr: %w", err) - } - - if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil { - return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) - } - - c := cbg.CborCid(m.Cid()) - secpkCids = append(secpkCids, &c) - } - - bmroot, err := amt.FromArray(ctx, cst, blsCids) - if err != nil { - return xerrors.Errorf("failed to build amt from bls msg cids: %w", err) - } - - smroot, err := amt.FromArray(ctx, cst, secpkCids) - if err != nil { - return xerrors.Errorf("failed to build amt from bls msg cids: %w", err) - } - - mrcid, err := cst.Put(ctx, &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - if err != nil { - return err - } - - if b.Header.Messages != mrcid { - return fmt.Errorf("messages didnt match message root in header") - } - - return nil -} - -func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks []bls.PublicKey) error { - _, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate") - defer span.End() - span.AddAttributes( - trace.Int64Attribute("msgCount", int64(len(msgs))), - ) - - bmsgs := make([]bls.Message, len(msgs)) - for i, m := range msgs { - bmsgs[i] = m.Bytes() - } - - var bsig bls.Signature - copy(bsig[:], sig.Data) - if !bls.HashVerify(&bsig, bmsgs, pubks) { - return xerrors.New("bls aggregate signature failed to verify") - } - - return nil -} - -type syncStateKey struct{} - -func extractSyncState(ctx context.Context) *SyncerState { - v := ctx.Value(syncStateKey{}) - if v != nil { - return v.(*SyncerState) - } - return nil -} - -// collectHeaders collects the headers from the blocks between any two tipsets. 
-// -// `from` is the heaviest/projected/target tipset we have learned about, and -// `to` is usually an anchor tipset we already have in our view of the chain -// (which could be the genesis). -// -// collectHeaders checks if portions of the chain are in our ChainStore; falling -// down to the network to retrieve the missing parts. If during the process, any -// portion we receive is in our denylist (bad list), we short-circuit. -// -// {hint/naming}: `from` and `to` is in inverse order. `from` is the highest, -// and `to` is the lowest. This method traverses the chain backwards. -// -// {hint/usage}: This is used by collectChain, which is in turn called from the -// main Sync method (Syncer#Sync), so it's a pretty central method. -// -// {hint/logic}: The logic of this method is as follows: -// -// 1. Check that the from tipset is not linked to a parent block known to be -// bad. -// 2. Check the consistency of beacon entries in the from tipset. We check -// total equality of the BeaconEntries in each block. -// 3. Travers the chain backwards, for each tipset: -// 3a. Load it from the chainstore; if found, it move on to its parent. -// 3b. Query our peers via BlockSync in batches, requesting up to a -// maximum of 500 tipsets every time. -// -// Once we've concluded, if we find a mismatching tipset at the height where the -// anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork -// to resolve it. Refer to the godocs there. -// -// All throughout the process, we keep checking if the received blocks are in -// the deny list, and short-circuit the process if so. 
-func (syncer *Syncer) collectHeaders(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { - ctx, span := trace.StartSpan(ctx, "collectHeaders") - defer span.End() - ss := extractSyncState(ctx) - - span.AddAttributes( - trace.Int64Attribute("fromHeight", int64(from.Height())), - trace.Int64Attribute("toHeight", int64(to.Height())), - ) - - markBad := func(fmts string, args ...interface{}) { - for _, b := range from.Cids() { - syncer.bad.Add(b, fmt.Sprintf(fmts, args...)) - } - } - - // Check if the parents of the from block are in the denylist. - // i.e. if a fork of the chain has been requested that we know to be bad. - for _, pcid := range from.Parents().Cids() { - if reason, ok := syncer.bad.Has(pcid); ok { - markBad("linked to %s", pcid) - return nil, xerrors.Errorf("chain linked to block marked previously as bad (%s, %s) (reason: %s)", from.Cids(), pcid, reason) - } - } - - { - // ensure consistency of beacon entires - targetBE := from.Blocks()[0].BeaconEntries - sorted := sort.SliceIsSorted(targetBE, func(i, j int) bool { - return targetBE[i].Round < targetBE[j].Round - }) - if !sorted { - syncer.bad.Add(from.Cids()[0], "wrong order of beacon entires") - return nil, xerrors.Errorf("wrong order of beacon entires") - } - - for _, bh := range from.Blocks()[1:] { - if len(targetBE) != len(bh.BeaconEntries) { - // cannot mark bad, I think @Kubuxu - return nil, xerrors.Errorf("tipset contained different number for beacon entires") - } - for i, be := range bh.BeaconEntries { - if targetBE[i].Round != be.Round || !bytes.Equal(targetBE[i].Data, be.Data) { - // cannot mark bad, I think @Kubuxu - return nil, xerrors.Errorf("tipset contained different beacon entires") - } - } - - } - } - - blockSet := []*types.TipSet{from} - - at := from.Parents() - - // we want to sync all the blocks until the height above the block we have - untilHeight := to.Height() + 1 - - ss.SetHeight(blockSet[len(blockSet)-1].Height()) - - var acceptedBlocks 
[]cid.Cid - -loop: - for blockSet[len(blockSet)-1].Height() > untilHeight { - for _, bc := range at.Cids() { - if reason, ok := syncer.bad.Has(bc); ok { - for _, b := range acceptedBlocks { - syncer.bad.Add(b, fmt.Sprintf("chain contained %s", bc)) - } - - return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", from.Cids(), bc, reason) - } - } - - // If, for some reason, we have a suffix of the chain locally, handle that here - ts, err := syncer.store.LoadTipSet(at) - if err == nil { - acceptedBlocks = append(acceptedBlocks, at.Cids()...) - - blockSet = append(blockSet, ts) - at = ts.Parents() - continue - } - if !xerrors.Is(err, bstore.ErrNotFound) { - log.Warn("loading local tipset: %s", err) - } - - // NB: GetBlocks validates that the blocks are in-fact the ones we - // requested, and that they are correctly linked to one another. It does - // not validate any state transitions. - window := 500 - if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window { - window = gap - } - blks, err := syncer.Bsync.GetBlocks(ctx, at, window) - if err != nil { - // Most likely our peers aren't fully synced yet, but forwarded - // new block message (ideally we'd find better peers) - - log.Errorf("failed to get blocks: %+v", err) - - span.AddAttributes(trace.StringAttribute("error", err.Error())) - - // This error will only be logged above, - return nil, xerrors.Errorf("failed to get blocks: %w", err) - } - log.Info("Got blocks: ", blks[0].Height(), len(blks)) - - for _, b := range blks { - if b.Height() < untilHeight { - break loop - } - for _, bc := range b.Cids() { - if reason, ok := syncer.bad.Has(bc); ok { - for _, b := range acceptedBlocks { - syncer.bad.Add(b, fmt.Sprintf("chain contained %s", bc)) - } - - return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", from.Cids(), bc, reason) - } - } - blockSet = append(blockSet, b) - } - - acceptedBlocks = 
append(acceptedBlocks, at.Cids()...) - - ss.SetHeight(blks[len(blks)-1].Height()) - at = blks[len(blks)-1].Parents() - } - - if !types.CidArrsEqual(blockSet[len(blockSet)-1].Parents().Cids(), to.Cids()) { - last := blockSet[len(blockSet)-1] - if last.Parents() == to.Parents() { - // common case: receiving a block thats potentially part of the same tipset as our best block - return blockSet, nil - } - - // We have now ascertained that this is *not* a 'fast forward' - - log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", from.Cids(), from.Height(), to.Cids(), to.Height()) - fork, err := syncer.syncFork(ctx, last, to) - if err != nil { - if xerrors.Is(err, ErrForkTooLong) { - // TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish? - log.Warn("adding forked chain to our bad tipset cache") - for _, b := range from.Blocks() { - syncer.bad.Add(b.Cid(), "fork past finality") - } - } - return nil, xerrors.Errorf("failed to sync fork: %w", err) - } - - blockSet = append(blockSet, fork...) - } - - return blockSet, nil -} - -var ErrForkTooLong = fmt.Errorf("fork longer than threshold") - -// syncFork tries to obtain the chain fragment that links a fork into a common -// ancestor in our view of the chain. -// -// If the fork is too long (build.ForkLengthThreshold), we add the entire subchain to the -// denylist. Else, we find the common ancestor, and add the missing chain -// fragment until the fork point to the returned []TipSet. 
-func (syncer *Syncer) syncFork(ctx context.Context, from *types.TipSet, to *types.TipSet) ([]*types.TipSet, error) { - tips, err := syncer.Bsync.GetBlocks(ctx, from.Parents(), int(build.ForkLengthThreshold)) - if err != nil { - return nil, err - } - - nts, err := syncer.store.LoadTipSet(to.Parents()) - if err != nil { - return nil, xerrors.Errorf("failed to load next local tipset: %w", err) - } - - for cur := 0; cur < len(tips); { - if nts.Height() == 0 { - if !syncer.Genesis.Equals(nts) { - return nil, xerrors.Errorf("somehow synced chain that linked back to a different genesis (bad genesis: %s)", nts.Key()) - } - return nil, xerrors.Errorf("synced chain forked at genesis, refusing to sync") - } - - if nts.Equals(tips[cur]) { - return tips[:cur+1], nil - } - - if nts.Height() < tips[cur].Height() { - cur++ - } else { - nts, err = syncer.store.LoadTipSet(nts.Parents()) - if err != nil { - return nil, xerrors.Errorf("loading next local tipset: %w", err) - } - } - } - return nil, ErrForkTooLong -} - -func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*types.TipSet) error { - ss := extractSyncState(ctx) - ss.SetHeight(0) - - return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error { - log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids())) - if err := syncer.ValidateTipSet(ctx, fts); err != nil { - log.Errorf("failed to validate tipset: %+v", err) - return xerrors.Errorf("message processing failed: %w", err) - } - - stats.Record(ctx, metrics.ChainNodeWorkerHeight.M(int64(fts.TipSet().Height()))) - ss.SetHeight(fts.TipSet().Height()) - - return nil - }) -} - -// fills out each of the given tipsets with messages and calls the callback with it -func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error { - ctx, span := trace.StartSpan(ctx, "iterFullTipsets") - defer span.End() - 
- span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers)))) - - windowSize := 200 - for i := len(headers) - 1; i >= 0; { - fts, err := syncer.store.TryFillTipSet(headers[i]) - if err != nil { - return err - } - if fts != nil { - if err := cb(ctx, fts); err != nil { - return err - } - i-- - continue - } - - batchSize := windowSize - if i < batchSize { - batchSize = i - } - - nextI := (i + 1) - batchSize // want to fetch batchSize values, 'i' points to last one we want to fetch, so its 'inclusive' of our request, thus we need to add one to our request start index - - var bstout []*blocksync.BSTipSet - for len(bstout) < batchSize { - next := headers[nextI] - - nreq := batchSize - len(bstout) - bstips, err := syncer.Bsync.GetChainMessages(ctx, next, uint64(nreq)) - if err != nil { - return xerrors.Errorf("message processing failed: %w", err) - } - - bstout = append(bstout, bstips...) - nextI += len(bstips) - } - - for bsi := 0; bsi < len(bstout); bsi++ { - // temp storage so we don't persist data we dont want to - ds := dstore.NewMapDatastore() - bs := bstore.NewBlockstore(ds) - blks := cbor.NewCborStore(bs) - - this := headers[i-bsi] - bstip := bstout[len(bstout)-(bsi+1)] - fts, err := zipTipSetAndMessages(blks, this, bstip.BlsMessages, bstip.SecpkMessages, bstip.BlsMsgIncludes, bstip.SecpkMsgIncludes) - if err != nil { - log.Warnw("zipping failed", "error", err, "bsi", bsi, "i", i, - "height", this.Height(), "bstip-height", bstip.Blocks[0].Height, - "next-height", i+batchSize) - return xerrors.Errorf("message processing failed: %w", err) - } - - if err := cb(ctx, fts); err != nil { - return err - } - - if err := persistMessages(bs, bstip); err != nil { - return err - } - - if err := copyBlockstore(bs, syncer.store.Blockstore()); err != nil { - return xerrors.Errorf("message processing failed: %w", err) - } - } - i -= batchSize - } - - return nil -} - -func persistMessages(bs bstore.Blockstore, bst *blocksync.BSTipSet) error { - for _, m := range 
bst.BlsMessages { - //log.Infof("putting BLS message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { - log.Errorf("failed to persist messages: %+v", err) - return xerrors.Errorf("BLS message processing failed: %w", err) - } - } - for _, m := range bst.SecpkMessages { - if m.Signature.Type != crypto.SigTypeSecp256k1 { - return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type) - } - //log.Infof("putting secp256k1 message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { - log.Errorf("failed to persist messages: %+v", err) - return xerrors.Errorf("secp256k1 message processing failed: %w", err) - } - } - - return nil -} - -// collectChain tries to advance our view of the chain to the purported head. -// -// It goes through various stages: -// -// 1. StageHeaders: we proceed in the sync process by requesting block headers -// from our peers, moving back from their heads, until we reach a tipset -// that we have in common (such a common tipset must exist, thought it may -// simply be the genesis block). -// -// If the common tipset is our head, we treat the sync as a "fast-forward", -// else we must drop part of our chain to connect to the peer's head -// (referred to as "forking"). -// -// 2. StagePersistHeaders: now that we've collected the missing headers, -// augmented by those on the other side of a fork, we persist them to the -// BlockStore. -// -// 3. StageMessages: having acquired the headers and found a common tipset, -// we then move forward, requesting the full blocks, including the messages. 
-func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet) error { - ctx, span := trace.StartSpan(ctx, "collectChain") - defer span.End() - ss := extractSyncState(ctx) - - ss.Init(syncer.store.GetHeaviestTipSet(), ts) - - headers, err := syncer.collectHeaders(ctx, ts, syncer.store.GetHeaviestTipSet()) - if err != nil { - ss.Error(err) - return err - } - - span.AddAttributes(trace.Int64Attribute("syncChainLength", int64(len(headers)))) - - if !headers[0].Equals(ts) { - log.Errorf("collectChain headers[0] should be equal to sync target. Its not: %s != %s", headers[0].Cids(), ts.Cids()) - } - - ss.SetStage(api.StagePersistHeaders) - - toPersist := make([]*types.BlockHeader, 0, len(headers)*int(build.BlocksPerEpoch)) - for _, ts := range headers { - toPersist = append(toPersist, ts.Blocks()...) - } - if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil { - err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err) - ss.Error(err) - return err - } - toPersist = nil - - ss.SetStage(api.StageMessages) - - if err := syncer.syncMessagesAndCheckState(ctx, headers); err != nil { - err = xerrors.Errorf("collectChain syncMessages: %w", err) - ss.Error(err) - return err - } - - ss.SetStage(api.StageSyncComplete) - log.Debugw("new tipset", "height", ts.Height(), "tipset", types.LogCids(ts.Cids())) - - return nil -} - -func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error { - if build.InsecurePoStValidation { - return nil - } - return gen.VerifyVRF(ctx, worker, rand, evrf) -} - -func (syncer *Syncer) State() []SyncerState { - var out []SyncerState - for _, ss := range syncer.syncmgr.syncStates { - out = append(out, ss.Snapshot()) - } - return out -} - -// MarkBad manually adds a block to the "bad blocks" cache. 
-func (syncer *Syncer) MarkBad(blk cid.Cid) { - syncer.bad.Add(blk, "manually marked bad") -} - -func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { - return syncer.bad.Has(blk) -} -func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { - cur := ts - for i := 0; i < 20; i++ { - cbe := cur.Blocks()[0].BeaconEntries - if len(cbe) > 0 { - return &cbe[len(cbe)-1], nil - } - - if cur.Height() == 0 { - return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") - } - - next, err := syncer.store.LoadTipSet(cur.Parents()) - if err != nil { - return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) - } - cur = next - } - - return nil, xerrors.Errorf("found NO beacon entries in the 20 blocks prior to given tipset") -} - -func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { - g, err := syncer.store.GetGenesis() - if err != nil { - return false - } - - now := uint64(time.Now().Unix()) - return epoch > (abi.ChainEpoch((now-g.Timestamp)/build.BlockDelaySecs) + MaxHeightDrift) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/sync_manager.go b/vendor/github.com/filecoin-project/lotus/chain/sync_manager.go deleted file mode 100644 index e3fbdf4e11..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/sync_manager.go +++ /dev/null @@ -1,426 +0,0 @@ -package chain - -import ( - "context" - "sort" - "sync" - - "github.com/filecoin-project/lotus/chain/types" - peer "github.com/libp2p/go-libp2p-core/peer" -) - -const BootstrapPeerThreshold = 2 - -const ( - BSStateInit = 0 - BSStateSelected = 1 - BSStateScheduled = 2 - BSStateComplete = 3 -) - -type SyncFunc func(context.Context, *types.TipSet) error - -type SyncManager struct { - lk sync.Mutex - peerHeads map[peer.ID]*types.TipSet - - bssLk sync.Mutex - bootstrapState int - - bspThresh int - - incomingTipSets chan *types.TipSet - syncTargets 
chan *types.TipSet - syncResults chan *syncResult - - syncStates []*SyncerState - - // Normally this handler is set to `(*Syncer).Sync()`. - doSync func(context.Context, *types.TipSet) error - - stop chan struct{} - - // Sync Scheduler fields - activeSyncs map[types.TipSetKey]*types.TipSet - syncQueue syncBucketSet - activeSyncTips syncBucketSet - nextSyncTarget *syncTargetBucket - workerChan chan *types.TipSet -} - -type syncResult struct { - ts *types.TipSet - success bool -} - -const syncWorkerCount = 3 - -func NewSyncManager(sync SyncFunc) *SyncManager { - return &SyncManager{ - bspThresh: 1, - peerHeads: make(map[peer.ID]*types.TipSet), - syncTargets: make(chan *types.TipSet), - syncResults: make(chan *syncResult), - syncStates: make([]*SyncerState, syncWorkerCount), - incomingTipSets: make(chan *types.TipSet), - activeSyncs: make(map[types.TipSetKey]*types.TipSet), - doSync: sync, - stop: make(chan struct{}), - } -} - -func (sm *SyncManager) Start() { - go sm.syncScheduler() - for i := 0; i < syncWorkerCount; i++ { - go sm.syncWorker(i) - } -} - -func (sm *SyncManager) Stop() { - close(sm.stop) -} - -func (sm *SyncManager) SetPeerHead(ctx context.Context, p peer.ID, ts *types.TipSet) { - sm.lk.Lock() - defer sm.lk.Unlock() - sm.peerHeads[p] = ts - - if sm.getBootstrapState() == BSStateInit { - spc := sm.syncedPeerCount() - if spc >= sm.bspThresh { - // Its go time! 
- target, err := sm.selectSyncTarget() - if err != nil { - log.Error("failed to select sync target: ", err) - return - } - sm.setBootstrapState(BSStateSelected) - - sm.incomingTipSets <- target - } - log.Infof("sync bootstrap has %d peers", spc) - return - } - - sm.incomingTipSets <- ts -} - -type syncBucketSet struct { - buckets []*syncTargetBucket -} - -func newSyncTargetBucket(tipsets ...*types.TipSet) *syncTargetBucket { - var stb syncTargetBucket - for _, ts := range tipsets { - stb.add(ts) - } - return &stb -} - -func (sbs *syncBucketSet) RelatedToAny(ts *types.TipSet) bool { - for _, b := range sbs.buckets { - if b.sameChainAs(ts) { - return true - } - } - return false -} - -func (sbs *syncBucketSet) Insert(ts *types.TipSet) { - for _, b := range sbs.buckets { - if b.sameChainAs(ts) { - b.add(ts) - return - } - } - sbs.buckets = append(sbs.buckets, newSyncTargetBucket(ts)) -} - -func (sbs *syncBucketSet) Pop() *syncTargetBucket { - var bestBuck *syncTargetBucket - var bestTs *types.TipSet - for _, b := range sbs.buckets { - hts := b.heaviestTipSet() - if bestBuck == nil || bestTs.ParentWeight().LessThan(hts.ParentWeight()) { - bestBuck = b - bestTs = hts - } - } - - sbs.removeBucket(bestBuck) - - return bestBuck -} - -func (sbs *syncBucketSet) removeBucket(toremove *syncTargetBucket) { - nbuckets := make([]*syncTargetBucket, 0, len(sbs.buckets)-1) - for _, b := range sbs.buckets { - if b != toremove { - nbuckets = append(nbuckets, b) - } - } - sbs.buckets = nbuckets -} - -func (sbs *syncBucketSet) PopRelated(ts *types.TipSet) *syncTargetBucket { - for _, b := range sbs.buckets { - if b.sameChainAs(ts) { - sbs.removeBucket(b) - return b - } - } - return nil -} - -func (sbs *syncBucketSet) Heaviest() *types.TipSet { - // TODO: should also consider factoring in number of peers represented by each bucket here - var bestTs *types.TipSet - for _, b := range sbs.buckets { - bhts := b.heaviestTipSet() - if bestTs == nil || 
bhts.ParentWeight().GreaterThan(bestTs.ParentWeight()) { - bestTs = bhts - } - } - return bestTs -} - -func (sbs *syncBucketSet) Empty() bool { - return len(sbs.buckets) == 0 -} - -type syncTargetBucket struct { - tips []*types.TipSet - count int -} - -func (stb *syncTargetBucket) sameChainAs(ts *types.TipSet) bool { - for _, t := range stb.tips { - if ts.Equals(t) { - return true - } - if ts.Key() == t.Parents() { - return true - } - if ts.Parents() == t.Key() { - return true - } - } - return false -} - -func (stb *syncTargetBucket) add(ts *types.TipSet) { - stb.count++ - - for _, t := range stb.tips { - if t.Equals(ts) { - return - } - } - - stb.tips = append(stb.tips, ts) -} - -func (stb *syncTargetBucket) heaviestTipSet() *types.TipSet { - if stb == nil { - return nil - } - - var best *types.TipSet - for _, ts := range stb.tips { - if best == nil || ts.ParentWeight().GreaterThan(best.ParentWeight()) { - best = ts - } - } - return best -} - -func (sm *SyncManager) selectSyncTarget() (*types.TipSet, error) { - var buckets syncBucketSet - - var peerHeads []*types.TipSet - for _, ts := range sm.peerHeads { - peerHeads = append(peerHeads, ts) - } - sort.Slice(peerHeads, func(i, j int) bool { - return peerHeads[i].Height() < peerHeads[j].Height() - }) - - for _, ts := range peerHeads { - buckets.Insert(ts) - } - - if len(buckets.buckets) > 1 { - log.Warn("caution, multiple distinct chains seen during head selections") - // TODO: we *could* refuse to sync here without user intervention. 
- // For now, just select the best cluster - } - - return buckets.Heaviest(), nil -} - -func (sm *SyncManager) syncScheduler() { - - for { - select { - case ts, ok := <-sm.incomingTipSets: - if !ok { - log.Info("shutting down sync scheduler") - return - } - - sm.scheduleIncoming(ts) - case res := <-sm.syncResults: - sm.scheduleProcessResult(res) - case sm.workerChan <- sm.nextSyncTarget.heaviestTipSet(): - sm.scheduleWorkSent() - case <-sm.stop: - log.Info("sync scheduler shutting down") - return - } - } -} - -func (sm *SyncManager) scheduleIncoming(ts *types.TipSet) { - log.Debug("scheduling incoming tipset sync: ", ts.Cids()) - if sm.getBootstrapState() == BSStateSelected { - sm.setBootstrapState(BSStateScheduled) - sm.syncTargets <- ts - return - } - - var relatedToActiveSync bool - for _, acts := range sm.activeSyncs { - if ts.Equals(acts) { - break - } - - if ts.Parents() == acts.Key() { - // sync this next, after that sync process finishes - relatedToActiveSync = true - } - } - - if !relatedToActiveSync && sm.activeSyncTips.RelatedToAny(ts) { - relatedToActiveSync = true - } - - // if this is related to an active sync process, immediately bucket it - // we don't want to start a parallel sync process that duplicates work - if relatedToActiveSync { - sm.activeSyncTips.Insert(ts) - return - } - - if sm.getBootstrapState() == BSStateScheduled { - sm.syncQueue.Insert(ts) - return - } - - if sm.nextSyncTarget != nil && sm.nextSyncTarget.sameChainAs(ts) { - sm.nextSyncTarget.add(ts) - } else { - sm.syncQueue.Insert(ts) - - if sm.nextSyncTarget == nil { - sm.nextSyncTarget = sm.syncQueue.Pop() - sm.workerChan = sm.syncTargets - } - } -} - -func (sm *SyncManager) scheduleProcessResult(res *syncResult) { - if res.success && sm.getBootstrapState() != BSStateComplete { - sm.setBootstrapState(BSStateComplete) - } - delete(sm.activeSyncs, res.ts.Key()) - relbucket := sm.activeSyncTips.PopRelated(res.ts) - if relbucket != nil { - if res.success { - if sm.nextSyncTarget == 
nil { - sm.nextSyncTarget = relbucket - sm.workerChan = sm.syncTargets - } else { - sm.syncQueue.buckets = append(sm.syncQueue.buckets, relbucket) - } - return - } else { - // TODO: this is the case where we try to sync a chain, and - // fail, and we have more blocks on top of that chain that - // have come in since. The question is, should we try to - // sync these? or just drop them? - } - } - - if sm.nextSyncTarget == nil && !sm.syncQueue.Empty() { - next := sm.syncQueue.Pop() - if next != nil { - sm.nextSyncTarget = next - sm.workerChan = sm.syncTargets - } - } -} - -func (sm *SyncManager) scheduleWorkSent() { - hts := sm.nextSyncTarget.heaviestTipSet() - sm.activeSyncs[hts.Key()] = hts - - if !sm.syncQueue.Empty() { - sm.nextSyncTarget = sm.syncQueue.Pop() - } else { - sm.nextSyncTarget = nil - sm.workerChan = nil - } -} - -func (sm *SyncManager) syncWorker(id int) { - ss := &SyncerState{} - sm.syncStates[id] = ss - for { - select { - case ts, ok := <-sm.syncTargets: - if !ok { - log.Info("sync manager worker shutting down") - return - } - - ctx := context.WithValue(context.TODO(), syncStateKey{}, ss) - err := sm.doSync(ctx, ts) - if err != nil { - log.Errorf("sync error: %+v", err) - } - - sm.syncResults <- &syncResult{ - ts: ts, - success: err == nil, - } - } - } -} - -func (sm *SyncManager) syncedPeerCount() int { - var count int - for _, ts := range sm.peerHeads { - if ts.Height() > 0 { - count++ - } - } - return count -} - -func (sm *SyncManager) getBootstrapState() int { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - return sm.bootstrapState -} - -func (sm *SyncManager) setBootstrapState(v int) { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - sm.bootstrapState = v -} - -func (sm *SyncManager) IsBootstrapped() bool { - sm.bssLk.Lock() - defer sm.bssLk.Unlock() - return sm.bootstrapState == BSStateComplete -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/sync_manager_test.go b/vendor/github.com/filecoin-project/lotus/chain/sync_manager_test.go 
deleted file mode 100644 index ca2ced856c..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/sync_manager_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package chain - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/mock" -) - -var genTs = mock.TipSet(mock.MkBlock(nil, 0, 0)) - -type syncOp struct { - ts *types.TipSet - done func() -} - -func runSyncMgrTest(t *testing.T, tname string, thresh int, tf func(*testing.T, *SyncManager, chan *syncOp)) { - syncTargets := make(chan *syncOp) - sm := NewSyncManager(func(ctx context.Context, ts *types.TipSet) error { - ch := make(chan struct{}) - syncTargets <- &syncOp{ - ts: ts, - done: func() { close(ch) }, - } - <-ch - return nil - }) - sm.bspThresh = thresh - - sm.Start() - defer sm.Stop() - t.Run(tname+fmt.Sprintf("-%d", thresh), func(t *testing.T) { - tf(t, sm, syncTargets) - }) -} - -func assertTsEqual(t *testing.T, actual, expected *types.TipSet) { - t.Helper() - if !actual.Equals(expected) { - t.Fatalf("got unexpected tipset %s (expected: %s)", actual.Cids(), expected.Cids()) - } -} - -func assertNoOp(t *testing.T, c chan *syncOp) { - t.Helper() - select { - case <-time.After(time.Millisecond * 20): - case <-c: - t.Fatal("shouldnt have gotten any sync operations yet") - } -} - -func assertGetSyncOp(t *testing.T, c chan *syncOp, ts *types.TipSet) { - t.Helper() - - select { - case <-time.After(time.Millisecond * 100): - t.Fatal("expected sync manager to try and sync to our target") - case op := <-c: - op.done() - if !op.ts.Equals(ts) { - t.Fatalf("somehow got wrong tipset from syncer (got %s, expected %s)", op.ts.Cids(), ts.Cids()) - } - } -} - -func TestSyncManager(t *testing.T) { - ctx := context.Background() - - a := mock.TipSet(mock.MkBlock(genTs, 1, 1)) - b := mock.TipSet(mock.MkBlock(a, 1, 2)) - c1 := mock.TipSet(mock.MkBlock(b, 1, 3)) - c2 := mock.TipSet(mock.MkBlock(b, 2, 4)) - c3 := 
mock.TipSet(mock.MkBlock(b, 3, 5)) - d := mock.TipSet(mock.MkBlock(c1, 4, 5)) - - runSyncMgrTest(t, "testBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { - sm.SetPeerHead(ctx, "peer1", c1) - assertGetSyncOp(t, stc, c1) - }) - - runSyncMgrTest(t, "testBootstrap", 2, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { - sm.SetPeerHead(ctx, "peer1", c1) - assertNoOp(t, stc) - - sm.SetPeerHead(ctx, "peer2", c1) - assertGetSyncOp(t, stc, c1) - }) - - runSyncMgrTest(t, "testSyncAfterBootstrap", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { - sm.SetPeerHead(ctx, "peer1", b) - assertGetSyncOp(t, stc, b) - - sm.SetPeerHead(ctx, "peer2", c1) - assertGetSyncOp(t, stc, c1) - - sm.SetPeerHead(ctx, "peer2", c2) - assertGetSyncOp(t, stc, c2) - }) - - runSyncMgrTest(t, "testCoalescing", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { - sm.SetPeerHead(ctx, "peer1", a) - assertGetSyncOp(t, stc, a) - - sm.SetPeerHead(ctx, "peer2", b) - op := <-stc - - sm.SetPeerHead(ctx, "peer2", c1) - sm.SetPeerHead(ctx, "peer2", c2) - sm.SetPeerHead(ctx, "peer2", d) - - assertTsEqual(t, op.ts, b) - - // need a better way to 'wait until syncmgr is idle' - time.Sleep(time.Millisecond * 20) - - op.done() - - assertGetSyncOp(t, stc, d) - }) - - runSyncMgrTest(t, "testSyncIncomingTipset", 1, func(t *testing.T, sm *SyncManager, stc chan *syncOp) { - sm.SetPeerHead(ctx, "peer1", a) - assertGetSyncOp(t, stc, a) - - sm.SetPeerHead(ctx, "peer2", b) - op := <-stc - op.done() - - sm.SetPeerHead(ctx, "peer2", c1) - op1 := <-stc - fmt.Println("op1: ", op1.ts.Cids()) - - sm.SetPeerHead(ctx, "peer2", c2) - sm.SetPeerHead(ctx, "peer2", c3) - - op1.done() - - op2 := <-stc - fmt.Println("op2: ", op2.ts.Cids()) - op2.done() - - op3 := <-stc - fmt.Println("op3: ", op3.ts.Cids()) - op3.done() - }) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/sync_test.go b/vendor/github.com/filecoin-project/lotus/chain/sync_test.go deleted file mode 100644 index 
efb6010415..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/sync_test.go +++ /dev/null @@ -1,547 +0,0 @@ -package chain_test - -import ( - "context" - "fmt" - "os" - "testing" - "time" - - logging "github.com/ipfs/go-log/v2" - "github.com/libp2p/go-libp2p-core/peer" - mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" - "github.com/stretchr/testify/require" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - mocktypes "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/lotus/node" - "github.com/filecoin-project/lotus/node/impl" - "github.com/filecoin-project/lotus/node/modules" - "github.com/filecoin-project/lotus/node/repo" -) - -func init() { - build.InsecurePoStValidation = true - os.Setenv("TRUST_PARAMS", "1") - miner.SupportedProofTypes = map[abi.RegisteredSealProof]struct{}{ - abi.RegisteredSealProof_StackedDrg2KiBV1: {}, - } - power.ConsensusMinerMinPower = big.NewInt(2048) - verifreg.MinVerifiedDealSize = big.NewInt(256) -} - -const source = 0 - -func (tu *syncTestUtil) repoWithChain(t testing.TB, h int) (repo.Repo, []byte, []*store.FullTipSet) { - blks := make([]*store.FullTipSet, h) - - for i := 0; i < h; i++ { - mts, err := tu.g.NextTipSet() - require.NoError(t, err) - - blks[i] = mts.TipSet - } - - r, err := tu.g.YieldRepo() - require.NoError(t, err) - - genb, err := tu.g.GenesisCar() - require.NoError(t, err) - - return r, genb, blks -} - -type syncTestUtil struct { - 
t testing.TB - - ctx context.Context - cancel func() - - mn mocknet.Mocknet - - g *gen.ChainGen - - genesis []byte - blocks []*store.FullTipSet - - nds []api.FullNode -} - -func prepSyncTest(t testing.TB, h int) *syncTestUtil { - logging.SetLogLevel("*", "INFO") - - g, err := gen.NewGenerator() - if err != nil { - t.Fatalf("%+v", err) - } - - ctx, cancel := context.WithCancel(context.Background()) - - tu := &syncTestUtil{ - t: t, - ctx: ctx, - cancel: cancel, - - mn: mocknet.New(ctx), - g: g, - } - - tu.addSourceNode(h) - //tu.checkHeight("source", source, h) - - // separate logs - fmt.Println("\x1b[31m///////////////////////////////////////////////////\x1b[39b") - - return tu -} - -func (tu *syncTestUtil) Shutdown() { - tu.cancel() -} - -func (tu *syncTestUtil) printHeads() { - for i, n := range tu.nds { - head, err := n.ChainHead(tu.ctx) - if err != nil { - tu.t.Fatal(err) - } - - fmt.Printf("Node %d: %s\n", i, head.Cids()) - } -} - -func (tu *syncTestUtil) pushFtsAndWait(to int, fts *store.FullTipSet, wait bool) { - // TODO: would be great if we could pass a whole tipset here... 
- tu.pushTsExpectErr(to, fts, false) - - if wait { - start := time.Now() - h, err := tu.nds[to].ChainHead(tu.ctx) - require.NoError(tu.t, err) - for !h.Equals(fts.TipSet()) { - time.Sleep(time.Millisecond * 50) - h, err = tu.nds[to].ChainHead(tu.ctx) - require.NoError(tu.t, err) - - if time.Since(start) > time.Second*10 { - tu.t.Fatal("took too long waiting for block to be accepted") - } - } - } -} - -func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bool) { - for _, fb := range fts.Blocks { - var b types.BlockMsg - - // -1 to match block.Height - b.Header = fb.Header - for _, msg := range fb.SecpkMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) - require.NoError(tu.t, err) - - b.SecpkMessages = append(b.SecpkMessages, c) - } - - for _, msg := range fb.BlsMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) - require.NoError(tu.t, err) - - b.BlsMessages = append(b.BlsMessages, c) - } - - err := tu.nds[to].SyncSubmitBlock(tu.ctx, &b) - if experr { - require.Error(tu.t, err, "expected submit block to fail") - } else { - require.NoError(tu.t, err) - } - } -} - -func (tu *syncTestUtil) mineOnBlock(blk *store.FullTipSet, src int, miners []int, wait, fail bool) *store.FullTipSet { - if miners == nil { - for i := range tu.g.Miners { - miners = append(miners, i) - } - } - - var maddrs []address.Address - for _, i := range miners { - maddrs = append(maddrs, tu.g.Miners[i]) - } - - fmt.Println("Miner mining block: ", maddrs) - - mts, err := tu.g.NextTipSetFromMiners(blk.TipSet(), maddrs) - require.NoError(tu.t, err) - - if fail { - tu.pushTsExpectErr(src, mts.TipSet, true) - } else { - tu.pushFtsAndWait(src, mts.TipSet, wait) - } - - return mts.TipSet -} - -func (tu *syncTestUtil) mineNewBlock(src int, miners []int) { - mts := tu.mineOnBlock(tu.g.CurTipset, src, miners, true, false) - tu.g.CurTipset = mts -} - -func fblkToBlkMsg(fb *types.FullBlock) *types.BlockMsg { - out := 
&types.BlockMsg{ - Header: fb.Header, - } - - for _, msg := range fb.BlsMessages { - out.BlsMessages = append(out.BlsMessages, msg.Cid()) - } - for _, msg := range fb.SecpkMessages { - out.SecpkMessages = append(out.SecpkMessages, msg.Cid()) - } - return out -} - -func (tu *syncTestUtil) addSourceNode(gen int) { - if tu.genesis != nil { - tu.t.Fatal("source node already exists") - } - - sourceRepo, genesis, blocks := tu.repoWithChain(tu.t, gen) - var out api.FullNode - - // TODO: Don't ignore stop - _, err := node.New(tu.ctx, - node.FullAPI(&out), - node.Online(), - node.Repo(sourceRepo), - node.MockHost(tu.mn), - node.Test(), - - node.Override(new(modules.Genesis), modules.LoadGenesis(genesis)), - ) - require.NoError(tu.t, err) - - lastTs := blocks[len(blocks)-1].Blocks - for _, lastB := range lastTs { - cs := out.(*impl.FullNodeAPI).ChainAPI.Chain - require.NoError(tu.t, cs.AddToTipSetTracker(lastB.Header)) - err = cs.AddBlock(tu.ctx, lastB.Header) - require.NoError(tu.t, err) - } - - tu.genesis = genesis - tu.blocks = blocks - tu.nds = append(tu.nds, out) // always at 0 -} - -func (tu *syncTestUtil) addClientNode() int { - if tu.genesis == nil { - tu.t.Fatal("source doesn't exists") - } - - var out api.FullNode - - // TODO: Don't ignore stop - _, err := node.New(tu.ctx, - node.FullAPI(&out), - node.Online(), - node.Repo(repo.NewMemory(nil)), - node.MockHost(tu.mn), - node.Test(), - - node.Override(new(modules.Genesis), modules.LoadGenesis(tu.genesis)), - ) - require.NoError(tu.t, err) - - tu.nds = append(tu.nds, out) - return len(tu.nds) - 1 -} - -func (tu *syncTestUtil) pid(n int) peer.ID { - nal, err := tu.nds[n].NetAddrsListen(tu.ctx) - require.NoError(tu.t, err) - - return nal.ID -} - -func (tu *syncTestUtil) connect(from, to int) { - toPI, err := tu.nds[to].NetAddrsListen(tu.ctx) - require.NoError(tu.t, err) - - err = tu.nds[from].NetConnect(tu.ctx, toPI) - require.NoError(tu.t, err) -} - -func (tu *syncTestUtil) disconnect(from, to int) { - toPI, err := 
tu.nds[to].NetAddrsListen(tu.ctx) - require.NoError(tu.t, err) - - err = tu.nds[from].NetDisconnect(tu.ctx, toPI.ID) - require.NoError(tu.t, err) -} - -func (tu *syncTestUtil) checkHeight(name string, n int, h int) { - b, err := tu.nds[n].ChainHead(tu.ctx) - require.NoError(tu.t, err) - - require.Equal(tu.t, uint64(h), b.Height()) - fmt.Printf("%s H: %d\n", name, b.Height()) -} - -func (tu *syncTestUtil) compareSourceState(with int) { - sourceHead, err := tu.nds[source].ChainHead(tu.ctx) - require.NoError(tu.t, err) - - targetHead, err := tu.nds[with].ChainHead(tu.ctx) - require.NoError(tu.t, err) - - if !sourceHead.Equals(targetHead) { - fmt.Println("different chains: ", sourceHead.Height(), targetHead.Height()) - tu.t.Fatalf("nodes were not synced correctly: %s != %s", sourceHead.Cids(), targetHead.Cids()) - } - - sourceAccounts, err := tu.nds[source].WalletList(tu.ctx) - require.NoError(tu.t, err) - - for _, addr := range sourceAccounts { - sourceBalance, err := tu.nds[source].WalletBalance(tu.ctx, addr) - require.NoError(tu.t, err) - fmt.Printf("Source state check for %s, expect %s\n", addr, sourceBalance) - - actBalance, err := tu.nds[with].WalletBalance(tu.ctx, addr) - require.NoError(tu.t, err) - - require.Equal(tu.t, sourceBalance, actBalance) - fmt.Printf("Source state check for %s\n", addr) - } -} - -func (tu *syncTestUtil) waitUntilSync(from, to int) { - target, err := tu.nds[from].ChainHead(tu.ctx) - if err != nil { - tu.t.Fatal(err) - } - - tu.waitUntilSyncTarget(to, target) -} - -func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - hc, err := tu.nds[to].ChainNotify(ctx) - if err != nil { - tu.t.Fatal(err) - } - - // TODO: some sort of timeout? 
- for n := range hc { - for _, c := range n { - if c.Val.Equals(target) { - return - } - } - } -} - -func TestSyncSimple(t *testing.T) { - H := 50 - tu := prepSyncTest(t, H) - - client := tu.addClientNode() - //tu.checkHeight("client", client, 0) - - require.NoError(t, tu.mn.LinkAll()) - tu.connect(1, 0) - tu.waitUntilSync(0, client) - - //tu.checkHeight("client", client, H) - - tu.compareSourceState(client) -} - -func TestSyncMining(t *testing.T) { - H := 50 - tu := prepSyncTest(t, H) - - client := tu.addClientNode() - //tu.checkHeight("client", client, 0) - - require.NoError(t, tu.mn.LinkAll()) - tu.connect(client, 0) - tu.waitUntilSync(0, client) - - //tu.checkHeight("client", client, H) - - tu.compareSourceState(client) - - for i := 0; i < 5; i++ { - tu.mineNewBlock(0, nil) - tu.waitUntilSync(0, client) - tu.compareSourceState(client) - } -} - -func TestSyncBadTimestamp(t *testing.T) { - H := 50 - tu := prepSyncTest(t, H) - - client := tu.addClientNode() - - require.NoError(t, tu.mn.LinkAll()) - tu.connect(client, 0) - tu.waitUntilSync(0, client) - - base := tu.g.CurTipset - tu.g.Timestamper = func(pts *types.TipSet, tl abi.ChainEpoch) uint64 { - return pts.MinTimestamp() + (build.BlockDelaySecs / 2) - } - - fmt.Println("BASE: ", base.Cids()) - tu.printHeads() - - a1 := tu.mineOnBlock(base, 0, nil, false, true) - - tu.g.Timestamper = nil - require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) - - fmt.Println("After mine bad block!") - tu.printHeads() - a2 := tu.mineOnBlock(base, 0, nil, true, false) - - tu.waitUntilSync(0, client) - - head, err := tu.nds[0].ChainHead(tu.ctx) - require.NoError(t, err) - - if !head.Equals(a2.TipSet()) { - t.Fatalf("expected head to be %s, but got %s", a2.Cids(), head.Cids()) - } -} - -func (tu *syncTestUtil) loadChainToNode(to int) { - // utility to simulate incoming blocks without miner process - // TODO: should call syncer directly, this won't work correctly in all cases - - for i := 0; i < len(tu.blocks); i++ { - 
tu.pushFtsAndWait(to, tu.blocks[i], true) - } -} - -func TestSyncFork(t *testing.T) { - H := 10 - tu := prepSyncTest(t, H) - - p1 := tu.addClientNode() - p2 := tu.addClientNode() - - fmt.Println("GENESIS: ", tu.g.Genesis().Cid()) - tu.loadChainToNode(p1) - tu.loadChainToNode(p2) - - phead := func() { - h1, err := tu.nds[1].ChainHead(tu.ctx) - require.NoError(tu.t, err) - - h2, err := tu.nds[2].ChainHead(tu.ctx) - require.NoError(tu.t, err) - - fmt.Println("Node 1: ", h1.Cids(), h1.Parents(), h1.Height()) - fmt.Println("Node 2: ", h2.Cids(), h1.Parents(), h2.Height()) - //time.Sleep(time.Second * 2) - fmt.Println() - fmt.Println() - fmt.Println() - fmt.Println() - } - - phead() - - base := tu.g.CurTipset - fmt.Println("Mining base: ", base.TipSet().Cids(), base.TipSet().Height()) - - // The two nodes fork at this point into 'a' and 'b' - a1 := tu.mineOnBlock(base, p1, []int{0}, true, false) - a := tu.mineOnBlock(a1, p1, []int{0}, true, false) - a = tu.mineOnBlock(a, p1, []int{0}, true, false) - - require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) - // chain B will now be heaviest - b := tu.mineOnBlock(base, p2, []int{1}, true, false) - b = tu.mineOnBlock(b, p2, []int{1}, true, false) - b = tu.mineOnBlock(b, p2, []int{1}, true, false) - b = tu.mineOnBlock(b, p2, []int{1}, true, false) - - fmt.Println("A: ", a.Cids(), a.TipSet().Height()) - fmt.Println("B: ", b.Cids(), b.TipSet().Height()) - - // Now for the fun part!! 
- - require.NoError(t, tu.mn.LinkAll()) - tu.connect(p1, p2) - tu.waitUntilSyncTarget(p1, b.TipSet()) - tu.waitUntilSyncTarget(p2, b.TipSet()) - - phead() -} - -func BenchmarkSyncBasic(b *testing.B) { - for i := 0; i < b.N; i++ { - runSyncBenchLength(b, 100) - } -} - -func runSyncBenchLength(b *testing.B, l int) { - tu := prepSyncTest(b, l) - - client := tu.addClientNode() - tu.checkHeight("client", client, 0) - - b.ResetTimer() - - require.NoError(b, tu.mn.LinkAll()) - tu.connect(1, 0) - - tu.waitUntilSync(0, client) -} - -func TestSyncInputs(t *testing.T) { - H := 10 - tu := prepSyncTest(t, H) - - p1 := tu.addClientNode() - - fn := tu.nds[p1].(*impl.FullNodeAPI) - - s := fn.SyncAPI.Syncer - - err := s.ValidateBlock(context.TODO(), &types.FullBlock{ - Header: &types.BlockHeader{}, - }) - if err == nil { - t.Fatal("should error on empty block") - } - - h := mocktypes.MkBlock(nil, 123, 432) - - h.ElectionProof = nil - - err = s.ValidateBlock(context.TODO(), &types.FullBlock{Header: h}) - if err == nil { - t.Fatal("should error on block with nil election proof") - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/syncstate.go b/vendor/github.com/filecoin-project/lotus/chain/syncstate.go deleted file mode 100644 index b213c74831..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/syncstate.go +++ /dev/null @@ -1,105 +0,0 @@ -package chain - -import ( - "fmt" - "sync" - "time" - - "github.com/filecoin-project/specs-actors/actors/abi" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/types" -) - -func SyncStageString(v api.SyncStateStage) string { - switch v { - case api.StageHeaders: - return "header sync" - case api.StagePersistHeaders: - return "persisting headers" - case api.StageMessages: - return "message sync" - case api.StageSyncComplete: - return "complete" - case api.StageSyncErrored: - return "error" - default: - return fmt.Sprintf("", v) - } -} - -type SyncerState struct { - lk sync.Mutex - 
Target *types.TipSet - Base *types.TipSet - Stage api.SyncStateStage - Height abi.ChainEpoch - Message string - Start time.Time - End time.Time -} - -func (ss *SyncerState) SetStage(v api.SyncStateStage) { - if ss == nil { - return - } - - ss.lk.Lock() - defer ss.lk.Unlock() - ss.Stage = v - if v == api.StageSyncComplete { - ss.End = time.Now() - } -} - -func (ss *SyncerState) Init(base, target *types.TipSet) { - if ss == nil { - return - } - - ss.lk.Lock() - defer ss.lk.Unlock() - ss.Target = target - ss.Base = base - ss.Stage = api.StageHeaders - ss.Height = 0 - ss.Message = "" - ss.Start = time.Now() - ss.End = time.Time{} -} - -func (ss *SyncerState) SetHeight(h abi.ChainEpoch) { - if ss == nil { - return - } - - ss.lk.Lock() - defer ss.lk.Unlock() - ss.Height = h -} - -func (ss *SyncerState) Error(err error) { - if ss == nil { - return - } - - ss.lk.Lock() - defer ss.lk.Unlock() - ss.Message = err.Error() - ss.Stage = api.StageSyncErrored - ss.End = time.Now() -} - -func (ss *SyncerState) Snapshot() SyncerState { - ss.lk.Lock() - defer ss.lk.Unlock() - return SyncerState{ - Base: ss.Base, - Target: ss.Target, - Stage: ss.Stage, - Height: ss.Height, - Message: ss.Message, - Start: ss.Start, - End: ss.End, - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/actor.go b/vendor/github.com/filecoin-project/lotus/chain/types/actor.go index 56aa557351..a9974a01f4 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/actor.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/actor.go @@ -1,13 +1,12 @@ package types import ( - "github.com/ipfs/go-cid" + "errors" - "github.com/filecoin-project/specs-actors/actors/builtin" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" + "github.com/ipfs/go-cid" ) -var ErrActorNotFound = init_.ErrAddressNotFound +var ErrActorNotFound = errors.New("actor not found") type Actor struct { // Identifies the type of actor (string coded as a CID), see `chain/actors/actors.go`. 
@@ -16,7 +15,3 @@ type Actor struct { Nonce uint64 Balance BigInt } - -func (a *Actor) IsAccountActor() bool { - return a.Code == builtin.AccountActorCodeID -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/bigint.go b/vendor/github.com/filecoin-project/lotus/chain/types/bigint.go index a7b25870af..da4857d5b4 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/bigint.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/bigint.go @@ -4,14 +4,14 @@ import ( "fmt" "math/big" - big2 "github.com/filecoin-project/specs-actors/actors/abi/big" + big2 "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" ) const BigIntMaxSerializedLen = 128 // is this big enough? or too big? -var TotalFilecoinInt = FromFil(build.TotalFilecoin) +var TotalFilecoinInt = FromFil(build.FilBase) var EmptyInt = BigInt{} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/bigint_test.go b/vendor/github.com/filecoin-project/lotus/chain/types/bigint_test.go deleted file mode 100644 index 43e5633b2a..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types/bigint_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package types - -import ( - "bytes" - "math/big" - "math/rand" - "strings" - "testing" - "time" - - "github.com/docker/go-units" - - "github.com/stretchr/testify/assert" -) - -func TestBigIntSerializationRoundTrip(t *testing.T) { - testValues := []string{ - "0", "1", "10", "-10", "9999", "12345678901234567891234567890123456789012345678901234567890", - } - - for _, v := range testValues { - bi, err := BigFromString(v) - if err != nil { - t.Fatal(err) - } - - buf := new(bytes.Buffer) - if err := bi.MarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - var out BigInt - if err := out.UnmarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - if BigCmp(out, bi) != 0 { - t.Fatal("failed to round trip BigInt through cbor") - } - - } -} - -func TestFilRoundTrip(t *testing.T) { - testValues := []string{ - "0", 
"1", "1.001", "100.10001", "101100", "5000.01", "5000", - } - - for _, v := range testValues { - fval, err := ParseFIL(v) - if err != nil { - t.Fatal(err) - } - - if fval.String() != v { - t.Fatal("mismatch in values!", v, fval.String()) - } - } -} - -func TestSizeStr(t *testing.T) { - cases := []struct { - in uint64 - out string - }{ - {0, "0 B"}, - {1, "1 B"}, - {1016, "1016 B"}, - {1024, "1 KiB"}, - {1000 * 1024, "1000 KiB"}, - {2000, "1.953 KiB"}, - {5 << 20, "5 MiB"}, - {11 << 60, "11 EiB"}, - } - - for _, c := range cases { - assert.Equal(t, c.out, SizeStr(NewInt(c.in)), "input %+v, produced wrong result", c) - } -} - -func TestSizeStrUnitsSymmetry(t *testing.T) { - s := rand.NewSource(time.Now().UnixNano()) - r := rand.New(s) - - for i := 0; i < 1000000; i++ { - n := r.Uint64() - l := strings.ReplaceAll(units.BytesSize(float64(n)), " ", "") - r := strings.ReplaceAll(SizeStr(NewInt(n)), " ", "") - - assert.NotContains(t, l, "e+") - assert.NotContains(t, r, "e+") - - assert.Equal(t, l, r, "wrong formatting for %d", n) - } -} - -func TestSizeStrBig(t *testing.T) { - ZiB := big.NewInt(50000) - ZiB = ZiB.Lsh(ZiB, 70) - - assert.Equal(t, "5e+04 ZiB", SizeStr(BigInt{Int: ZiB}), "inout %+v, produced wrong result", ZiB) - -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/blockheader.go b/vendor/github.com/filecoin-project/lotus/chain/types/blockheader.go index e238b3e5e6..0ec33fe421 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/blockheader.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/blockheader.go @@ -4,14 +4,15 @@ import ( "bytes" "math/big" + "github.com/filecoin-project/specs-actors/actors/runtime/proof" + "github.com/minio/blake2b-simd" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" block "github.com/ipfs/go-block-format" 
"github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" xerrors "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -23,8 +24,14 @@ type Ticket struct { VRFProof []byte } -type ElectionProof struct { - VRFProof []byte +func (t *Ticket) Quality() float64 { + ticketHash := blake2b.Sum256(t.VRFProof) + ticketNum := BigFromBytes(ticketHash[:]).Int + ticketDenu := big.NewInt(1) + ticketDenu.Lsh(ticketDenu, 256) + tv, _ := new(big.Rat).SetFrac(ticketNum, ticketDenu).Float64() + tq := 1 - tv + return tq } type BeaconEntry struct { @@ -48,7 +55,7 @@ type BlockHeader struct { BeaconEntries []BeaconEntry // 3 - WinPoStProof []abi.PoStProof // 4 + WinPoStProof []proof.PoStProof // 4 Parents []cid.Cid // 5 @@ -70,6 +77,9 @@ type BlockHeader struct { ForkSignaling uint64 // 14 + // ParentBaseFee is the base fee after executing parent tipset + ParentBaseFee abi.TokenAmount // 15 + // internal validated bool // true if the signature has been validated } @@ -80,8 +90,7 @@ func (blk *BlockHeader) ToStorageBlock() (block.Block, error) { return nil, err } - pref := cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(data) + c, err := abi.CidBuilder.Sum(data) if err != nil { return nil, err } @@ -149,13 +158,12 @@ func (mm *MsgMeta) Cid() cid.Cid { } func (mm *MsgMeta) ToStorageBlock() (block.Block, error) { - buf := new(bytes.Buffer) - if err := mm.MarshalCBOR(buf); err != nil { + var buf bytes.Buffer + if err := mm.MarshalCBOR(&buf); err != nil { return nil, xerrors.Errorf("failed to marshal MsgMeta: %w", err) } - pref := cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(buf.Bytes()) + c, err := abi.CidBuilder.Sum(buf.Bytes()) if err != nil { return nil, err } @@ -182,6 +190,21 @@ func CidArrsEqual(a, b []cid.Cid) bool { return true } +func CidArrsSubset(a, b []cid.Cid) bool { + // order ignoring compare... 
+ s := make(map[cid.Cid]bool) + for _, c := range b { + s[c] = true + } + + for _, c := range a { + if !s[c] { + return false + } + } + return true +} + func CidArrsContains(a []cid.Cid, b cid.Cid) bool { for _, elem := range a { if elem.Equals(b) { diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/blockheader_test.go b/vendor/github.com/filecoin-project/lotus/chain/types/blockheader_test.go deleted file mode 100644 index a1ece308ad..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types/blockheader_test.go +++ /dev/null @@ -1,136 +0,0 @@ -package types - -import ( - "bytes" - "encoding/hex" - "fmt" - "github.com/stretchr/testify/require" - "reflect" - "testing" - - cid "github.com/ipfs/go-cid" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" -) - -func testBlockHeader(t testing.TB) *BlockHeader { - t.Helper() - - addr, err := address.NewIDAddress(12512063) - if err != nil { - t.Fatal(err) - } - - c, err := cid.Decode("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") - if err != nil { - t.Fatal(err) - } - - return &BlockHeader{ - Miner: addr, - Ticket: &Ticket{ - VRFProof: []byte("vrf proof0000000vrf proof0000000"), - }, - ElectionProof: &ElectionProof{ - VRFProof: []byte("vrf proof0000000vrf proof0000000"), - }, - Parents: []cid.Cid{c, c}, - ParentMessageReceipts: c, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, - ParentWeight: NewInt(123125126212), - Messages: c, - Height: 85919298723, - ParentStateRoot: c, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! 
im a signature")}, - } -} - -func TestBlockHeaderSerialization(t *testing.T) { - bh := testBlockHeader(t) - - buf := new(bytes.Buffer) - if err := bh.MarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - var out BlockHeader - if err := out.UnmarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(&out, bh) { - fmt.Printf("%#v\n", &out) - fmt.Printf("%#v\n", bh) - t.Fatal("not equal") - } -} - -func TestInteropBH(t *testing.T) { - newAddr, err := address.NewSecp256k1Address([]byte("address0")) - - if err != nil { - t.Fatal(err) - } - - mcid, err := cid.Parse("bafy2bzaceaxyj7xq27gc2747adjcirpxx52tt7owqx6z6kckun7tqivvoym4y") - if err != nil { - t.Fatal(err) - } - - posts := []abi.PoStProof{ - {abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, []byte{0x07}}, - } - - bh := &BlockHeader{ - Miner: newAddr, - Ticket: &Ticket{[]byte{0x01, 0x02, 0x03}}, - ElectionProof: &ElectionProof{[]byte{0x0a, 0x0b}}, - BeaconEntries: []BeaconEntry{ - { - Round: 5, - Data: []byte{0x0c}, - //prevRound: 0, - }, - }, - Height: 2, - Messages: mcid, - ParentMessageReceipts: mcid, - Parents: []cid.Cid{mcid}, - ParentWeight: NewInt(1000), - ForkSignaling: 3, - ParentStateRoot: mcid, - Timestamp: 1, - WinPoStProof: posts, - BlockSig: &crypto.Signature{ - Type: crypto.SigTypeBLS, - Data: []byte{0x3}, - }, - BLSAggregate: &crypto.Signature{}, - } - - bhsb, err := bh.SigningBytes() - - if err != nil { - t.Fatal(err) - } - - // acquired from go-filecoin - gfc := "8f5501d04cb15021bf6bd003073d79e2238d4e61f1ad22814301020381420a0b818205410c818200410781d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc430003e802d82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619ccd82a5827000171a0e402202f84fef0d7cc2d7f9f00d22445f7bf7539fdd685fd9f284aa37f3822b57619cc410001f603" - require.Equal(t, gfc, hex.EncodeToString(bhsb)) -} - -func 
BenchmarkBlockHeaderMarshal(b *testing.B) { - bh := testBlockHeader(b) - - b.ReportAllocs() - - buf := new(bytes.Buffer) - for i := 0; i < b.N; i++ { - buf.Reset() - if err := bh.MarshalCBOR(buf); err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/cbor_gen.go b/vendor/github.com/filecoin-project/lotus/chain/types/cbor_gen.go index b521717975..d063ce8c9f 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/cbor_gen.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/cbor_gen.go @@ -6,17 +6,18 @@ import ( "fmt" "io" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/ipfs/go-cid" + abi "github.com/filecoin-project/go-state-types/abi" + crypto "github.com/filecoin-project/go-state-types/crypto" + exitcode "github.com/filecoin-project/go-state-types/exitcode" + proof "github.com/filecoin-project/specs-actors/actors/runtime/proof" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf -var lengthBufBlockHeader = []byte{143} +var lengthBufBlockHeader = []byte{144} func (t *BlockHeader) MarshalCBOR(w io.Writer) error { if t == nil { @@ -58,7 +59,7 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { } } - // t.WinPoStProof ([]abi.PoStProof) (slice) + // t.WinPoStProof ([]proof.PoStProof) (slice) if len(t.WinPoStProof) > cbg.MaxLength { return xerrors.Errorf("Slice value in field t.WinPoStProof was too long") } @@ -142,10 +143,16 @@ func (t *BlockHeader) MarshalCBOR(w io.Writer) error { return err } + // t.ParentBaseFee (big.Int) (struct) + if err := t.ParentBaseFee.MarshalCBOR(w); err != nil { + return err + } return nil } func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { + *t = BlockHeader{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -157,7 +164,7 @@ func (t 
*BlockHeader) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 15 { + if extra != 16 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -174,16 +181,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { { - pb, err := br.PeekByte() + b, err := br.ReadByte() if err != nil { return err } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { t.Ticket = new(Ticket) if err := t.Ticket.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.Ticket pointer: %w", err) @@ -195,16 +200,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { { - pb, err := br.PeekByte() + b, err := br.ReadByte() if err != nil { return err } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { t.ElectionProof = new(ElectionProof) if err := t.ElectionProof.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.ElectionProof pointer: %w", err) @@ -241,7 +244,7 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { t.BeaconEntries[i] = v } - // t.WinPoStProof ([]abi.PoStProof) (slice) + // t.WinPoStProof ([]proof.PoStProof) (slice) maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { @@ -257,12 +260,12 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { } if extra > 0 { - t.WinPoStProof = make([]abi.PoStProof, extra) + t.WinPoStProof = make([]proof.PoStProof, extra) } for i := 0; i < int(extra); i++ { - var v abi.PoStProof + var v proof.PoStProof if err := v.UnmarshalCBOR(br); err != nil { return err } @@ -372,16 +375,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { { - pb, err := br.PeekByte() + b, err := br.ReadByte() if err != nil { return err } - if pb == cbg.CborNull[0] 
{ - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { t.BLSAggregate = new(crypto.Signature) if err := t.BLSAggregate.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.BLSAggregate pointer: %w", err) @@ -407,16 +408,14 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { { - pb, err := br.PeekByte() + b, err := br.ReadByte() if err != nil { return err } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { t.BlockSig = new(crypto.Signature) if err := t.BlockSig.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.BlockSig pointer: %w", err) @@ -437,6 +436,15 @@ func (t *BlockHeader) UnmarshalCBOR(r io.Reader) error { } t.ForkSignaling = uint64(extra) + } + // t.ParentBaseFee (big.Int) (struct) + + { + + if err := t.ParentBaseFee.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ParentBaseFee: %w", err) + } + } return nil } @@ -463,13 +471,15 @@ func (t *Ticket) MarshalCBOR(w io.Writer) error { return err } - if _, err := w.Write(t.VRFProof); err != nil { + if _, err := w.Write(t.VRFProof[:]); err != nil { return err } return nil } func (t *Ticket) UnmarshalCBOR(r io.Reader) error { + *t = Ticket{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -498,14 +508,18 @@ func (t *Ticket) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.VRFProof = make([]byte, extra) - if _, err := io.ReadFull(br, t.VRFProof); err != nil { + + if extra > 0 { + t.VRFProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.VRFProof[:]); err != nil { return err } return nil } -var lengthBufElectionProof = []byte{129} +var lengthBufElectionProof = []byte{130} func (t *ElectionProof) MarshalCBOR(w io.Writer) error 
{ if t == nil { @@ -518,6 +532,17 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error { scratch := make([]byte, 9) + // t.WinCount (int64) (int64) + if t.WinCount >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.WinCount)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.WinCount-1)); err != nil { + return err + } + } + // t.VRFProof ([]uint8) (slice) if len(t.VRFProof) > cbg.ByteArrayMaxLen { return xerrors.Errorf("Byte array in field t.VRFProof was too long") @@ -527,13 +552,15 @@ func (t *ElectionProof) MarshalCBOR(w io.Writer) error { return err } - if _, err := w.Write(t.VRFProof); err != nil { + if _, err := w.Write(t.VRFProof[:]); err != nil { return err } return nil } func (t *ElectionProof) UnmarshalCBOR(r io.Reader) error { + *t = ElectionProof{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -545,10 +572,35 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 1 { + if extra != 2 { return fmt.Errorf("cbor input had wrong number of fields") } + // t.WinCount (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.WinCount = int64(extraI) + } // t.VRFProof ([]uint8) (slice) maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) @@ -562,14 +614,18 @@ func (t *ElectionProof) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.VRFProof = make([]byte, extra) - if _, err := 
io.ReadFull(br, t.VRFProof); err != nil { + + if extra > 0 { + t.VRFProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.VRFProof[:]); err != nil { return err } return nil } -var lengthBufMessage = []byte{137} +var lengthBufMessage = []byte{138} func (t *Message) MarshalCBOR(w io.Writer) error { if t == nil { @@ -582,15 +638,10 @@ func (t *Message) MarshalCBOR(w io.Writer) error { scratch := make([]byte, 9) - // t.Version (int64) (int64) - if t.Version >= 0 { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { - return err - } - } else { - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Version-1)); err != nil { - return err - } + // t.Version (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { + return err } // t.To (address.Address) (struct) @@ -614,11 +665,6 @@ func (t *Message) MarshalCBOR(w io.Writer) error { return err } - // t.GasPrice (big.Int) (struct) - if err := t.GasPrice.MarshalCBOR(w); err != nil { - return err - } - // t.GasLimit (int64) (int64) if t.GasLimit >= 0 { if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.GasLimit)); err != nil { @@ -630,6 +676,16 @@ func (t *Message) MarshalCBOR(w io.Writer) error { } } + // t.GasFeeCap (big.Int) (struct) + if err := t.GasFeeCap.MarshalCBOR(w); err != nil { + return err + } + + // t.GasPremium (big.Int) (struct) + if err := t.GasPremium.MarshalCBOR(w); err != nil { + return err + } + // t.Method (abi.MethodNum) (uint64) if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil { @@ -645,13 +701,15 @@ func (t *Message) MarshalCBOR(w io.Writer) error { return err } - if _, err := w.Write(t.Params); err != nil { + if _, err := w.Write(t.Params[:]); err != nil { return err } return nil } func (t *Message) UnmarshalCBOR(r io.Reader) error { + *t = Message{} + br := 
cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -663,34 +721,23 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 9 { + if extra != 10 { return fmt.Errorf("cbor input had wrong number of fields") } - // t.Version (int64) (int64) + // t.Version (uint64) (uint64) + { - maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) - var extraI int64 + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { return err } - switch maj { - case cbg.MajUnsignedInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 positive overflow") - } - case cbg.MajNegativeInt: - extraI = int64(extra) - if extraI < 0 { - return fmt.Errorf("int64 negative oveflow") - } - extraI = -1 - extraI - default: - return fmt.Errorf("wrong type for int64 field: %d", maj) + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") } + t.Version = uint64(extra) - t.Version = int64(extraI) } // t.To (address.Address) (struct) @@ -732,15 +779,6 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error { return xerrors.Errorf("unmarshaling t.Value: %w", err) } - } - // t.GasPrice (big.Int) (struct) - - { - - if err := t.GasPrice.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.GasPrice: %w", err) - } - } // t.GasLimit (int64) (int64) { @@ -767,6 +805,24 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error { t.GasLimit = int64(extraI) } + // t.GasFeeCap (big.Int) (struct) + + { + + if err := t.GasFeeCap.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.GasFeeCap: %w", err) + } + + } + // t.GasPremium (big.Int) (struct) + + { + + if err := t.GasPremium.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.GasPremium: %w", err) + } + + } // t.Method (abi.MethodNum) (uint64) { @@ -794,8 +850,12 @@ func (t *Message) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - 
t.Params = make([]byte, extra) - if _, err := io.ReadFull(br, t.Params); err != nil { + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Params[:]); err != nil { return err } return nil @@ -825,6 +885,8 @@ func (t *SignedMessage) MarshalCBOR(w io.Writer) error { } func (t *SignedMessage) UnmarshalCBOR(r io.Reader) error { + *t = SignedMessage{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -890,6 +952,8 @@ func (t *MsgMeta) MarshalCBOR(w io.Writer) error { } func (t *MsgMeta) UnmarshalCBOR(r io.Reader) error { + *t = MsgMeta{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -971,6 +1035,8 @@ func (t *Actor) MarshalCBOR(w io.Writer) error { } func (t *Actor) UnmarshalCBOR(r io.Reader) error { + *t = Actor{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -1069,7 +1135,7 @@ func (t *MessageReceipt) MarshalCBOR(w io.Writer) error { return err } - if _, err := w.Write(t.Return); err != nil { + if _, err := w.Write(t.Return[:]); err != nil { return err } @@ -1087,6 +1153,8 @@ func (t *MessageReceipt) MarshalCBOR(w io.Writer) error { } func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error { + *t = MessageReceipt{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -1140,8 +1208,12 @@ func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.Return = make([]byte, extra) - if _, err := io.ReadFull(br, t.Return); err != nil { + + if extra > 0 { + t.Return = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Return[:]); err != nil { return err } // t.GasUsed (int64) (int64) @@ -1221,6 +1293,8 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { } func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { + *t = BlockMsg{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -1240,16 +1314,14 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { { - pb, err := br.PeekByte() + b, err := br.ReadByte() if err != nil { 
return err } - if pb == cbg.CborNull[0] { - var nbuf [1]byte - if _, err := br.Read(nbuf[:]); err != nil { + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { return err } - } else { t.Header = new(BlockHeader) if err := t.Header.UnmarshalCBOR(br); err != nil { return xerrors.Errorf("unmarshaling t.Header pointer: %w", err) @@ -1371,6 +1443,8 @@ func (t *ExpTipSet) MarshalCBOR(w io.Writer) error { } func (t *ExpTipSet) UnmarshalCBOR(r io.Reader) error { + *t = ExpTipSet{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -1499,13 +1573,15 @@ func (t *BeaconEntry) MarshalCBOR(w io.Writer) error { return err } - if _, err := w.Write(t.Data); err != nil { + if _, err := w.Write(t.Data[:]); err != nil { return err } return nil } func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) error { + *t = BeaconEntry{} + br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -1548,9 +1624,141 @@ func (t *BeaconEntry) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajByteString { return fmt.Errorf("expected byte array") } - t.Data = make([]byte, extra) - if _, err := io.ReadFull(br, t.Data); err != nil { + + if extra > 0 { + t.Data = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Data[:]); err != nil { return err } return nil } + +var lengthBufStateRoot = []byte{131} + +func (t *StateRoot) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStateRoot); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Version (types.StateTreeVersion) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Version)); err != nil { + return err + } + + // t.Actors (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Actors); err != nil { + return xerrors.Errorf("failed to write cid field t.Actors: %w", err) + } + + // t.Info (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Info); err != nil { + return 
xerrors.Errorf("failed to write cid field t.Info: %w", err) + } + + return nil +} + +func (t *StateRoot) UnmarshalCBOR(r io.Reader) error { + *t = StateRoot{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Version (types.StateTreeVersion) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Version = StateTreeVersion(extra) + + } + // t.Actors (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Actors: %w", err) + } + + t.Actors = c + + } + // t.Info (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Info: %w", err) + } + + t.Info = c + + } + return nil +} + +var lengthBufStateInfo0 = []byte{128} + +func (t *StateInfo0) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufStateInfo0); err != nil { + return err + } + + return nil +} + +func (t *StateInfo0) UnmarshalCBOR(r io.Reader) error { + *t = StateInfo0{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 0 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + return nil +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/electionproof.go b/vendor/github.com/filecoin-project/lotus/chain/types/electionproof.go new file mode 100644 
index 0000000000..b8879b27c8 --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/electionproof.go @@ -0,0 +1,205 @@ +package types + +import ( + "math/big" + + "github.com/filecoin-project/lotus/build" + "github.com/minio/blake2b-simd" +) + +type ElectionProof struct { + WinCount int64 + VRFProof []byte +} + +const precision = 256 + +var ( + expNumCoef []*big.Int + expDenoCoef []*big.Int +) + +func init() { + parse := func(coefs []string) []*big.Int { + out := make([]*big.Int, len(coefs)) + for i, coef := range coefs { + c, ok := new(big.Int).SetString(coef, 10) + if !ok { + panic("could not parse exp paramemter") + } + // << 256 (Q.0 to Q.256), >> 128 to transform integer params to coefficients + c = c.Lsh(c, precision-128) + out[i] = c + } + return out + } + + // parameters are in integer format, + // coefficients are *2^-128 of that + num := []string{ + "-648770010757830093818553637600", + "67469480939593786226847644286976", + "-3197587544499098424029388939001856", + "89244641121992890118377641805348864", + "-1579656163641440567800982336819953664", + "17685496037279256458459817590917169152", + "-115682590513835356866803355398940131328", + "340282366920938463463374607431768211456", + } + expNumCoef = parse(num) + + deno := []string{ + "1225524182432722209606361", + "114095592300906098243859450", + "5665570424063336070530214243", + "194450132448609991765137938448", + "5068267641632683791026134915072", + "104716890604972796896895427629056", + "1748338658439454459487681798864896", + "23704654329841312470660182937960448", + "259380097567996910282699886670381056", + "2250336698853390384720606936038375424", + "14978272436876548034486263159246028800", + "72144088983913131323343765784380833792", + "224599776407103106596571252037123047424", + "340282366920938463463374607431768211456", + } + expDenoCoef = parse(deno) +} + +// expneg accepts x in Q.256 format and computes e^-x. 
+// It is most precise within [0, 1.725) range, where error is less than 3.4e-30. +// Over the [0, 5) range its error is less than 4.6e-15. +// Output is in Q.256 format. +func expneg(x *big.Int) *big.Int { + // exp is approximated by rational function + // polynomials of the rational function are evaluated using Horner's method + num := polyval(expNumCoef, x) // Q.256 + deno := polyval(expDenoCoef, x) // Q.256 + + num = num.Lsh(num, precision) // Q.512 + return num.Div(num, deno) // Q.512 / Q.256 => Q.256 +} + +// polyval evaluates a polynomial given by coefficients `p` in Q.256 format +// at point `x` in Q.256 format. Output is in Q.256. +// Coefficients should be ordered from the highest order coefficient to the lowest. +func polyval(p []*big.Int, x *big.Int) *big.Int { + // evaluation using Horner's method + res := new(big.Int).Set(p[0]) // Q.256 + tmp := new(big.Int) // big.Int.Mul doesn't like when input is reused as output + for _, c := range p[1:] { + tmp = tmp.Mul(res, x) // Q.256 * Q.256 => Q.512 + res = res.Rsh(tmp, precision) // Q.512 >> 256 => Q.256 + res = res.Add(res, c) + } + + return res +} + +// computes lambda in Q.256 +func lambda(power, totalPower *big.Int) *big.Int { + lam := new(big.Int).Mul(power, blocksPerEpoch.Int) // Q.0 + lam = lam.Lsh(lam, precision) // Q.256 + lam = lam.Div(lam /* Q.256 */, totalPower /* Q.0 */) // Q.256 + return lam +} + +var MaxWinCount = 3 * int64(build.BlocksPerEpoch) + +type poiss struct { + lam *big.Int + pmf *big.Int + icdf *big.Int + + tmp *big.Int // temporary variable for optimization + + k uint64 +} + +// newPoiss starts poisson inverted CDF +// lambda is in Q.256 format +// returns (instance, `1-poisscdf(0, lambda)`) +// CDF value returned is reused when calling `next` +func newPoiss(lambda *big.Int) (*poiss, *big.Int) { + + // pmf(k) = (lambda^k)*(e^-lambda) / k!
+ // k = 0 here, so it simplifies to just e^-lambda + elam := expneg(lambda) // Q.256 + pmf := new(big.Int).Set(elam) + + // icdf(k) = 1 - ∑ᵏᵢ₌₀ pmf(i) + // icdf(0) = 1 - pmf(0) + icdf := big.NewInt(1) + icdf = icdf.Lsh(icdf, precision) // Q.256 + icdf = icdf.Sub(icdf, pmf) // Q.256 + + k := uint64(0) + + p := &poiss{ + lam: lambda, + pmf: pmf, + + tmp: elam, + icdf: icdf, + + k: k, + } + + return p, icdf +} + +// next computes `k++, 1-poisscdf(k, lam)` +// return is in Q.256 format +func (p *poiss) next() *big.Int { + // incrementally compute next pmf and icdf + + // pmf(k) = (lambda^k)*(e^-lambda) / k! + // so pmf(k) = pmf(k-1) * lambda / k + p.k++ + p.tmp.SetUint64(p.k) // Q.0 + + // calculate pmf for k + p.pmf = p.pmf.Div(p.pmf, p.tmp) // Q.256 / Q.0 => Q.256 + // we are using `tmp` as target for multiplication as using an input as output + // for Int.Mul causes allocations + p.tmp = p.tmp.Mul(p.pmf, p.lam) // Q.256 * Q.256 => Q.512 + p.pmf = p.pmf.Rsh(p.tmp, precision) // Q.512 >> 256 => Q.256 + + // calculate output + // icdf(k) = icdf(k-1) - pmf(k) + p.icdf = p.icdf.Sub(p.icdf, p.pmf) // Q.256 + return p.icdf +} + +// ComputeWinCount uses VRFProof to compute number of wins +// The algorithm is based on Algorand's Sortition with Binomial distribution +// replaced by Poisson distribution. +func (ep *ElectionProof) ComputeWinCount(power BigInt, totalPower BigInt) int64 { + h := blake2b.Sum256(ep.VRFProof) + + lhs := BigFromBytes(h[:]).Int // 256bits, assume Q.256 so [0, 1) + + // We are calculating upside-down CDF of Poisson distribution with + // rate λ=power*E/totalPower + // Steps: + // 1. calculate λ=power*E/totalPower + // 2. calculate elam = exp(-λ) + // 3. 
Check how many times we win: + // j = 0 + // pmf = elam + // rhs = 1 - pmf + // for h(vrf) < rhs: j++; pmf = pmf * lam / j; rhs = rhs - pmf + + lam := lambda(power.Int, totalPower.Int) // Q.256 + + p, rhs := newPoiss(lam) + + var j int64 + for lhs.Cmp(rhs) < 0 && j < MaxWinCount { + rhs = p.next() + j++ + } + + return j +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/fil.go b/vendor/github.com/filecoin-project/lotus/chain/types/fil.go index 527078e0fc..0ea77660c3 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/fil.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/fil.go @@ -1,6 +1,7 @@ package types import ( + "encoding" "fmt" "math/big" "strings" @@ -11,6 +12,10 @@ import ( type FIL BigInt func (f FIL) String() string { + return f.Unitless() + " FIL" +} + +func (f FIL) Unitless() string { r := new(big.Rat).SetFrac(f.Int, big.NewInt(int64(build.FilecoinPrecision))) if r.Sign() == 0 { return "0" @@ -27,16 +32,63 @@ func (f FIL) Format(s fmt.State, ch rune) { } } +func (f FIL) MarshalText() (text []byte, err error) { + return []byte(f.String()), nil +} + +func (f FIL) UnmarshalText(text []byte) error { + p, err := ParseFIL(string(text)) + if err != nil { + return err + } + + f.Int.Set(p.Int) + return nil +} + func ParseFIL(s string) (FIL, error) { + suffix := strings.TrimLeft(s, ".1234567890") + s = s[:len(s)-len(suffix)] + var attofil bool + if suffix != "" { + norm := strings.ToLower(strings.TrimSpace(suffix)) + switch norm { + case "", "fil": + case "attofil", "afil": + attofil = true + default: + return FIL{}, fmt.Errorf("unrecognized suffix: %q", suffix) + } + } + r, ok := new(big.Rat).SetString(s) if !ok { return FIL{}, fmt.Errorf("failed to parse %q as a decimal number", s) } - r = r.Mul(r, big.NewRat(int64(build.FilecoinPrecision), 1)) + if !attofil { + r = r.Mul(r, big.NewRat(int64(build.FilecoinPrecision), 1)) + } + if !r.IsInt() { - return FIL{}, fmt.Errorf("invalid FIL value: %q", s) + var pref string + 
if attofil { + pref = "atto" + } + return FIL{}, fmt.Errorf("invalid %sFIL value: %q", pref, s) } return FIL{r.Num()}, nil } + +func MustParseFIL(s string) FIL { + n, err := ParseFIL(s) + if err != nil { + panic(err) + } + + return n +} + +var _ encoding.TextMarshaler = (*FIL)(nil) +var _ encoding.TextUnmarshaler = (*FIL)(nil) diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/keystore.go b/vendor/github.com/filecoin-project/lotus/chain/types/keystore.go index 76eb5f2967..107c1fbe3a 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/keystore.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/keystore.go @@ -1,7 +1,10 @@ package types import ( + "encoding/json" "fmt" + + "github.com/filecoin-project/go-state-types/crypto" ) var ( @@ -9,9 +12,50 @@ var ( ErrKeyExists = fmt.Errorf("key already exists") ) +// KeyType defines a type of a key +type KeyType string + +func (kt *KeyType) UnmarshalJSON(bb []byte) error { + { + // first option, try unmarshaling as string + var s string + err := json.Unmarshal(bb, &s) + if err == nil { + *kt = KeyType(s) + return nil + } + } + + { + var b byte + err := json.Unmarshal(bb, &b) + if err != nil { + return fmt.Errorf("could not unmarshal KeyType either as string nor integer: %w", err) + } + bst := crypto.SigType(b) + + switch bst { + case crypto.SigTypeBLS: + *kt = KTBLS + case crypto.SigTypeSecp256k1: + *kt = KTSecp256k1 + default: + return fmt.Errorf("unknown sigtype: %d", bst) + } + log.Warnf("deprecation: integer style 'KeyType' is deprecated, switch to string style") + return nil + } +} + +const ( + KTBLS KeyType = "bls" + KTSecp256k1 KeyType = "secp256k1" + KTSecp256k1Ledger KeyType = "secp256k1-ledger" +) + // KeyInfo is used for storing keys in KeyStore type KeyInfo struct { - Type string + Type KeyType PrivateKey []byte } diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/message.go b/vendor/github.com/filecoin-project/lotus/chain/types/message.go index 
b0d7f885fb..c53ecc7c16 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/message.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/message.go @@ -2,14 +2,14 @@ package types import ( "bytes" + "encoding/json" "fmt" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" xerrors "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -26,17 +26,18 @@ type ChainMsg interface { } type Message struct { - Version int64 + Version uint64 To address.Address From address.Address Nonce uint64 - Value BigInt + Value abi.TokenAmount - GasPrice BigInt - GasLimit int64 + GasLimit int64 + GasFeeCap abi.TokenAmount + GasPremium abi.TokenAmount Method abi.MethodNum Params []byte @@ -89,8 +90,7 @@ func (m *Message) ToStorageBlock() (block.Block, error) { return nil, err } - pref := cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(data) + c, err := abi.CidBuilder.Sum(data) if err != nil { return nil, err } @@ -107,11 +107,22 @@ func (m *Message) Cid() cid.Cid { return b.Cid() } +type mCid struct { + *RawMessage + CID cid.Cid +} + +type RawMessage Message + +func (m *Message) MarshalJSON() ([]byte, error) { + return json.Marshal(&mCid{ + RawMessage: (*RawMessage)(m), + CID: m.Cid(), + }) +} + func (m *Message) RequiredFunds() BigInt { - return BigAdd( - m.Value, - BigMul(m.GasPrice, NewInt(uint64(m.GasLimit))), - ) + return BigMul(m.GasFeeCap, NewInt(uint64(m.GasLimit))) } func (m *Message) VMMessage() *Message { @@ -122,6 +133,17 @@ func (m *Message) Equals(o *Message) bool { return m.Cid() == o.Cid() } +func (m *Message) EqualCall(o *Message) bool { + m1 := *m + m2 := *o + + m1.GasLimit, m2.GasLimit = 0, 0 + 
m1.GasFeeCap, m2.GasFeeCap = big.Zero(), big.Zero() + m1.GasPremium, m2.GasPremium = big.Zero(), big.Zero() + + return (&m1).Equals(&m2) +} + func (m *Message) ValidForBlockInclusion(minGas int64) error { if m.Version != 0 { return xerrors.New("'Version' unsupported") @@ -135,6 +157,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error { return xerrors.New("'From' address cannot be empty") } + if m.Value.Int == nil { + return xerrors.New("'Value' cannot be nil") + } + if m.Value.LessThan(big.Zero()) { return xerrors.New("'Value' field cannot be negative") } @@ -143,8 +169,24 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error { return xerrors.New("'Value' field cannot be greater than total filecoin supply") } - if m.GasPrice.LessThan(big.Zero()) { - return xerrors.New("'GasPrice' field cannot be negative") + if m.GasFeeCap.Int == nil { + return xerrors.New("'GasFeeCap' cannot be nil") + } + + if m.GasFeeCap.LessThan(big.Zero()) { + return xerrors.New("'GasFeeCap' field cannot be negative") + } + + if m.GasPremium.Int == nil { + return xerrors.New("'GasPremium' cannot be nil") + } + + if m.GasPremium.LessThan(big.Zero()) { + return xerrors.New("'GasPremium' field cannot be negative") + } + + if m.GasPremium.GreaterThan(m.GasFeeCap) { + return xerrors.New("'GasFeeCap' less than 'GasPremium'") } if m.GasLimit > build.BlockGasLimit { @@ -153,8 +195,10 @@ func (m *Message) ValidForBlockInclusion(minGas int64) error { // since prices might vary with time, this is technically semantic validation if m.GasLimit < minGas { - return xerrors.New("'GasLimit' field cannot be less than the cost of storing a message on chain") + return xerrors.Errorf("'GasLimit' field cannot be less than the cost of storing a message on chain %d < %d", m.GasLimit, minGas) } return nil } + +const TestGasLimit = 100e6 diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/message_receipt.go b/vendor/github.com/filecoin-project/lotus/chain/types/message_receipt.go 
index 6671595ff0..57761680d2 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/message_receipt.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/message_receipt.go @@ -3,7 +3,7 @@ package types import ( "bytes" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" + "github.com/filecoin-project/go-state-types/exitcode" ) type MessageReceipt struct { diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/mock/chain.go b/vendor/github.com/filecoin-project/lotus/chain/types/mock/chain.go deleted file mode 100644 index 00d0eecc99..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types/mock/chain.go +++ /dev/null @@ -1,91 +0,0 @@ -package mock - -import ( - "context" - "fmt" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/ipfs/go-cid" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" -) - -func Address(i uint64) address.Address { - a, err := address.NewIDAddress(i) - if err != nil { - panic(err) - } - return a -} - -func MkMessage(from, to address.Address, nonce uint64, w *wallet.Wallet) *types.SignedMessage { - msg := &types.Message{ - To: to, - From: from, - Value: types.NewInt(1), - Nonce: nonce, - GasLimit: 1, - GasPrice: types.NewInt(0), - } - - sig, err := w.Sign(context.TODO(), from, msg.Cid().Bytes()) - if err != nil { - panic(err) - } - return &types.SignedMessage{ - Message: *msg, - Signature: *sig, - } -} - -func MkBlock(parents *types.TipSet, weightInc uint64, ticketNonce uint64) *types.BlockHeader { - addr := Address(123561) - - c, err := cid.Decode("bafyreicmaj5hhoy5mgqvamfhgexxyergw7hdeshizghodwkjg6qmpoco7i") - if err != nil { - panic(err) - } - - pstateRoot := c - if parents != nil { - pstateRoot = parents.Blocks()[0].ParentStateRoot - } - - var pcids []cid.Cid - var height abi.ChainEpoch - weight := 
types.NewInt(weightInc) - if parents != nil { - pcids = parents.Cids() - height = parents.Height() + 1 - weight = types.BigAdd(parents.Blocks()[0].ParentWeight, weight) - } - - return &types.BlockHeader{ - Miner: addr, - ElectionProof: &types.ElectionProof{ - VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), - }, - Ticket: &types.Ticket{ - VRFProof: []byte(fmt.Sprintf("====%d=====", ticketNonce)), - }, - Parents: pcids, - ParentMessageReceipts: c, - BLSAggregate: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, - ParentWeight: weight, - Messages: c, - Height: height, - ParentStateRoot: pstateRoot, - BlockSig: &crypto.Signature{Type: crypto.SigTypeBLS, Data: []byte("boo! im a signature")}, - } -} - -func TipSet(blks ...*types.BlockHeader) *types.TipSet { - ts, err := types.NewTipSet(blks) - if err != nil { - panic(err) - } - return ts -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/mpool.go b/vendor/github.com/filecoin-project/lotus/chain/types/mpool.go new file mode 100644 index 0000000000..cf08177e9c --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/mpool.go @@ -0,0 +1,22 @@ +package types + +import ( + "time" + + "github.com/filecoin-project/go-address" +) + +type MpoolConfig struct { + PriorityAddrs []address.Address + SizeLimitHigh int + SizeLimitLow int + ReplaceByFeeRatio float64 + PruneCooldown time.Duration + GasLimitOverestimation float64 +} + +func (mc *MpoolConfig) Clone() *MpoolConfig { + r := new(MpoolConfig) + *r = *mc + return r +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/signature_test.go b/vendor/github.com/filecoin-project/lotus/chain/types/signature_test.go deleted file mode 100644 index 751f55252e..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types/signature_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package types - -import ( - "bytes" - "testing" - - "github.com/filecoin-project/specs-actors/actors/crypto" -) - -func 
TestSignatureSerializeRoundTrip(t *testing.T) { - s := &crypto.Signature{ - Data: []byte("foo bar cat dog"), - Type: crypto.SigTypeBLS, - } - - buf := new(bytes.Buffer) - if err := s.MarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - var outs crypto.Signature - if err := outs.UnmarshalCBOR(buf); err != nil { - t.Fatal(err) - } - - if !outs.Equals(s) { - t.Fatal("serialization round trip failed") - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/signedmessage.go b/vendor/github.com/filecoin-project/lotus/chain/types/signedmessage.go index 54e82a9572..c539ac2402 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/signedmessage.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/signedmessage.go @@ -2,11 +2,12 @@ package types import ( "bytes" + "encoding/json" - "github.com/filecoin-project/specs-actors/actors/crypto" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/crypto" block "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" ) func (sm *SignedMessage) ToStorageBlock() (block.Block, error) { @@ -19,8 +20,7 @@ func (sm *SignedMessage) ToStorageBlock() (block.Block, error) { return nil, err } - pref := cid.NewPrefixV1(cid.DagCBOR, multihash.BLAKE2B_MIN+31) - c, err := pref.Sum(data) + c, err := abi.CidBuilder.Sum(data) if err != nil { return nil, err } @@ -63,8 +63,29 @@ func (sm *SignedMessage) Serialize() ([]byte, error) { return buf.Bytes(), nil } -func (m *SignedMessage) ChainLength() int { - ser, err := m.Serialize() +type smCid struct { + *RawSignedMessage + CID cid.Cid +} + +type RawSignedMessage SignedMessage + +func (sm *SignedMessage) MarshalJSON() ([]byte, error) { + return json.Marshal(&smCid{ + RawSignedMessage: (*RawSignedMessage)(sm), + CID: sm.Cid(), + }) +} + +func (sm *SignedMessage) ChainLength() int { + var ser []byte + var err error + if sm.Signature.Type == crypto.SigTypeBLS { + // BLS chain message 
length doesn't include signature + ser, err = sm.Message.Serialize() + } else { + ser, err = sm.Serialize() + } if err != nil { panic(err) } diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/state.go b/vendor/github.com/filecoin-project/lotus/chain/types/state.go new file mode 100644 index 0000000000..a96883604b --- /dev/null +++ b/vendor/github.com/filecoin-project/lotus/chain/types/state.go @@ -0,0 +1,26 @@ +package types + +import "github.com/ipfs/go-cid" + +// StateTreeVersion is the version of the state tree itself, independent of the +// network version or the actors version. +type StateTreeVersion uint64 + +const ( + // StateTreeVersion0 corresponds to actors < v2. + StateTreeVersion0 StateTreeVersion = iota + // StateTreeVersion1 corresponds to actors >= v2. + StateTreeVersion1 +) + +type StateRoot struct { + // State tree version. + Version StateTreeVersion + // Actors tree. The structure depends on the state root version. + Actors cid.Cid + // Info. The structure depends on the state root version. + Info cid.Cid +} + +// TODO: version this. +type StateInfo0 struct{} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/tipset.go b/vendor/github.com/filecoin-project/lotus/chain/types/tipset.go index 09483dc5e1..07eff37345 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/tipset.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/tipset.go @@ -7,7 +7,7 @@ import ( "io" "sort" - "github.com/filecoin-project/specs-actors/actors/abi" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" "github.com/minio/blake2b-simd" @@ -97,6 +97,12 @@ func tipsetSortFunc(blks []*BlockHeader) func(i, j int) bool { } } +// Checks: +// * A tipset is composed of at least one block. (Because of our variable +// number of blocks per tipset, determined by randomness, we do not impose +// an upper limit.) +// * All blocks have the same height. 
+// * All blocks have the same parents (same number of them and matching CIDs). func NewTipSet(blks []*BlockHeader) (*TipSet, error) { if len(blks) == 0 { return nil, xerrors.Errorf("NewTipSet called with zero length array of blocks") @@ -112,6 +118,10 @@ func NewTipSet(blks []*BlockHeader) (*TipSet, error) { return nil, fmt.Errorf("cannot create tipset with mismatching heights") } + if len(blks[0].Parents) != len(b.Parents) { + return nil, fmt.Errorf("cannot create tipset with mismatching number of parents") + } + for i, cid := range b.Parents { if cid != blks[0].Parents[i] { return nil, fmt.Errorf("cannot create tipset with mismatching parents") @@ -157,12 +167,16 @@ func (ts *TipSet) Equals(ots *TipSet) bool { return false } - if len(ts.blks) != len(ots.blks) { + if ts.height != ots.height { + return false + } + + if len(ts.cids) != len(ots.cids) { return false } - for i, b := range ts.blks { - if b.Cid() != ots.blks[i].Cid() { + for i, cid := range ts.cids { + if cid != ots.cids[i] { return false } } @@ -220,3 +234,15 @@ func (ts *TipSet) Contains(oc cid.Cid) bool { } return false } + +func (ts *TipSet) IsChildOf(parent *TipSet) bool { + return CidArrsEqual(ts.Parents().Cids(), parent.Cids()) && + // FIXME: The height check might go beyond what is meant by + // "parent", but many parts of the code rely on the tipset's + // height for their processing logic at the moment to obviate it. 
+ ts.height > parent.height +} + +func (ts *TipSet) String() string { + return fmt.Sprintf("%v", ts.cids) +} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key.go b/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key.go index 638b4380e3..e5bc7750de 100644 --- a/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key.go +++ b/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key.go @@ -5,8 +5,8 @@ import ( "encoding/json" "strings" + "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" ) var EmptyTSK = TipSetKey{} @@ -15,7 +15,9 @@ var EmptyTSK = TipSetKey{} var blockHeaderCIDLen int func init() { - c, err := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31}.Sum([]byte{}) + // hash a large string of zeros so we don't estimate based on inlined CIDs. + var buf [256]byte + c, err := abi.CidBuilder.Sum(buf[:]) if err != nil { panic(err) } diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key_test.go b/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key_test.go deleted file mode 100644 index 7b3ce439db..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types/tipset_key_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package types - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestTipSetKey(t *testing.T) { - cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31} - c1, _ := cb.Sum([]byte("a")) - c2, _ := cb.Sum([]byte("b")) - c3, _ := cb.Sum([]byte("c")) - fmt.Println(len(c1.Bytes())) - - t.Run("zero value", func(t *testing.T) { - assert.Equal(t, TipSetKey{}, NewTipSetKey()) - }) - - t.Run("CID extraction", func(t *testing.T) { - assert.Equal(t, []cid.Cid{}, NewTipSetKey().Cids()) - assert.Equal(t, []cid.Cid{c1}, 
NewTipSetKey(c1).Cids()) - assert.Equal(t, []cid.Cid{c1, c2, c3}, NewTipSetKey(c1, c2, c3).Cids()) - - // The key doesn't check for duplicates. - assert.Equal(t, []cid.Cid{c1, c1}, NewTipSetKey(c1, c1).Cids()) - }) - - t.Run("equality", func(t *testing.T) { - assert.Equal(t, NewTipSetKey(), NewTipSetKey()) - assert.Equal(t, NewTipSetKey(c1), NewTipSetKey(c1)) - assert.Equal(t, NewTipSetKey(c1, c2, c3), NewTipSetKey(c1, c2, c3)) - - assert.NotEqual(t, NewTipSetKey(), NewTipSetKey(c1)) - assert.NotEqual(t, NewTipSetKey(c2), NewTipSetKey(c1)) - // The key doesn't normalize order. - assert.NotEqual(t, NewTipSetKey(c1, c2), NewTipSetKey(c2, c1)) - }) - - t.Run("encoding", func(t *testing.T) { - keys := []TipSetKey{ - NewTipSetKey(), - NewTipSetKey(c1), - NewTipSetKey(c1, c2, c3), - } - - for _, tk := range keys { - roundTrip, err := TipSetKeyFromBytes(tk.Bytes()) - require.NoError(t, err) - assert.Equal(t, tk, roundTrip) - } - - _, err := TipSetKeyFromBytes(NewTipSetKey(c1).Bytes()[1:]) - assert.Error(t, err) - }) - - t.Run("JSON", func(t *testing.T) { - k0 := NewTipSetKey() - verifyJSON(t, "[]", k0) - k3 := NewTipSetKey(c1, c2, c3) - verifyJSON(t, `[`+ - `{"/":"bafy2bzacecesrkxghscnq7vatble2hqdvwat6ed23vdu4vvo3uuggsoaya7ki"},`+ - `{"/":"bafy2bzacebxfyh2fzoxrt6kcgc5dkaodpcstgwxxdizrww225vrhsizsfcg4g"},`+ - `{"/":"bafy2bzacedwviarjtjraqakob5pslltmuo5n3xev3nt5zylezofkbbv5jclyu"}`+ - `]`, k3) - }) -} - -func verifyJSON(t *testing.T, expected string, k TipSetKey) { - bytes, err := json.Marshal(k) - require.NoError(t, err) - assert.Equal(t, expected, string(bytes)) - - var rehydrated TipSetKey - err = json.Unmarshal(bytes, &rehydrated) - require.NoError(t, err) - assert.Equal(t, k, rehydrated) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/types_test.go b/vendor/github.com/filecoin-project/lotus/chain/types/types_test.go deleted file mode 100644 index a2b47ad517..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types/types_test.go +++ 
/dev/null @@ -1,41 +0,0 @@ -package types - -import ( - "math/rand" - "testing" - - "github.com/filecoin-project/go-address" -) - -func blsaddr(n int64) address.Address { - buf := make([]byte, 48) - r := rand.New(rand.NewSource(n)) - r.Read(buf) - - addr, err := address.NewBLSAddress(buf) - if err != nil { - panic(err) // ok - } - - return addr -} - -func BenchmarkSerializeMessage(b *testing.B) { - m := &Message{ - To: blsaddr(1), - From: blsaddr(2), - Nonce: 197, - Method: 1231254, - Params: []byte("some bytes, idk. probably at least ten of them"), - GasLimit: 126723, - GasPrice: NewInt(1776234), - } - - b.ReportAllocs() - for i := 0; i < b.N; i++ { - _, err := m.Serialize() - if err != nil { - b.Fatal(err) - } - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types/voucher.go b/vendor/github.com/filecoin-project/lotus/chain/types/voucher.go deleted file mode 100644 index 687109c33a..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types/voucher.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -import ( - "encoding/base64" - - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - cbor "github.com/ipfs/go-ipld-cbor" -) - -func DecodeSignedVoucher(s string) (*paych.SignedVoucher, error) { - data, err := base64.RawURLEncoding.DecodeString(s) - if err != nil { - return nil, err - } - - var sv paych.SignedVoucher - if err := cbor.DecodeInto(data, &sv); err != nil { - return nil, err - } - - return &sv, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/types_test.go b/vendor/github.com/filecoin-project/lotus/chain/types_test.go deleted file mode 100644 index 55baf4a285..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/types_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package chain - -import ( - "encoding/json" - "testing" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/types" -) - -func TestSignedMessageJsonRoundtrip(t *testing.T) { - to, _ := 
address.NewIDAddress(5234623) - from, _ := address.NewIDAddress(603911192) - smsg := &types.SignedMessage{ - Message: types.Message{ - To: to, - From: from, - Params: []byte("some bytes, idk"), - Method: 1235126, - Value: types.NewInt(123123), - GasPrice: types.NewInt(1234), - GasLimit: 9992969384, - Nonce: 123123, - }, - } - - out, err := json.Marshal(smsg) - if err != nil { - t.Fatal(err) - } - - var osmsg types.SignedMessage - if err := json.Unmarshal(out, &osmsg); err != nil { - t.Fatal(err) - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/validation/applier.go b/vendor/github.com/filecoin-project/lotus/chain/validation/applier.go deleted file mode 100644 index 6c16291042..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/validation/applier.go +++ /dev/null @@ -1,192 +0,0 @@ -package validation - -import ( - "context" - "golang.org/x/xerrors" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/puppet" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" - - vtypes "github.com/filecoin-project/chain-validation/chain/types" - vstate "github.com/filecoin-project/chain-validation/state" - - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" -) - -// Applier applies messages to state trees and storage. 
-type Applier struct { - stateWrapper *StateWrapper - syscalls runtime.Syscalls -} - -var _ vstate.Applier = &Applier{} - -func NewApplier(sw *StateWrapper, syscalls runtime.Syscalls) *Applier { - return &Applier{sw, syscalls} -} - -func (a *Applier) ApplyMessage(epoch abi.ChainEpoch, message *vtypes.Message) (vtypes.ApplyMessageResult, error) { - lm := toLotusMsg(message) - receipt, penalty, reward, err := a.applyMessage(epoch, lm) - return vtypes.ApplyMessageResult{ - Receipt: receipt, - Penalty: penalty, - Reward: reward, - Root: a.stateWrapper.Root().String(), - }, err -} - -func (a *Applier) ApplySignedMessage(epoch abi.ChainEpoch, msg *vtypes.SignedMessage) (vtypes.ApplyMessageResult, error) { - var lm types.ChainMsg - switch msg.Signature.Type { - case crypto.SigTypeSecp256k1: - lm = toLotusSignedMsg(msg) - case crypto.SigTypeBLS: - lm = toLotusMsg(&msg.Message) - default: - return vtypes.ApplyMessageResult{}, xerrors.New("Unknown signature type") - } - // TODO: Validate the sig first - receipt, penalty, reward, err := a.applyMessage(epoch, lm) - return vtypes.ApplyMessageResult{ - Receipt: receipt, - Penalty: penalty, - Reward: reward, - Root: a.stateWrapper.Root().String(), - }, err - -} - -func (a *Applier) ApplyTipSetMessages(epoch abi.ChainEpoch, blocks []vtypes.BlockMessagesInfo, rnd vstate.RandomnessSource) (vtypes.ApplyTipSetResult, error) { - cs := store.NewChainStore(a.stateWrapper.bs, a.stateWrapper.ds, a.syscalls) - sm := stmgr.NewStateManager(cs) - - var bms []stmgr.BlockMessages - for _, b := range blocks { - bm := stmgr.BlockMessages{ - Miner: b.Miner, - TicketCount: 1, - } - - for _, m := range b.BLSMessages { - bm.BlsMessages = append(bm.BlsMessages, toLotusMsg(m)) - } - - for _, m := range b.SECPMessages { - bm.SecpkMessages = append(bm.SecpkMessages, toLotusSignedMsg(m)) - } - - bms = append(bms, bm) - } - - var receipts []vtypes.MessageReceipt - sroot, _, err := sm.ApplyBlocks(context.TODO(), a.stateWrapper.Root(), bms, epoch, 
&randWrapper{rnd}, func(c cid.Cid, msg *types.Message, ret *vm.ApplyRet) error { - if msg.From == builtin.SystemActorAddr { - return nil // ignore reward and cron calls - } - rval := ret.Return - if rval == nil { - rval = []byte{} // chain validation tests expect empty arrays to not be nil... - } - receipts = append(receipts, vtypes.MessageReceipt{ - ExitCode: ret.ExitCode, - ReturnValue: rval, - - GasUsed: vtypes.GasUnits(ret.GasUsed), - }) - return nil - }) - if err != nil { - return vtypes.ApplyTipSetResult{}, err - } - - a.stateWrapper.stateRoot = sroot - - return vtypes.ApplyTipSetResult{ - Receipts: receipts, - Root: a.stateWrapper.Root().String(), - }, nil -} - -type randWrapper struct { - rnd vstate.RandomnessSource -} - -func (w *randWrapper) GetRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return w.rnd.Randomness(ctx, pers, round, entropy) -} - -type vmRand struct { -} - -func (*vmRand) GetRandomness(ctx context.Context, dst crypto.DomainSeparationTag, h abi.ChainEpoch, input []byte) ([]byte, error) { - panic("implement me") -} - -func (a *Applier) applyMessage(epoch abi.ChainEpoch, lm types.ChainMsg) (vtypes.MessageReceipt, abi.TokenAmount, abi.TokenAmount, error) { - ctx := context.TODO() - base := a.stateWrapper.Root() - - lotusVM, err := vm.NewVM(base, epoch, &vmRand{}, a.stateWrapper.bs, a.syscalls) - // need to modify the VM invoker to add the puppet actor - chainValInvoker := vm.NewInvoker() - chainValInvoker.Register(puppet.PuppetActorCodeID, puppet.Actor{}, puppet.State{}) - lotusVM.SetInvoker(chainValInvoker) - if err != nil { - return vtypes.MessageReceipt{}, big.Zero(), big.Zero(), err - } - - ret, err := lotusVM.ApplyMessage(ctx, lm) - if err != nil { - return vtypes.MessageReceipt{}, big.Zero(), big.Zero(), err - } - - rval := ret.Return - if rval == nil { - rval = []byte{} - } - - a.stateWrapper.stateRoot, err = lotusVM.Flush(ctx) - if err != nil { - return 
vtypes.MessageReceipt{}, big.Zero(), big.Zero(), err - } - - mr := vtypes.MessageReceipt{ - ExitCode: ret.ExitCode, - ReturnValue: rval, - GasUsed: vtypes.GasUnits(ret.GasUsed), - } - - return mr, ret.Penalty, abi.NewTokenAmount(ret.GasUsed), nil -} - -func toLotusMsg(msg *vtypes.Message) *types.Message { - return &types.Message{ - To: msg.To, - From: msg.From, - - Nonce: msg.CallSeqNum, - Method: msg.Method, - - Value: types.BigInt{Int: msg.Value.Int}, - GasPrice: types.BigInt{Int: msg.GasPrice.Int}, - GasLimit: msg.GasLimit, - - Params: msg.Params, - } -} - -func toLotusSignedMsg(msg *vtypes.SignedMessage) *types.SignedMessage { - return &types.SignedMessage{ - Message: *toLotusMsg(&msg.Message), - Signature: msg.Signature, - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/validation/config.go b/vendor/github.com/filecoin-project/lotus/chain/validation/config.go deleted file mode 100644 index 1e59363505..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/validation/config.go +++ /dev/null @@ -1,37 +0,0 @@ -package validation - -// -// Config -// - -type Config struct { - trackGas bool - checkExitCode bool - checkReturnValue bool - checkState bool -} - -func NewConfig(gas, exit, ret, state bool) *Config { - return &Config{ - trackGas: gas, - checkExitCode: exit, - checkReturnValue: ret, - checkState: state, - } -} - -func (v Config) ValidateGas() bool { - return v.trackGas -} - -func (v Config) ValidateExitCode() bool { - return v.checkExitCode -} - -func (v Config) ValidateReturnValue() bool { - return v.checkReturnValue -} - -func (v Config) ValidateStateRoot() bool { - return v.checkState -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/validation/factories.go b/vendor/github.com/filecoin-project/lotus/chain/validation/factories.go deleted file mode 100644 index d3771d87dc..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/validation/factories.go +++ /dev/null @@ -1,34 +0,0 @@ -package validation - 
-import ( - "github.com/filecoin-project/specs-actors/actors/runtime" - - vstate "github.com/filecoin-project/chain-validation/state" -) - -type Factories struct { - *Applier -} - -var _ vstate.Factories = &Factories{} - -func NewFactories() *Factories { - return &Factories{} -} - -func (f *Factories) NewStateAndApplier(syscalls runtime.Syscalls) (vstate.VMWrapper, vstate.Applier) { - st := NewState() - return st, NewApplier(st, syscalls) -} - -func (f *Factories) NewKeyManager() vstate.KeyManager { - return newKeyManager() -} - -func (f *Factories) NewValidationConfig() vstate.ValidationConfig { - trackGas := true - checkExit := true - checkRet := true - checkState := true - return NewConfig(trackGas, checkExit, checkRet, checkState) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/validation/keymanager.go b/vendor/github.com/filecoin-project/lotus/chain/validation/keymanager.go deleted file mode 100644 index a826d5ea09..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/validation/keymanager.go +++ /dev/null @@ -1,103 +0,0 @@ -package validation - -import ( - "fmt" - "github.com/minio/blake2b-simd" - "math/rand" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-crypto" - acrypto "github.com/filecoin-project/specs-actors/actors/crypto" - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/wallet" -) - -type KeyManager struct { - // Private keys by address - keys map[address.Address]*wallet.Key - - // Seed for deterministic secp key generation. - secpSeed int64 - // Seed for deterministic bls key generation. 
- blsSeed int64 // nolint: structcheck -} - -func newKeyManager() *KeyManager { - return &KeyManager{ - keys: make(map[address.Address]*wallet.Key), - secpSeed: 0, - } -} - -func (k *KeyManager) NewSECP256k1AccountAddress() address.Address { - secpKey := k.newSecp256k1Key() - k.keys[secpKey.Address] = secpKey - return secpKey.Address -} - -func (k *KeyManager) NewBLSAccountAddress() address.Address { - blsKey := k.newBLSKey() - k.keys[blsKey.Address] = blsKey - return blsKey.Address -} - -func (k *KeyManager) Sign(addr address.Address, data []byte) (acrypto.Signature, error) { - ki, ok := k.keys[addr] - if !ok { - return acrypto.Signature{}, fmt.Errorf("unknown address %v", addr) - } - var sigType acrypto.SigType - if ki.Type == wallet.KTSecp256k1 { - sigType = acrypto.SigTypeBLS - hashed := blake2b.Sum256(data) - sig, err := crypto.Sign(ki.PrivateKey, hashed[:]) - if err != nil { - return acrypto.Signature{}, err - } - - return acrypto.Signature{ - Type: sigType, - Data: sig, - }, nil - } else if ki.Type == wallet.KTBLS { - panic("lotus validator cannot sign BLS messages") - } else { - panic("unknown signature type") - } - -} - -func (k *KeyManager) newSecp256k1Key() *wallet.Key { - randSrc := rand.New(rand.NewSource(k.secpSeed)) - prv, err := crypto.GenerateKeyFromSeed(randSrc) - if err != nil { - panic(err) - } - k.secpSeed++ - key, err := wallet.NewKey(types.KeyInfo{ - Type: wallet.KTSecp256k1, - PrivateKey: prv, - }) - if err != nil { - panic(err) - } - return key -} - -func (k *KeyManager) newBLSKey() *wallet.Key { - // FIXME: bls needs deterministic key generation - //sk := ffi.PrivateKeyGenerate(s.blsSeed) - // s.blsSeed++ - sk := [32]byte{} - sk[0] = uint8(k.blsSeed) // hack to keep gas values determinist - k.blsSeed++ - key, err := wallet.NewKey(types.KeyInfo{ - Type: wallet.KTBLS, - PrivateKey: sk[:], - }) - if err != nil { - panic(err) - } - return key -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/validation/state.go 
b/vendor/github.com/filecoin-project/lotus/chain/validation/state.go deleted file mode 100644 index 965d0a6387..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/validation/state.go +++ /dev/null @@ -1,217 +0,0 @@ -package validation - -import ( - "context" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - "golang.org/x/xerrors" - - vstate "github.com/filecoin-project/chain-validation/state" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/runtime" - - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/types" -) - -var _ vstate.VMWrapper = &StateWrapper{} - -type StateWrapper struct { - // The blockstore underlying the state tree and storage. - bs blockstore.Blockstore - - ds datastore.Batching - // HAMT-CBOR store on top of the blockstore. - cst cbor.IpldStore - - // CID of the root of the state tree. - stateRoot cid.Cid -} - -func NewState() *StateWrapper { - bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - cst := cbor.NewCborStore(bs) - // Put EmptyObjectCid value in the store. When an actor is initially created its Head is set to this value. - _, err := cst.Put(context.TODO(), map[string]string{}) - if err != nil { - panic(err) - } - - treeImpl, err := state.NewStateTree(cst) - if err != nil { - panic(err) // Never returns error, the error return should be removed. 
- } - root, err := treeImpl.Flush(context.TODO()) - if err != nil { - panic(err) - } - return &StateWrapper{ - bs: bs, - ds: datastore.NewMapDatastore(), - cst: cst, - stateRoot: root, - } -} - -func (s *StateWrapper) NewVM() { - return -} - -func (s *StateWrapper) Root() cid.Cid { - return s.stateRoot -} - -// StoreGet the value at key from vm store -func (s *StateWrapper) StoreGet(key cid.Cid, out runtime.CBORUnmarshaler) error { - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return err - } - return tree.Store.Get(context.Background(), key, out) -} - -// StorePut `value` into vm store -func (s *StateWrapper) StorePut(value runtime.CBORMarshaler) (cid.Cid, error) { - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return cid.Undef, err - } - return tree.Store.Put(context.Background(), value) -} - -func (s *StateWrapper) Actor(addr address.Address) (vstate.Actor, error) { - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return nil, err - } - fcActor, err := tree.GetActor(addr) - if err != nil { - return nil, err - } - return &actorWrapper{*fcActor}, nil -} - -func (s *StateWrapper) SetActorState(addr address.Address, balance abi.TokenAmount, actorState runtime.CBORMarshaler) (vstate.Actor, error) { - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return nil, err - } - // actor should exist - act, err := tree.GetActor(addr) - if err != nil { - return nil, err - } - // add the state to the store and get a new head cid - actHead, err := tree.Store.Put(context.Background(), actorState) - if err != nil { - return nil, err - } - // update the actor object with new head and balance parameter - actr := &actorWrapper{types.Actor{ - Code: act.Code, - Nonce: act.Nonce, - // updates - Head: actHead, - Balance: balance, - }} - if err := tree.SetActor(addr, &actr.Actor); err != nil { - return nil, err - } - return actr, s.flush(tree) -} - -func (s *StateWrapper) 
CreateActor(code cid.Cid, addr address.Address, balance abi.TokenAmount, actorState runtime.CBORMarshaler) (vstate.Actor, address.Address, error) { - idAddr := addr - tree, err := state.LoadStateTree(s.cst, s.stateRoot) - if err != nil { - return nil, address.Undef, err - } - if addr.Protocol() != address.ID { - - actHead, err := tree.Store.Put(context.Background(), actorState) - if err != nil { - return nil, address.Undef, err - } - actr := &actorWrapper{types.Actor{ - Code: code, - Head: actHead, - Balance: balance, - }} - - idAddr, err = tree.RegisterNewAddress(addr) - if err != nil { - return nil, address.Undef, xerrors.Errorf("register new address for actor: %w", err) - } - - if err := tree.SetActor(addr, &actr.Actor); err != nil { - return nil, address.Undef, xerrors.Errorf("setting new actor for actor: %w", err) - } - } - - // store newState - head, err := tree.Store.Put(context.Background(), actorState) - if err != nil { - return nil, address.Undef, err - } - - // create and store actor object - a := types.Actor{ - Code: code, - Head: head, - Balance: balance, - } - if err := tree.SetActor(idAddr, &a); err != nil { - return nil, address.Undef, err - } - - return &actorWrapper{a}, idAddr, s.flush(tree) -} - -// Flushes a state tree to storage and sets this state's root to that tree's root CID. 
-func (s *StateWrapper) flush(tree *state.StateTree) (err error) { - s.stateRoot, err = tree.Flush(context.TODO()) - return -} - -// -// Actor Wrapper -// - -type actorWrapper struct { - types.Actor -} - -func (a *actorWrapper) Code() cid.Cid { - return a.Actor.Code -} - -func (a *actorWrapper) Head() cid.Cid { - return a.Actor.Head -} - -func (a *actorWrapper) CallSeqNum() uint64 { - return a.Actor.Nonce -} - -func (a *actorWrapper) Balance() big.Int { - return a.Actor.Balance - -} - -// -// Storage -// - -type contextStore struct { - cbor.IpldStore - ctx context.Context -} - -func (s *contextStore) Context() context.Context { - return s.ctx -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vectors/gen/main.go b/vendor/github.com/filecoin-project/lotus/chain/vectors/gen/main.go deleted file mode 100644 index 2ebcb9a60b..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vectors/gen/main.go +++ /dev/null @@ -1,202 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "math/rand" - "os" - - "github.com/filecoin-project/go-address" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/gen" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/types/mock" - "github.com/filecoin-project/lotus/chain/vectors" - "github.com/filecoin-project/lotus/chain/wallet" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/crypto" - - _ "github.com/filecoin-project/lotus/lib/sigs/bls" - _ "github.com/filecoin-project/lotus/lib/sigs/secp" -) - -func init() { - power.ConsensusMinerMinPower = big.NewInt(2048) -} - -func MakeHeaderVectors() []vectors.HeaderVector { - cg, err := gen.NewGenerator() - if err != nil { - panic(err) - } - - var out []vectors.HeaderVector - for i := 0; i < 5; i++ { - nts, err := cg.NextTipSet() - 
if err != nil { - panic(err) - } - - h := nts.TipSet.Blocks[0].Header - data, err := h.Serialize() - if err != nil { - panic(err) - } - - out = append(out, vectors.HeaderVector{ - Block: h, - Cid: h.Cid().String(), - CborHex: fmt.Sprintf("%x", data), - }) - } - return out -} - -func MakeMessageSigningVectors() []vectors.MessageSigningVector { - w, err := wallet.NewWallet(wallet.NewMemKeyStore()) - if err != nil { - panic(err) - } - - blsk, err := w.GenerateKey(crypto.SigTypeBLS) - if err != nil { - panic(err) - } - bki, err := w.Export(blsk) - if err != nil { - panic(err) - } - - to, err := address.NewIDAddress(99999) - if err != nil { - panic(err) - } - - bmsg := mock.MkMessage(blsk, to, 55, w) - - blsmsv := vectors.MessageSigningVector{ - Unsigned: &bmsg.Message, - Cid: bmsg.Message.Cid().String(), - CidHexBytes: fmt.Sprintf("%x", bmsg.Message.Cid().Bytes()), - PrivateKey: bki.PrivateKey, - Signature: &bmsg.Signature, - } - - secpk, err := w.GenerateKey(crypto.SigTypeBLS) - if err != nil { - panic(err) - } - ski, err := w.Export(secpk) - if err != nil { - panic(err) - } - - smsg := mock.MkMessage(secpk, to, 55, w) - - smsv := vectors.MessageSigningVector{ - Unsigned: &smsg.Message, - Cid: smsg.Message.Cid().String(), - CidHexBytes: fmt.Sprintf("%x", smsg.Message.Cid().Bytes()), - PrivateKey: ski.PrivateKey, - Signature: &smsg.Signature, - } - - return []vectors.MessageSigningVector{blsmsv, smsv} -} - -func MakeUnsignedMessageVectors() []vectors.UnsignedMessageVector { - froms := []string{ - "t2ch7krq7l35i74rebqbjdsp3ucl47t24e3juxjfa", - "t1pyfq7dg6sq65acyomqvzvbgwni4zllglqffw5dy", - "t1cyg66djxytxhzdq7ynoqfxk7xinp6xsejbeufli", - "t16n7vrq5humzoqll7zg4yw6dta645tuakcoalp6y", - "t1awsiuji4wpbxpzslg36f3wnfxzi4o5gq67tz2mi", - "t14mb3j32uuwajy5b2mliz63isp6zl5xkppzyuhfy", - "t1dzdmyzzdy6q5elobj63eokzv2xnwsp4vm5l6aka", - "t1svd45rkcfpsyqedvvhuv77yvllvu5ygmygjlvka", - "t1mrret5liwh46qde6qhaxrmcwil7jawjeqdijwfq", - "t1ly3ynedw74p4q3ytdnb4stjdkiodrl54moeyxea", - 
"t1uqexvn66gj4lxkbvmrgposwrlxbyd655o2nayyi", - "t1dwwjod7vw62jzw2eva7gtxohaidjhgh6w2rofui", - "t1slswisymmkfulmvl3jynrnwqi27tkvmsgzhztvy", - "t1e3vymxcdqfkqwz6e6wnxxx6ayuml3vxi5gef4xa", - "t1bgqopgk64ywpprka4citgi62aldclyaegvwvx6y", - "t1aizqgl2klzkzffwu35rufyuzefke2i6ndbewuhi", - "t1mzposcnsd2tc66yu5i3kajtrh5pvwohdjvitcey", - "t1x7xvs6oorrrlefyzn6wlbvaibzj3a2fyt4hsmvq", - "t1ez743nvc4j7qfirwnmxbh4qdqwha3iyalnq4rya", - "t17dvtgkop7cqgi6myjne5kzvrnsbg5wnowjphhwy", - "t1kvar5z3q7dwrfxjqsnuqpq5qsd7mvh2xypblwta", - } - var out []vectors.UnsignedMessageVector - for _, a := range froms { - from, err := address.NewFromString(a) - if err != nil { - panic(err) - } - to, err := address.NewIDAddress(rand.Uint64()) - if err != nil { - panic(err) - } - - params := make([]byte, 32) - rand.Read(params) - - msg := &types.Message{ - To: to, - From: from, - Value: types.NewInt(rand.Uint64()), - Method: abi.MethodNum(rand.Uint64()), - GasPrice: types.NewInt(rand.Uint64()), - GasLimit: rand.Int63(), - Nonce: rand.Uint64(), - Params: params, - } - - ser, err := msg.Serialize() - if err != nil { - panic(err) - } - - out = append(out, vectors.UnsignedMessageVector{ - Message: msg, - HexCbor: fmt.Sprintf("%x", ser), - }) - } - return out -} - -func WriteJsonToFile(fname string, obj interface{}) error { - fi, err := os.Create(fname) - if err != nil { - return err - } - defer fi.Close() //nolint:errcheck - - out, err := json.MarshalIndent(obj, "", " ") - if err != nil { - return err - } - - _, err = fi.Write(out) - if err != nil { - return xerrors.Errorf("writing json: %w", err) - } - - return nil -} - -func main() { - if err := WriteJsonToFile("block_headers.json", MakeHeaderVectors()); err != nil { - panic(err) - } - if err := WriteJsonToFile("message_signing.json", MakeMessageSigningVectors()); err != nil { - panic(err) - } - if err := WriteJsonToFile("unsigned_messages.json", MakeUnsignedMessageVectors()); err != nil { - panic(err) - } -} diff --git 
a/vendor/github.com/filecoin-project/lotus/chain/vectors/vector_types.go b/vendor/github.com/filecoin-project/lotus/chain/vectors/vector_types.go deleted file mode 100644 index 73216a049e..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vectors/vector_types.go +++ /dev/null @@ -1,25 +0,0 @@ -package vectors - -import ( - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/specs-actors/actors/crypto" -) - -type HeaderVector struct { - Block *types.BlockHeader `json:"block"` - CborHex string `json:"cbor_hex"` - Cid string `json:"cid"` -} - -type MessageSigningVector struct { - Unsigned *types.Message - Cid string - CidHexBytes string - PrivateKey []byte - Signature *crypto.Signature -} - -type UnsignedMessageVector struct { - Message *types.Message `json:"message"` - HexCbor string `json:"hex_cbor"` -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vectors/vectors_test.go b/vendor/github.com/filecoin-project/lotus/chain/vectors/vectors_test.go deleted file mode 100644 index c9ebc98fa9..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vectors/vectors_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package vectors - -import ( - "bytes" - "encoding/hex" - "encoding/json" - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/filecoin-project/lotus/chain/types" -) - -func LoadVector(t *testing.T, f string, out interface{}) { - p := filepath.Join("../../extern/serialization-vectors", f) - fi, err := os.Open(p) - if err != nil { - t.Fatal(err) - } - - if err := json.NewDecoder(fi).Decode(out); err != nil { - t.Fatal(err) - } -} - -func TestBlockHeaderVectors(t *testing.T) { - t.Skip("we need to regenerate for beacon") - var headers []HeaderVector - LoadVector(t, "block_headers.json", &headers) - - for i, hv := range headers { - if hv.Block.Cid().String() != hv.Cid { - t.Fatalf("CID mismatch in test vector %d", i) - } - - data, err := hv.Block.Serialize() - if err != nil { - t.Fatal(err) - } - - if 
fmt.Sprintf("%x", data) != hv.CborHex { - t.Fatalf("serialized data mismatched for test vector %d", i) - } - } -} - -func TestMessageSigningVectors(t *testing.T) { - var msvs []MessageSigningVector - LoadVector(t, "message_signing.json", &msvs) - - for i, msv := range msvs { - smsg := &types.SignedMessage{ - Message: *msv.Unsigned, - Signature: *msv.Signature, - } - - if smsg.Cid().String() != msv.Cid { - t.Fatalf("cid of message in vector %d mismatches", i) - } - - // TODO: check signature - } -} - -func TestUnsignedMessageVectors(t *testing.T) { - var msvs []UnsignedMessageVector - LoadVector(t, "unsigned_messages.json", &msvs) - - for i, msv := range msvs { - b, err := msv.Message.Serialize() - if err != nil { - t.Fatal(err) - } - - dec, err := hex.DecodeString(msv.HexCbor) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(b, dec) { - t.Fatalf("serialization vector %d mismatches bytes", i) - } - } -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/gas.go b/vendor/github.com/filecoin-project/lotus/chain/vm/gas.go deleted file mode 100644 index bee65fd8f8..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/gas.go +++ /dev/null @@ -1,212 +0,0 @@ -package vm - -import ( - "fmt" - - "github.com/filecoin-project/go-address" - addr "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime" - vmr "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/ipfs/go-cid" -) - -const ( - GasStorageMulti = 1 - GasComputeMulti = 1 -) - -type GasCharge struct { - Name string - Extra interface{} - - ComputeGas int64 - StorageGas int64 - - VirtualCompute int64 - VirtualStorage int64 -} - -func (g GasCharge) Total() int64 { - return g.ComputeGas*GasComputeMulti + g.StorageGas*GasStorageMulti -} -func (g GasCharge) WithVirtual(compute, storage int64) GasCharge { - out := g - 
out.VirtualCompute = compute - out.VirtualStorage = storage - return out -} - -func (g GasCharge) WithExtra(extra interface{}) GasCharge { - out := g - out.Extra = extra - return out -} - -func newGasCharge(name string, computeGas int64, storageGas int64) GasCharge { - return GasCharge{ - Name: name, - ComputeGas: computeGas, - StorageGas: storageGas, - } -} - -// Pricelist provides prices for operations in the VM. -// -// Note: this interface should be APPEND ONLY since last chain checkpoint -type Pricelist interface { - // OnChainMessage returns the gas used for storing a message of a given size in the chain. - OnChainMessage(msgSize int) GasCharge - // OnChainReturnValue returns the gas used for storing the response of a message in the chain. - OnChainReturnValue(dataSize int) GasCharge - - // OnMethodInvocation returns the gas used when invoking a method. - OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge - - // OnIpldGet returns the gas used for storing an object - OnIpldGet(dataSize int) GasCharge - // OnIpldPut returns the gas used for storing an object - OnIpldPut(dataSize int) GasCharge - - // OnCreateActor returns the gas used for creating an actor - OnCreateActor() GasCharge - // OnDeleteActor returns the gas used for deleting an actor - OnDeleteActor() GasCharge - - OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) - OnHashing(dataSize int) GasCharge - OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info abi.SealVerifyInfo) GasCharge - OnVerifyPost(info abi.WindowPoStVerifyInfo) GasCharge - OnVerifyConsensusFault() GasCharge -} - -var prices = map[abi.ChainEpoch]Pricelist{ - abi.ChainEpoch(0): &pricelistV0{ - onChainMessageBase: 0, - onChainMessagePerByte: 2, - onChainReturnValuePerByte: 8, - sendBase: 5, - sendTransferFunds: 5, - sendInvokeMethod: 10, - ipldGetBase: 10, - ipldGetPerByte: 1, - ipldPutBase: 20, - ipldPutPerByte: 2, - 
createActorBase: 40, // IPLD put + 20 - createActorExtra: 500, - deleteActor: -500, // -createActorExtra - // Dragons: this cost is not persistable, create a LinearCost{a,b} struct that has a `.Cost(x) -> ax + b` - verifySignature: map[crypto.SigType]func(int64) int64{ - crypto.SigTypeBLS: func(x int64) int64 { return 3*x + 2 }, - crypto.SigTypeSecp256k1: func(x int64) int64 { return 3*x + 2 }, - }, - hashingBase: 5, - hashingPerByte: 2, - computeUnsealedSectorCidBase: 100, - verifySealBase: 2000, - verifyPostBase: 700, - verifyConsensusFault: 10, - }, -} - -// PricelistByEpoch finds the latest prices for the given epoch -func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { - // since we are storing the prices as map or epoch to price - // we need to get the price with the highest epoch that is lower or equal to the `epoch` arg - bestEpoch := abi.ChainEpoch(0) - bestPrice := prices[bestEpoch] - for e, pl := range prices { - // if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch` - if e > bestEpoch && e <= epoch { - bestEpoch = e - bestPrice = pl - } - } - if bestPrice == nil { - panic(fmt.Sprintf("bad setup: no gas prices available for epoch %d", epoch)) - } - return bestPrice -} - -type pricedSyscalls struct { - under vmr.Syscalls - pl Pricelist - chargeGas func(GasCharge) -} - -// Verifies that a signature is valid for an address and plaintext. -func (ps pricedSyscalls) VerifySignature(signature crypto.Signature, signer addr.Address, plaintext []byte) error { - c, err := ps.pl.OnVerifySignature(signature.Type, len(plaintext)) - if err != nil { - return err - } - ps.chargeGas(c) - defer ps.chargeGas(gasOnActorExec) - - return ps.under.VerifySignature(signature, signer, plaintext) -} - -// Hashes input data using blake2b with 256 bit output. 
-func (ps pricedSyscalls) HashBlake2b(data []byte) [32]byte { - ps.chargeGas(ps.pl.OnHashing(len(data))) - defer ps.chargeGas(gasOnActorExec) - - return ps.under.HashBlake2b(data) -} - -// Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes. -func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { - ps.chargeGas(ps.pl.OnComputeUnsealedSectorCid(reg, pieces)) - defer ps.chargeGas(gasOnActorExec) - - return ps.under.ComputeUnsealedSectorCID(reg, pieces) -} - -// Verifies a sector seal proof. -func (ps pricedSyscalls) VerifySeal(vi abi.SealVerifyInfo) error { - ps.chargeGas(ps.pl.OnVerifySeal(vi)) - defer ps.chargeGas(gasOnActorExec) - - return ps.under.VerifySeal(vi) -} - -// Verifies a proof of spacetime. -func (ps pricedSyscalls) VerifyPoSt(vi abi.WindowPoStVerifyInfo) error { - ps.chargeGas(ps.pl.OnVerifyPost(vi)) - defer ps.chargeGas(gasOnActorExec) - - return ps.under.VerifyPoSt(vi) -} - -// Verifies that two block headers provide proof of a consensus fault: -// - both headers mined by the same actor -// - headers are different -// - first header is of the same or lower epoch as the second -// - at least one of the headers appears in the current chain at or after epoch `earliest` -// - the headers provide evidence of a fault (see the spec for the different fault types). -// The parameters are all serialized block headers. The third "extra" parameter is consulted only for -// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the -// blocks in the parent of h2 (i.e. h2's grandparent). -// Returns nil and an error if the headers don't prove a fault. 
-func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*runtime.ConsensusFault, error) { - ps.chargeGas(ps.pl.OnVerifyConsensusFault()) - defer ps.chargeGas(gasOnActorExec) - - return ps.under.VerifyConsensusFault(h1, h2, extra) -} - -func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) { - var gasChargeSum GasCharge - gasChargeSum.Name = "BatchVerifySeals" - count := int64(0) - for _, svis := range inp { - count += int64(len(svis)) - } - gasChargeSum = gasChargeSum.WithExtra(count).WithVirtual(129778623*count+716683250, 0) - ps.chargeGas(gasChargeSum) // TODO: this is only called by the cron actor. Should we even charge gas? - defer ps.chargeGas(gasOnActorExec) - - return ps.under.BatchVerifySeals(inp) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/gas_v0.go b/vendor/github.com/filecoin-project/lotus/chain/vm/gas_v0.go deleted file mode 100644 index 5735d0b83b..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/gas_v0.go +++ /dev/null @@ -1,197 +0,0 @@ -package vm - -import ( - "fmt" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/crypto" -) - -type pricelistV0 struct { - /////////////////////////////////////////////////////////////////////////// - // System operations - /////////////////////////////////////////////////////////////////////////// - - // Gas cost charged to the originator of an on-chain message (regardless of - // whether it succeeds or fails in application) is given by: - // OnChainMessageBase + len(serialized message)*OnChainMessagePerByte - // Together, these account for the cost of message propagation and validation, - // up to but excluding any actual processing by the VM. 
- // This is the cost a block producer burns when including an invalid message. - onChainMessageBase int64 - onChainMessagePerByte int64 - - // Gas cost charged to the originator of a non-nil return value produced - // by an on-chain message is given by: - // len(return value)*OnChainReturnValuePerByte - onChainReturnValuePerByte int64 - - // Gas cost for any message send execution(including the top-level one - // initiated by an on-chain message). - // This accounts for the cost of loading sender and receiver actors and - // (for top-level messages) incrementing the sender's sequence number. - // Load and store of actor sub-state is charged separately. - sendBase int64 - - // Gas cost charged, in addition to SendBase, if a message send - // is accompanied by any nonzero currency amount. - // Accounts for writing receiver's new balance (the sender's state is - // already accounted for). - sendTransferFunds int64 - - // Gas cost charged, in addition to SendBase, if a message invokes - // a method on the receiver. - // Accounts for the cost of loading receiver code and method dispatch. - sendInvokeMethod int64 - - // Gas cost (Base + len*PerByte) for any Get operation to the IPLD store - // in the runtime VM context. - ipldGetBase int64 - ipldGetPerByte int64 - - // Gas cost (Base + len*PerByte) for any Put operation to the IPLD store - // in the runtime VM context. - // - // Note: these costs should be significantly higher than the costs for Get - // operations, since they reflect not only serialization/deserialization - // but also persistent storage of chain data. - ipldPutBase int64 - ipldPutPerByte int64 - - // Gas cost for creating a new actor (via InitActor's Exec method). - // - // Note: this costs assume that the extra will be partially or totally refunded while - // the base is covering for the put. - createActorBase int64 - createActorExtra int64 - - // Gas cost for deleting an actor. 
- // - // Note: this partially refunds the create cost to incentivise the deletion of the actors. - deleteActor int64 - - verifySignature map[crypto.SigType]func(len int64) int64 - - hashingBase int64 - hashingPerByte int64 - - computeUnsealedSectorCidBase int64 - verifySealBase int64 - verifyPostBase int64 - verifyConsensusFault int64 -} - -var _ Pricelist = (*pricelistV0)(nil) - -// OnChainMessage returns the gas used for storing a message of a given size in the chain. -func (pl *pricelistV0) OnChainMessage(msgSize int) GasCharge { - return newGasCharge("OnChainMessage", 0, pl.onChainMessageBase+pl.onChainMessagePerByte*int64(msgSize)).WithVirtual(77302, 0) -} - -// OnChainReturnValue returns the gas used for storing the response of a message in the chain. -func (pl *pricelistV0) OnChainReturnValue(dataSize int) GasCharge { - return newGasCharge("OnChainReturnValue", 0, int64(dataSize)*pl.onChainReturnValuePerByte).WithVirtual(107294, 0) -} - -// OnMethodInvocation returns the gas used when invoking a method. -func (pl *pricelistV0) OnMethodInvocation(value abi.TokenAmount, methodNum abi.MethodNum) GasCharge { - ret := pl.sendBase - extra := "" - virtGas := int64(1072944) - - if value != abi.NewTokenAmount(0) { - // TODO: fix this, it is comparing pointers instead of values - // see vv - ret += pl.sendTransferFunds - } - if big.Cmp(value, abi.NewTokenAmount(0)) != 0 { - virtGas += 497495 - if methodNum == builtin.MethodSend { - // transfer only - virtGas += 973940 - } - extra += "t" - } - if methodNum != builtin.MethodSend { - ret += pl.sendInvokeMethod - extra += "i" - // running actors is cheaper becase we hand over to actors - virtGas += -295779 - } - return newGasCharge("OnMethodInvocation", ret, 0).WithVirtual(virtGas, 0).WithExtra(extra) -} - -// OnIpldGet returns the gas used for storing an object -func (pl *pricelistV0) OnIpldGet(dataSize int) GasCharge { - return newGasCharge("OnIpldGet", pl.ipldGetBase+int64(dataSize)*pl.ipldGetPerByte, 0). 
- WithExtra(dataSize).WithVirtual(433685, 0) -} - -// OnIpldPut returns the gas used for storing an object -func (pl *pricelistV0) OnIpldPut(dataSize int) GasCharge { - return newGasCharge("OnIpldPut", pl.ipldPutBase, int64(dataSize)*pl.ipldPutPerByte). - WithExtra(dataSize).WithVirtual(88970, 0) -} - -// OnCreateActor returns the gas used for creating an actor -func (pl *pricelistV0) OnCreateActor() GasCharge { - return newGasCharge("OnCreateActor", pl.createActorBase, pl.createActorExtra).WithVirtual(65636, 0) -} - -// OnDeleteActor returns the gas used for deleting an actor -func (pl *pricelistV0) OnDeleteActor() GasCharge { - return newGasCharge("OnDeleteActor", 0, pl.deleteActor) -} - -// OnVerifySignature - -func (pl *pricelistV0) OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) { - costFn, ok := pl.verifySignature[sigType] - if !ok { - return GasCharge{}, fmt.Errorf("cost function for signature type %d not supported", sigType) - } - sigName, _ := sigType.Name() - virtGas := int64(0) - switch sigType { - case crypto.SigTypeBLS: - virtGas = 220138570 - case crypto.SigTypeSecp256k1: - virtGas = 7053730 - } - - return newGasCharge("OnVerifySignature", costFn(int64(planTextSize)), 0). 
- WithExtra(map[string]interface{}{ - "type": sigName, - "size": planTextSize, - }).WithVirtual(virtGas, 0), nil -} - -// OnHashing -func (pl *pricelistV0) OnHashing(dataSize int) GasCharge { - return newGasCharge("OnHashing", pl.hashingBase+int64(dataSize)*pl.hashingPerByte, 0).WithExtra(dataSize).WithVirtual(77300, 0) -} - -// OnComputeUnsealedSectorCid -func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge { - // TODO: this needs more cost tunning, check with @lotus - return newGasCharge("OnComputeUnsealedSectorCid", pl.computeUnsealedSectorCidBase, 0).WithVirtual(382370, 0) -} - -// OnVerifySeal -func (pl *pricelistV0) OnVerifySeal(info abi.SealVerifyInfo) GasCharge { - // TODO: this needs more cost tunning, check with @lotus - return newGasCharge("OnVerifySeal", pl.verifySealBase, 0).WithVirtual(199954003, 0) -} - -// OnVerifyPost -func (pl *pricelistV0) OnVerifyPost(info abi.WindowPoStVerifyInfo) GasCharge { - // TODO: this needs more cost tunning, check with @lotus - return newGasCharge("OnVerifyPost", pl.verifyPostBase, 0).WithVirtual(2629471704, 0).WithExtra(len(info.ChallengedSectors)) -} - -// OnVerifyConsensusFault -func (pl *pricelistV0) OnVerifyConsensusFault() GasCharge { - return newGasCharge("OnVerifyConsensusFault", pl.verifyConsensusFault, 0).WithVirtual(551935, 0) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/invoker.go b/vendor/github.com/filecoin-project/lotus/chain/vm/invoker.go deleted file mode 100644 index 8bbb27ecce..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/invoker.go +++ /dev/null @@ -1,200 +0,0 @@ -package vm - -import ( - "bytes" - "encoding/hex" - "fmt" - "reflect" - - "github.com/filecoin-project/specs-actors/actors/builtin/account" - "github.com/filecoin-project/specs-actors/actors/builtin/verifreg" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - - "github.com/ipfs/go-cid" - cbg 
"github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/cron" - init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - "github.com/filecoin-project/specs-actors/actors/builtin/paych" - "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/builtin/reward" - "github.com/filecoin-project/specs-actors/actors/builtin/system" - "github.com/filecoin-project/specs-actors/actors/runtime" - vmr "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/lotus/chain/actors/aerrors" -) - -type Invoker struct { - builtInCode map[cid.Cid]nativeCode - builtInState map[cid.Cid]reflect.Type -} - -type invokeFunc func(rt runtime.Runtime, params []byte) ([]byte, aerrors.ActorError) -type nativeCode []invokeFunc - -func NewInvoker() *Invoker { - inv := &Invoker{ - builtInCode: make(map[cid.Cid]nativeCode), - builtInState: make(map[cid.Cid]reflect.Type), - } - - // add builtInCode using: register(cid, singleton) - inv.Register(builtin.SystemActorCodeID, system.Actor{}, adt.EmptyValue{}) - inv.Register(builtin.InitActorCodeID, init_.Actor{}, init_.State{}) - inv.Register(builtin.RewardActorCodeID, reward.Actor{}, reward.State{}) - inv.Register(builtin.CronActorCodeID, cron.Actor{}, cron.State{}) - inv.Register(builtin.StoragePowerActorCodeID, power.Actor{}, power.State{}) - inv.Register(builtin.StorageMarketActorCodeID, market.Actor{}, market.State{}) - inv.Register(builtin.StorageMinerActorCodeID, miner.Actor{}, miner.State{}) - 
inv.Register(builtin.MultisigActorCodeID, multisig.Actor{}, multisig.State{}) - inv.Register(builtin.PaymentChannelActorCodeID, paych.Actor{}, paych.State{}) - inv.Register(builtin.VerifiedRegistryActorCodeID, verifreg.Actor{}, verifreg.State{}) - inv.Register(builtin.AccountActorCodeID, account.Actor{}, account.State{}) - - return inv -} - -func (inv *Invoker) Invoke(codeCid cid.Cid, rt runtime.Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { - - code, ok := inv.builtInCode[codeCid] - if !ok { - log.Errorf("no code for actor %s (Addr: %s)", codeCid, rt.Message().Receiver()) - return nil, aerrors.Newf(exitcode.SysErrorIllegalActor, "no code for actor %s(%d)(%s)", codeCid, method, hex.EncodeToString(params)) - } - if method >= abi.MethodNum(len(code)) || code[method] == nil { - return nil, aerrors.Newf(exitcode.SysErrInvalidMethod, "no method %d on actor", method) - } - return code[method](rt, params) - -} - -func (inv *Invoker) Register(c cid.Cid, instance Invokee, state interface{}) { - code, err := inv.transform(instance) - if err != nil { - panic(xerrors.Errorf("%s: %w", string(c.Hash()), err)) - } - inv.builtInCode[c] = code - inv.builtInState[c] = reflect.TypeOf(state) -} - -type Invokee interface { - Exports() []interface{} -} - -func (*Invoker) transform(instance Invokee) (nativeCode, error) { - itype := reflect.TypeOf(instance) - exports := instance.Exports() - for i, m := range exports { - i := i - newErr := func(format string, args ...interface{}) error { - str := fmt.Sprintf(format, args...) 
- return fmt.Errorf("transform(%s) export(%d): %s", itype.Name(), i, str) - } - - if m == nil { - continue - } - - meth := reflect.ValueOf(m) - t := meth.Type() - if t.Kind() != reflect.Func { - return nil, newErr("is not a function") - } - if t.NumIn() != 2 { - return nil, newErr("wrong number of inputs should be: " + - "vmr.Runtime, ") - } - if t.In(0) != reflect.TypeOf((*vmr.Runtime)(nil)).Elem() { - return nil, newErr("first arguemnt should be vmr.Runtime") - } - if t.In(1).Kind() != reflect.Ptr { - return nil, newErr("second argument should be Runtime") - } - - if t.NumOut() != 1 { - return nil, newErr("wrong number of outputs should be: " + - "cbg.CBORMarshaler") - } - o0 := t.Out(0) - if !o0.Implements(reflect.TypeOf((*cbg.CBORMarshaler)(nil)).Elem()) { - return nil, newErr("output needs to implement cgb.CBORMarshaler") - } - } - code := make(nativeCode, len(exports)) - for id, m := range exports { - meth := reflect.ValueOf(m) - code[id] = reflect.MakeFunc(reflect.TypeOf((invokeFunc)(nil)), - func(in []reflect.Value) []reflect.Value { - paramT := meth.Type().In(1).Elem() - param := reflect.New(paramT) - - inBytes := in[1].Interface().([]byte) - if err := DecodeParams(inBytes, param.Interface()); err != nil { - aerr := aerrors.Absorb(err, 1, "failed to decode parameters") - return []reflect.Value{ - reflect.ValueOf([]byte{}), - // Below is a hack, fixed in Go 1.13 - // https://git.io/fjXU6 - reflect.ValueOf(&aerr).Elem(), - } - } - rt := in[0].Interface().(*Runtime) - rval, aerror := rt.shimCall(func() interface{} { - ret := meth.Call([]reflect.Value{ - reflect.ValueOf(rt), - param, - }) - return ret[0].Interface() - }) - - return []reflect.Value{ - reflect.ValueOf(&rval).Elem(), - reflect.ValueOf(&aerror).Elem(), - } - }).Interface().(invokeFunc) - - } - return code, nil -} - -func DecodeParams(b []byte, out interface{}) error { - um, ok := out.(cbg.CBORUnmarshaler) - if !ok { - return fmt.Errorf("type %T does not implement UnmarshalCBOR", out) - } - - 
return um.UnmarshalCBOR(bytes.NewReader(b)) -} - -func DumpActorState(code cid.Cid, b []byte) (interface{}, error) { - if code == builtin.AccountActorCodeID { // Account code special case - return nil, nil - } - - i := NewInvoker() // TODO: register builtins in init block - - typ, ok := i.builtInState[code] - if !ok { - return nil, xerrors.Errorf("state type for actor %s not found", code) - } - - rv := reflect.New(typ) - um, ok := rv.Interface().(cbg.CBORUnmarshaler) - if !ok { - return nil, xerrors.New("state type does not implement CBORUnmarshaler") - } - - if err := um.UnmarshalCBOR(bytes.NewReader(b)); err != nil { - return nil, err - } - - return rv.Elem().Interface(), nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/invoker_test.go b/vendor/github.com/filecoin-project/lotus/chain/vm/invoker_test.go deleted file mode 100644 index 55b2764219..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/invoker_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package vm - -import ( - "fmt" - "io" - "testing" - - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/stretchr/testify/assert" - cbg "github.com/whyrusleeping/cbor-gen" - - "github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/filecoin-project/specs-actors/actors/util/adt" -) - -type basicContract struct{} -type basicParams struct { - B byte -} - -func (b *basicParams) MarshalCBOR(w io.Writer) error { - _, err := w.Write(cbg.CborEncodeMajorType(cbg.MajUnsignedInt, uint64(b.B))) - return err -} - -func (b *basicParams) UnmarshalCBOR(r io.Reader) error { - maj, val, err := cbg.CborReadHeader(r) - if err != nil { - return err - } - - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("bad cbor type") - } - - b.B = byte(val) - return nil -} - -func init() { - cbor.RegisterCborType(basicParams{}) 
-} - -func (b basicContract) Exports() []interface{} { - return []interface{}{ - b.InvokeSomething0, - b.BadParam, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - nil, - b.InvokeSomething10, - } -} - -func (basicContract) InvokeSomething0(rt runtime.Runtime, params *basicParams) *adt.EmptyValue { - rt.Abortf(exitcode.ExitCode(params.B), "params.B") - return nil -} - -func (basicContract) BadParam(rt runtime.Runtime, params *basicParams) *adt.EmptyValue { - rt.Abortf(255, "bad params") - return nil -} - -func (basicContract) InvokeSomething10(rt runtime.Runtime, params *basicParams) *adt.EmptyValue { - rt.Abortf(exitcode.ExitCode(params.B+10), "params.B") - return nil -} - -func TestInvokerBasic(t *testing.T) { - inv := Invoker{} - code, err := inv.transform(basicContract{}) - assert.NoError(t, err) - - { - bParam, err := actors.SerializeParams(&basicParams{B: 1}) - assert.NoError(t, err) - - _, aerr := code[0](&Runtime{}, bParam) - - assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") - if aerrors.IsFatal(aerr) { - t.Fatal("err should not be fatal") - } - } - - { - bParam, err := actors.SerializeParams(&basicParams{B: 2}) - assert.NoError(t, err) - - _, aerr := code[10](&Runtime{}, bParam) - assert.Equal(t, exitcode.ExitCode(12), aerrors.RetCode(aerr), "return code should be 12") - if aerrors.IsFatal(aerr) { - t.Fatal("err should not be fatal") - } - } - - _, aerr := code[1](&Runtime{}, []byte{99}) - if aerrors.IsFatal(aerr) { - t.Fatal("err should not be fatal") - } - assert.Equal(t, exitcode.ExitCode(1), aerrors.RetCode(aerr), "return code should be 1") - -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/mkactor.go b/vendor/github.com/filecoin-project/lotus/chain/vm/mkactor.go deleted file mode 100644 index 1a3fd97de8..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/mkactor.go +++ /dev/null @@ -1,102 +0,0 @@ -package vm - -import ( - "context" - - 
"github.com/filecoin-project/lotus/chain/actors" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/types" -) - -func init() { - cst := cbor.NewMemCborStore() - emptyobject, err := cst.Put(context.TODO(), []struct{}{}) - if err != nil { - panic(err) - } - - EmptyObjectCid = emptyobject -} - -var EmptyObjectCid cid.Cid - -// TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses. -func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, aerrors.ActorError) { - addrID, err := rt.state.RegisterNewAddress(addr) - if err != nil { - return nil, aerrors.Escalate(err, "registering actor address") - } - - act, aerr := makeActor(addr) - if aerr != nil { - return nil, aerr - } - - if err := rt.state.SetActor(addrID, act); err != nil { - return nil, aerrors.Escalate(err, "creating new actor failed") - } - - p, err := actors.SerializeParams(&addr) - if err != nil { - return nil, aerrors.Escalate(err, "couldn't serialize params for actor construction") - } - // call constructor on account - - if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil { - return nil, err - } - - _, aerr = rt.internalSend(builtin.SystemActorAddr, addrID, builtin.MethodsAccount.Constructor, big.Zero(), p) - if aerr != nil { - return nil, aerrors.Wrap(aerr, "failed to invoke account constructor") - } - - act, err = rt.state.GetActor(addrID) - if err != nil { - return nil, aerrors.Escalate(err, "loading newly created actor failed") - } - return act, nil -} - -func makeActor(addr address.Address) (*types.Actor, aerrors.ActorError) { - switch addr.Protocol() { - case address.BLS: - return 
NewBLSAccountActor(), nil - case address.SECP256K1: - return NewSecp256k1AccountActor(), nil - case address.ID: - return nil, aerrors.Newf(exitcode.SysErrInvalidReceiver, "no actor with given ID: %s", addr) - case address.Actor: - return nil, aerrors.Newf(exitcode.SysErrInvalidReceiver, "no such actor: %s", addr) - default: - return nil, aerrors.Newf(exitcode.SysErrInvalidReceiver, "address has unsupported protocol: %d", addr.Protocol()) - } -} - -func NewBLSAccountActor() *types.Actor { - nact := &types.Actor{ - Code: builtin.AccountActorCodeID, - Balance: types.NewInt(0), - Head: EmptyObjectCid, - } - - return nact -} - -func NewSecp256k1AccountActor() *types.Actor { - nact := &types.Actor{ - Code: builtin.AccountActorCodeID, - Balance: types.NewInt(0), - Head: EmptyObjectCid, - } - - return nact -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/runtime.go b/vendor/github.com/filecoin-project/lotus/chain/vm/runtime.go deleted file mode 100644 index d964b73165..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/runtime.go +++ /dev/null @@ -1,580 +0,0 @@ -package vm - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - gruntime "runtime" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/abi/big" - "github.com/filecoin-project/specs-actors/actors/builtin" - sainit "github.com/filecoin-project/specs-actors/actors/builtin/init" - sapower "github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime" - vmr "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - hamt "github.com/ipfs/go-hamt-ipld" - cbor "github.com/ipfs/go-ipld-cbor" - cbg 
"github.com/whyrusleeping/cbor-gen" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/types" -) - -type Runtime struct { - ctx context.Context - - vm *VM - state *state.StateTree - msg *types.Message - vmsg vmr.Message - height abi.ChainEpoch - cst cbor.IpldStore - pricelist Pricelist - - gasAvailable int64 - gasUsed int64 - - sys runtime.Syscalls - - // address that started invoke chain - origin address.Address - originNonce uint64 - - executionTrace types.ExecutionTrace - numActorsCreated uint64 - allowInternal bool - callerValidated bool - lastGasChargeTime time.Time - lastGasCharge *types.GasTrace -} - -func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount { - total := types.FromFil(build.TotalFilecoin) - - rew, err := rt.state.GetActor(builtin.RewardActorAddr) - if err != nil { - rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err) - } - - burnt, err := rt.state.GetActor(builtin.BurntFundsActorAddr) - if err != nil { - rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err) - } - - market, err := rt.state.GetActor(builtin.StorageMarketActorAddr) - if err != nil { - rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err) - } - - power, err := rt.state.GetActor(builtin.StoragePowerActorAddr) - if err != nil { - rt.Abortf(exitcode.ErrIllegalState, "failed to get reward actor for computing total supply: %s", err) - } - - total = types.BigSub(total, rew.Balance) - total = types.BigSub(total, burnt.Balance) - total = types.BigSub(total, market.Balance) - - var st sapower.State - if err := rt.cst.Get(rt.ctx, power.Head, &st); err != nil { - rt.Abortf(exitcode.ErrIllegalState, "failed to get storage power state: %s", err) - } - - 
return types.BigSub(total, st.TotalPledgeCollateral) -} - -func (rt *Runtime) ResolveAddress(addr address.Address) (ret address.Address, ok bool) { - r, err := rt.state.LookupID(addr) - if err != nil { - if xerrors.Is(err, sainit.ErrAddressNotFound) { - return address.Undef, false - } - panic(aerrors.Fatalf("failed to resolve address %s: %s", addr, err)) - } - return r, true -} - -type notFoundErr interface { - IsNotFound() bool -} - -func (rt *Runtime) Get(c cid.Cid, o vmr.CBORUnmarshaler) bool { - if err := rt.cst.Get(context.TODO(), c, o); err != nil { - var nfe notFoundErr - if xerrors.As(err, &nfe) && nfe.IsNotFound() { - if xerrors.As(err, new(cbor.SerializationError)) { - panic(aerrors.Newf(exitcode.ErrSerialization, "failed to unmarshal cbor object %s", err)) - } - return false - } - - panic(aerrors.Fatalf("failed to get cbor object %s: %s", c, err)) - } - return true -} - -func (rt *Runtime) Put(x vmr.CBORMarshaler) cid.Cid { - c, err := rt.cst.Put(context.TODO(), x) - if err != nil { - if xerrors.As(err, new(cbor.SerializationError)) { - panic(aerrors.Newf(exitcode.ErrSerialization, "failed to marshal cbor object %s", err)) - } - panic(aerrors.Fatalf("failed to put cbor object: %s", err)) - } - return c -} - -var _ vmr.Runtime = (*Runtime)(nil) - -func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { - defer func() { - if r := recover(); r != nil { - if ar, ok := r.(aerrors.ActorError); ok { - log.Warnf("VM.Call failure: %+v", ar) - aerr = ar - return - } - log.Errorf("spec actors failure: %s", r) - aerr = aerrors.Newf(1, "spec actors failure: %s", r) - } - }() - - ret := f() - - if !rt.callerValidated { - rt.Abortf(exitcode.SysErrorIllegalActor, "Caller MUST be validated during method execution") - } - - switch ret := ret.(type) { - case []byte: - return ret, nil - case *adt.EmptyValue: - return nil, nil - case cbg.CBORMarshaler: - buf := new(bytes.Buffer) - if err := ret.MarshalCBOR(buf); err != nil { - return nil, 
aerrors.Absorb(err, 2, "failed to marshal response to cbor") - } - return buf.Bytes(), nil - case nil: - return nil, nil - default: - return nil, aerrors.New(3, "could not determine type for response from call") - } -} - -func (rt *Runtime) Message() vmr.Message { - return rt.vmsg -} - -func (rt *Runtime) ValidateImmediateCallerAcceptAny() { - rt.abortIfAlreadyValidated() - return -} - -func (rt *Runtime) CurrentBalance() abi.TokenAmount { - b, err := rt.GetBalance(rt.Message().Receiver()) - if err != nil { - rt.Abortf(err.RetCode(), "get current balance: %v", err) - } - return b -} - -func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) { - act, err := rt.state.GetActor(addr) - if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { - return cid.Undef, false - } - - panic(aerrors.Fatalf("failed to get actor: %s", err)) - } - - return act.Code, true -} - -func (rt *Runtime) GetRandomness(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - res, err := rt.vm.rand.GetRandomness(rt.ctx, personalization, randEpoch, entropy) - if err != nil { - panic(aerrors.Fatalf("could not get randomness: %s", err)) - } - return res -} - -func (rt *Runtime) Store() vmr.Store { - return rt -} - -func (rt *Runtime) NewActorAddress() address.Address { - var b bytes.Buffer - oa, _ := ResolveToKeyAddr(rt.vm.cstate, rt.vm.cst, rt.origin) - if err := oa.MarshalCBOR(&b); err != nil { // todo: spec says cbor; why not just bytes? 
- panic(aerrors.Fatalf("writing caller address into a buffer: %v", err)) - } - - if err := binary.Write(&b, binary.BigEndian, rt.originNonce); err != nil { - panic(aerrors.Fatalf("writing nonce address into a buffer: %v", err)) - } - if err := binary.Write(&b, binary.BigEndian, rt.numActorsCreated); err != nil { // TODO: expose on vm - panic(aerrors.Fatalf("writing callSeqNum address into a buffer: %v", err)) - } - addr, err := address.NewActorAddress(b.Bytes()) - if err != nil { - panic(aerrors.Fatalf("create actor address: %v", err)) - } - - rt.incrementNumActorsCreated() - return addr -} - -func (rt *Runtime) CreateActor(codeID cid.Cid, address address.Address) { - if !builtin.IsBuiltinActor(codeID) { - rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only create built-in actors.") - } - - if builtin.IsSingletonActor(codeID) { - rt.Abortf(exitcode.SysErrorIllegalArgument, "Can only have one instance of singleton actors.") - } - - _, err := rt.state.GetActor(address) - if err == nil { - rt.Abortf(exitcode.SysErrorIllegalArgument, "Actor address already exists") - } - - rt.chargeGas(rt.Pricelist().OnCreateActor()) - - err = rt.state.SetActor(address, &types.Actor{ - Code: codeID, - Head: EmptyObjectCid, - Nonce: 0, - Balance: big.Zero(), - }) - if err != nil { - panic(aerrors.Fatalf("creating actor entry: %v", err)) - } - _ = rt.chargeGasSafe(gasOnActorExec) -} - -func (rt *Runtime) DeleteActor(addr address.Address) { - rt.chargeGas(rt.Pricelist().OnDeleteActor()) - act, err := rt.state.GetActor(rt.Message().Receiver()) - if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { - rt.Abortf(exitcode.SysErrorIllegalActor, "failed to load actor in delete actor: %s", err) - } - panic(aerrors.Fatalf("failed to get actor: %s", err)) - } - if !act.Balance.IsZero() { - if err := rt.vm.transfer(rt.Message().Receiver(), builtin.BurntFundsActorAddr, act.Balance); err != nil { - panic(aerrors.Fatalf("failed to transfer balance to burnt funds actor: %s", err)) - } - } - 
- if err := rt.state.DeleteActor(rt.Message().Receiver()); err != nil { - panic(aerrors.Fatalf("failed to delete actor: %s", err)) - } - _ = rt.chargeGasSafe(gasOnActorExec) -} - -func (rt *Runtime) Syscalls() vmr.Syscalls { - return rt.sys -} - -func (rt *Runtime) StartSpan(name string) vmr.TraceSpan { - panic("implement me") -} - -func (rt *Runtime) ValidateImmediateCallerIs(as ...address.Address) { - rt.abortIfAlreadyValidated() - imm := rt.Message().Caller() - - for _, a := range as { - if imm == a { - return - } - } - rt.Abortf(exitcode.SysErrForbidden, "caller %s is not one of %s", rt.Message().Caller(), as) -} - -func (rt *Runtime) Context() context.Context { - return rt.ctx -} - -func (rt *Runtime) Abortf(code exitcode.ExitCode, msg string, args ...interface{}) { - log.Warnf("Abortf: " + fmt.Sprintf(msg, args...)) - panic(aerrors.NewfSkip(2, code, msg, args...)) -} - -func (rt *Runtime) AbortStateMsg(msg string) { - panic(aerrors.NewfSkip(3, 101, msg)) -} - -func (rt *Runtime) ValidateImmediateCallerType(ts ...cid.Cid) { - rt.abortIfAlreadyValidated() - callerCid, ok := rt.GetActorCodeCID(rt.Message().Caller()) - if !ok { - panic(aerrors.Fatalf("failed to lookup code cid for caller")) - } - for _, t := range ts { - if t == callerCid { - return - } - } - rt.Abortf(exitcode.SysErrForbidden, "caller cid type %q was not one of %v", callerCid, ts) -} - -func (rt *Runtime) CurrEpoch() abi.ChainEpoch { - return rt.height -} - -type dumbWrapperType struct { - val []byte -} - -func (dwt *dumbWrapperType) Into(um vmr.CBORUnmarshaler) error { - return um.UnmarshalCBOR(bytes.NewReader(dwt.val)) -} - -func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m vmr.CBORMarshaler, value abi.TokenAmount) (vmr.SendReturn, exitcode.ExitCode) { - if !rt.allowInternal { - rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed") - } - var params []byte - if m != nil { - buf := new(bytes.Buffer) - if err := m.MarshalCBOR(buf); err != nil { - 
rt.Abortf(exitcode.SysErrInvalidParameters, "failed to marshal input parameters: %s", err) - } - params = buf.Bytes() - } - - ret, err := rt.internalSend(rt.Message().Receiver(), to, method, value, params) - if err != nil { - if err.IsFatal() { - panic(err) - } - log.Warnf("vmctx send failed: to: %s, method: %d: ret: %d, err: %s", to, method, ret, err) - return nil, err.RetCode() - } - _ = rt.chargeGasSafe(gasOnActorExec) - return &dumbWrapperType{ret}, 0 -} - -func (rt *Runtime) internalSend(from, to address.Address, method abi.MethodNum, value types.BigInt, params []byte) ([]byte, aerrors.ActorError) { - - start := time.Now() - ctx, span := trace.StartSpan(rt.ctx, "vmc.Send") - defer span.End() - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("to", to.String()), - trace.Int64Attribute("method", int64(method)), - trace.StringAttribute("value", value.String()), - ) - } - - msg := &types.Message{ - From: from, - To: to, - Method: method, - Value: value, - Params: params, - GasLimit: rt.gasAvailable, - } - - st := rt.state - if err := st.Snapshot(ctx); err != nil { - return nil, aerrors.Fatalf("snapshot failed: %s", err) - } - defer st.ClearSnapshot() - - ret, errSend, subrt := rt.vm.send(ctx, msg, rt, nil, start) - if errSend != nil { - if errRevert := st.Revert(); errRevert != nil { - return nil, aerrors.Escalate(errRevert, "failed to revert state tree after failed subcall") - } - } - - if subrt != nil { - rt.numActorsCreated = subrt.numActorsCreated - } - rt.executionTrace.Subcalls = append(rt.executionTrace.Subcalls, subrt.executionTrace) - return ret, errSend -} - -func (rt *Runtime) State() vmr.StateHandle { - return &shimStateHandle{rt: rt} -} - -type shimStateHandle struct { - rt *Runtime -} - -func (ssh *shimStateHandle) Create(obj vmr.CBORMarshaler) { - c := ssh.rt.Put(obj) - // TODO: handle error below - ssh.rt.stateCommit(EmptyObjectCid, c) -} - -func (ssh *shimStateHandle) Readonly(obj vmr.CBORUnmarshaler) { - act, err := 
ssh.rt.state.GetActor(ssh.rt.Message().Receiver()) - if err != nil { - ssh.rt.Abortf(exitcode.SysErrorIllegalArgument, "failed to get actor for Readonly state: %s", err) - } - ssh.rt.Get(act.Head, obj) -} - -func (ssh *shimStateHandle) Transaction(obj vmr.CBORer, f func() interface{}) interface{} { - if obj == nil { - ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "Must not pass nil to Transaction()") - } - - act, err := ssh.rt.state.GetActor(ssh.rt.Message().Receiver()) - if err != nil { - ssh.rt.Abortf(exitcode.SysErrorIllegalActor, "failed to get actor for Transaction: %s", err) - } - baseState := act.Head - ssh.rt.Get(baseState, obj) - - ssh.rt.allowInternal = false - out := f() - ssh.rt.allowInternal = true - - c := ssh.rt.Put(obj) - - // TODO: handle error below - ssh.rt.stateCommit(baseState, c) - - return out -} - -func (rt *Runtime) GetBalance(a address.Address) (types.BigInt, aerrors.ActorError) { - act, err := rt.state.GetActor(a) - switch err { - default: - return types.EmptyInt, aerrors.Escalate(err, "failed to look up actor balance") - case hamt.ErrNotFound: - return types.NewInt(0), nil - case nil: - return act.Balance, nil - } -} - -func (rt *Runtime) stateCommit(oldh, newh cid.Cid) aerrors.ActorError { - // TODO: we can make this more efficient in the future... 
- act, err := rt.state.GetActor(rt.Message().Receiver()) - if err != nil { - return aerrors.Escalate(err, "failed to get actor to commit state") - } - - if act.Head != oldh { - return aerrors.Fatal("failed to update, inconsistent base reference") - } - - act.Head = newh - - if err := rt.state.SetActor(rt.Message().Receiver(), act); err != nil { - return aerrors.Fatalf("failed to set actor in commit state: %s", err) - } - - return nil -} - -func (rt *Runtime) finilizeGasTracing() { - if rt.lastGasCharge != nil { - rt.lastGasCharge.TimeTaken = time.Since(rt.lastGasChargeTime) - } -} - -// ChargeGas is spec actors function -func (rt *Runtime) ChargeGas(name string, compute int64, virtual int64) { - err := rt.chargeGasInternal(newGasCharge(name, compute, 0).WithVirtual(virtual, 0), 1) - if err != nil { - panic(err) - } -} - -func (rt *Runtime) chargeGas(gas GasCharge) { - err := rt.chargeGasInternal(gas, 1) - if err != nil { - panic(err) - } -} - -func (rt *Runtime) chargeGasFunc(skip int) func(GasCharge) { - return func(gas GasCharge) { - err := rt.chargeGasInternal(gas, 1+skip) - if err != nil { - panic(err) - } - } - -} - -func (rt *Runtime) chargeGasInternal(gas GasCharge, skip int) aerrors.ActorError { - toUse := gas.Total() - var callers [10]uintptr - cout := gruntime.Callers(2+skip, callers[:]) - - now := time.Now() - if rt.lastGasCharge != nil { - rt.lastGasCharge.TimeTaken = now.Sub(rt.lastGasChargeTime) - } - - gasTrace := types.GasTrace{ - Name: gas.Name, - Extra: gas.Extra, - - TotalGas: toUse, - ComputeGas: gas.ComputeGas, - StorageGas: gas.StorageGas, - - TotalVirtualGas: gas.VirtualCompute*GasComputeMulti + gas.VirtualStorage*GasStorageMulti, - VirtualComputeGas: gas.VirtualCompute, - VirtualStorageGas: gas.VirtualStorage, - - Callers: callers[:cout], - } - rt.executionTrace.GasCharges = append(rt.executionTrace.GasCharges, &gasTrace) - rt.lastGasChargeTime = now - rt.lastGasCharge = &gasTrace - - if rt.gasUsed+toUse > rt.gasAvailable { - rt.gasUsed = 
rt.gasAvailable - return aerrors.Newf(exitcode.SysErrOutOfGas, "not enough gas: used=%d, available=%d", - rt.gasUsed, rt.gasAvailable) - } - rt.gasUsed += toUse - return nil -} - -func (rt *Runtime) chargeGasSafe(gas GasCharge) aerrors.ActorError { - return rt.chargeGasInternal(gas, 1) -} - -func (rt *Runtime) Pricelist() Pricelist { - return rt.pricelist -} - -func (rt *Runtime) incrementNumActorsCreated() { - rt.numActorsCreated++ -} - -func (rt *Runtime) abortIfAlreadyValidated() { - if rt.callerValidated { - rt.Abortf(exitcode.SysErrorIllegalActor, "Method must validate caller identity exactly once") - } - rt.callerValidated = true -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/runtime_test.go b/vendor/github.com/filecoin-project/lotus/chain/vm/runtime_test.go deleted file mode 100644 index b5c75c177f..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/runtime_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package vm - -import ( - "io" - "testing" - - cbor "github.com/ipfs/go-ipld-cbor" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - - "github.com/filecoin-project/lotus/chain/actors/aerrors" -) - -type NotAVeryGoodMarshaler struct{} - -func (*NotAVeryGoodMarshaler) MarshalCBOR(writer io.Writer) error { - return xerrors.Errorf("no") -} - -var _ cbg.CBORMarshaler = &NotAVeryGoodMarshaler{} - -func TestRuntimePutErrors(t *testing.T) { - defer func() { - err := recover() - if err == nil { - t.Fatal("expected non-nil recovery") - } - - aerr := err.(aerrors.ActorError) - if aerr.IsFatal() { - t.Fatal("expected non-fatal actor error") - } - - if aerr.RetCode() != exitcode.ErrSerialization { - t.Fatal("expected serialization error") - } - }() - - rt := Runtime{ - cst: cbor.NewCborStore(nil), - } - - rt.Put(&NotAVeryGoodMarshaler{}) - t.Error("expected panic") -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/syscalls.go 
b/vendor/github.com/filecoin-project/lotus/chain/vm/syscalls.go deleted file mode 100644 index a6a5897613..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/syscalls.go +++ /dev/null @@ -1,276 +0,0 @@ -package vm - -import ( - "bytes" - "context" - "fmt" - goruntime "runtime" - "sync" - - "github.com/filecoin-project/go-address" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" - "github.com/minio/blake2b-simd" - mh "github.com/multiformats/go-multihash" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime" - - "github.com/filecoin-project/sector-storage/ffiwrapper" -) - -func init() { - mh.Codes[0xf104] = "filecoin" -} - -// Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there - -func Syscalls(verifier ffiwrapper.Verifier) runtime.Syscalls { - return &syscallShim{verifier: verifier} -} - -type syscallShim struct { - ctx context.Context - - cstate *state.StateTree - cst *cbor.BasicIpldStore - verifier ffiwrapper.Verifier -} - -func (ss *syscallShim) ComputeUnsealedSectorCID(st abi.RegisteredSealProof, pieces []abi.PieceInfo) (cid.Cid, error) { - var sum abi.PaddedPieceSize - for _, p := range pieces { - sum += p.Size - } - - commd, err := ffiwrapper.GenerateUnsealedCID(st, pieces) - if err != nil { - log.Errorf("generate data commitment failed: %s", err) - return cid.Undef, err - } - - return commd, nil -} - -func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { - return blake2b.Sum256(data) -} - -// Checks validity of the submitted consensus fault with the two block headers needed to prove the fault -// and an optional extra one to 
check common ancestry (as needed). -// Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime.ConsensusFault, error) { - // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. - // Whether or not it could ever have been accepted in a chain is not checked/does not matter here. - // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so - // (which runs a syntactic check), we do it directly on the CIDs. - - // (0) cheap preliminary checks - - // are blocks the same? - if bytes.Equal(a, b) { - return nil, fmt.Errorf("no consensus fault: submitted blocks are the same") - } - - // can blocks be decoded properly? - var blockA, blockB types.BlockHeader - if decodeErr := blockA.UnmarshalCBOR(bytes.NewReader(a)); decodeErr != nil { - return nil, xerrors.Errorf("cannot decode first block header: %w", decodeErr) - } - - if decodeErr := blockB.UnmarshalCBOR(bytes.NewReader(b)); decodeErr != nil { - return nil, xerrors.Errorf("cannot decode second block header: %f", decodeErr) - } - - // (1) check conditions necessary to any consensus fault - - // were blocks mined by same miner? - if blockA.Miner != blockB.Miner { - return nil, fmt.Errorf("no consensus fault: blocks not mined by same miner") - } - - // block a must be earlier or equal to block b, epoch wise (ie at least as early in the chain). 
- if blockB.Height < blockA.Height { - return nil, fmt.Errorf("first block must not be of higher height than second") - } - - // (2) check for the consensus faults themselves - var consensusFault *runtime.ConsensusFault - - // (a) double-fork mining fault - if blockA.Height == blockB.Height { - consensusFault = &runtime.ConsensusFault{ - Target: blockA.Miner, - Epoch: blockB.Height, - Type: runtime.ConsensusFaultDoubleForkMining, - } - } - - // (b) time-offset mining fault - // strictly speaking no need to compare heights based on double fork mining check above, - // but at same height this would be a different fault. - if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime.ConsensusFault{ - Target: blockA.Miner, - Epoch: blockB.Height, - Type: runtime.ConsensusFaultTimeOffsetMining, - } - } - - // (c) parent-grinding fault - // Here extra is the "witness", a third block that shows the connection between A and B as - // A's sibling and B's parent. 
- // Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset - var blockC types.BlockHeader - if len(extra) > 0 { - if decodeErr := blockC.UnmarshalCBOR(bytes.NewReader(extra)); decodeErr != nil { - return nil, xerrors.Errorf("cannot decode extra: %w", decodeErr) - } - - if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && - types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime.ConsensusFault{ - Target: blockA.Miner, - Epoch: blockB.Height, - Type: runtime.ConsensusFaultParentGrinding, - } - } - } - - // (3) return if no consensus fault by now - if consensusFault == nil { - return consensusFault, nil - } - - // else - // (4) expensive final checks - - // check blocks are properly signed by their respective miner - // note we do not need to check extra's: it is a parent to block b - // which itself is signed, so it was willingly included by the miner - if sigErr := ss.VerifyBlockSig(&blockA); sigErr != nil { - return nil, xerrors.Errorf("cannot verify first block sig: %w", sigErr) - } - - if sigErr := ss.VerifyBlockSig(&blockB); sigErr != nil { - return nil, xerrors.Errorf("cannot verify second block sig: %w", sigErr) - } - - return consensusFault, nil -} - -func (ss *syscallShim) VerifyBlockSig(blk *types.BlockHeader) error { - - // get appropriate miner actor - act, err := ss.cstate.GetActor(blk.Miner) - if err != nil { - return err - } - - // use that to get the miner state - var mas miner.State - if err = ss.cst.Get(ss.ctx, act.Head, &mas); err != nil { - return err - } - - // and use to get resolved workerKey - waddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, mas.Info.Worker) - if err != nil { - return err - } - - if err := sigs.CheckBlockSignature(ss.ctx, blk, waddr); err != nil { - return err - } - - return nil -} - -func (ss *syscallShim) VerifyPoSt(proof abi.WindowPoStVerifyInfo) error { - ok, 
err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) - if err != nil { - return err - } - if !ok { - return fmt.Errorf("proof was invalid") - } - return nil -} - -func (ss *syscallShim) VerifySeal(info abi.SealVerifyInfo) error { - //_, span := trace.StartSpan(ctx, "ValidatePoRep") - //defer span.End() - - miner, err := address.NewIDAddress(uint64(info.Miner)) - if err != nil { - return xerrors.Errorf("weirdly failed to construct address: %w", err) - } - - ticket := []byte(info.Randomness) - proof := info.Proof - seed := []byte(info.InteractiveRandomness) - - log.Debugf("Verif r:%x; d:%x; m:%s; t:%x; s:%x; N:%d; p:%x", info.SealedCID, info.UnsealedCID, miner, ticket, seed, info.SectorID.Number, proof) - - //func(ctx context.Context, maddr address.Address, ssize abi.SectorSize, commD, commR, ticket, proof, seed []byte, sectorID abi.SectorNumber) - ok, err := ss.verifier.VerifySeal(info) - if err != nil { - return xerrors.Errorf("failed to validate PoRep: %w", err) - } - if !ok { - return fmt.Errorf("invalid proof") - } - - return nil -} - -func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error { - // TODO: in genesis setup, we are currently faking signatures - - kaddr, err := ResolveToKeyAddr(ss.cstate, ss.cst, addr) - if err != nil { - return err - } - - return sigs.Verify(&sig, kaddr, input) -} - -var BatchSealVerifyParallelism = goruntime.NumCPU() - -func (ss *syscallShim) BatchVerifySeals(inp map[address.Address][]abi.SealVerifyInfo) (map[address.Address][]bool, error) { - out := make(map[address.Address][]bool) - - sema := make(chan struct{}, BatchSealVerifyParallelism) - - var wg sync.WaitGroup - for addr, seals := range inp { - results := make([]bool, len(seals)) - out[addr] = results - - for i, s := range seals { - wg.Add(1) - go func(ma address.Address, ix int, svi abi.SealVerifyInfo, res []bool) { - defer wg.Done() - sema <- struct{}{} - - if err := ss.VerifySeal(svi); err != nil { - log.Warnw("seal 
verify in batch failed", "miner", ma, "index", ix, "err", err) - res[ix] = false - } else { - res[ix] = true - } - - <-sema - }(addr, i, s, results) - } - } - wg.Wait() - - return out, nil -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/validation_test.go b/vendor/github.com/filecoin-project/lotus/chain/vm/validation_test.go deleted file mode 100644 index 880b334017..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/validation_test.go +++ /dev/null @@ -1,71 +0,0 @@ -package vm_test - -import ( - "fmt" - "reflect" - "runtime" - "strings" - "testing" - - suites "github.com/filecoin-project/chain-validation/suites" - - factory "github.com/filecoin-project/lotus/chain/validation" -) - -// TestSkipper contains a list of test cases skipped by the implementation. -type TestSkipper struct { - testSkips []suites.TestCase -} - -// Skip return true if the sutire.TestCase should be skipped. -func (ts *TestSkipper) Skip(test suites.TestCase) bool { - for _, skip := range ts.testSkips { - if reflect.ValueOf(skip).Pointer() == reflect.ValueOf(test).Pointer() { - fmt.Printf("=== SKIP %v\n", runtime.FuncForPC(reflect.ValueOf(test).Pointer()).Name()) - return true - } - } - return false -} - -// TestSuiteSkips contains tests we wish to skip. 
-var TestSuiteSkipper TestSkipper - -func init() { - // initialize the test skipper with tests being skipped - TestSuiteSkipper = TestSkipper{testSkips: []suites.TestCase{ - // tests to skip go here - }} -} - -func TestChainValidationMessageSuite(t *testing.T) { - f := factory.NewFactories() - for _, testCase := range suites.MessageTestCases() { - testCase := testCase - if TestSuiteSkipper.Skip(testCase) { - continue - } - t.Run(caseName(testCase), func(t *testing.T) { - testCase(t, f) - }) - } -} - -func TestChainValidationTipSetSuite(t *testing.T) { - f := factory.NewFactories() - for _, testCase := range suites.TipSetTestCases() { - testCase := testCase - if TestSuiteSkipper.Skip(testCase) { - continue - } - t.Run(caseName(testCase), func(t *testing.T) { - testCase(t, f) - }) - } -} - -func caseName(testCase suites.TestCase) string { - fqName := runtime.FuncForPC(reflect.ValueOf(testCase).Pointer()).Name() - toks := strings.Split(fqName, ".") - return toks[len(toks)-1] -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/vm/vm.go b/vendor/github.com/filecoin-project/lotus/chain/vm/vm.go deleted file mode 100644 index 45114c4c5d..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/vm/vm.go +++ /dev/null @@ -1,732 +0,0 @@ -package vm - -import ( - "bytes" - "context" - "fmt" - "reflect" - "time" - - "github.com/filecoin-project/specs-actors/actors/builtin" - - block "github.com/ipfs/go-block-format" - cid "github.com/ipfs/go-cid" - blockstore "github.com/ipfs/go-ipfs-blockstore" - cbor "github.com/ipfs/go-ipld-cbor" - logging "github.com/ipfs/go-log/v2" - mh "github.com/multiformats/go-multihash" - cbg "github.com/whyrusleeping/cbor-gen" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - commcid "github.com/filecoin-project/go-fil-commcid" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - init_ 
"github.com/filecoin-project/specs-actors/actors/builtin/init" - "github.com/filecoin-project/specs-actors/actors/crypto" - "github.com/filecoin-project/specs-actors/actors/runtime" - "github.com/filecoin-project/specs-actors/actors/runtime/exitcode" - - "github.com/filecoin-project/lotus/chain/actors/aerrors" - "github.com/filecoin-project/lotus/chain/state" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/bufbstore" -) - -var log = logging.Logger("vm") -var gasOnActorExec = newGasCharge("OnActorExec", 0, 0) - -// ResolveToKeyAddr returns the public key type of address (`BLS`/`SECP256K1`) of an account actor identified by `addr`. -func ResolveToKeyAddr(state types.StateTree, cst cbor.IpldStore, addr address.Address) (address.Address, aerrors.ActorError) { - if addr.Protocol() == address.BLS || addr.Protocol() == address.SECP256K1 { - return addr, nil - } - - act, err := state.GetActor(addr) - if err != nil { - return address.Undef, aerrors.Newf(exitcode.SysErrInvalidParameters, "failed to find actor: %s", addr) - } - - if act.Code != builtin.AccountActorCodeID { - return address.Undef, aerrors.Newf(exitcode.SysErrInvalidParameters, "address %s was not for an account actor", addr) - } - - var aast account.State - if err := cst.Get(context.TODO(), act.Head, &aast); err != nil { - return address.Undef, aerrors.Absorb(err, exitcode.SysErrInvalidParameters, fmt.Sprintf("failed to get account actor state for %s", addr)) - } - - return aast.Address, nil -} - -var _ cbor.IpldBlockstore = (*gasChargingBlocks)(nil) - -type gasChargingBlocks struct { - chargeGas func(GasCharge) - pricelist Pricelist - under cbor.IpldBlockstore -} - -func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { - bs.chargeGas(newGasCharge("OnIpldGetStart", 0, 0)) - blk, err := bs.under.Get(c) - if err != nil { - return nil, aerrors.Escalate(err, "failed to get block from blockstore") - } - bs.chargeGas(bs.pricelist.OnIpldGet(len(blk.RawData()))) 
- bs.chargeGas(gasOnActorExec) - - return blk, nil -} - -func (bs *gasChargingBlocks) Put(blk block.Block) error { - bs.chargeGas(bs.pricelist.OnIpldPut(len(blk.RawData()))) - - if err := bs.under.Put(blk); err != nil { - return aerrors.Escalate(err, "failed to write data to disk") - } - bs.chargeGas(gasOnActorExec) - return nil -} - -func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, origin address.Address, originNonce uint64, usedGas int64, nac uint64) *Runtime { - rt := &Runtime{ - ctx: ctx, - vm: vm, - state: vm.cstate, - msg: msg, - origin: origin, - originNonce: originNonce, - height: vm.blockHeight, - - gasUsed: usedGas, - gasAvailable: msg.GasLimit, - numActorsCreated: nac, - pricelist: PricelistByEpoch(vm.blockHeight), - allowInternal: true, - callerValidated: false, - executionTrace: types.ExecutionTrace{Msg: msg}, - } - - rt.cst = &cbor.BasicIpldStore{ - Blocks: &gasChargingBlocks{rt.chargeGasFunc(2), rt.pricelist, vm.cst.Blocks}, - Atlas: vm.cst.Atlas, - } - rt.sys = pricedSyscalls{ - under: vm.Syscalls, - chargeGas: rt.chargeGasFunc(1), - pl: rt.pricelist, - } - - vmm := *msg - resF, ok := rt.ResolveAddress(msg.From) - if !ok { - rt.Abortf(exitcode.SysErrInvalidReceiver, "resolve msg.From address failed") - } - vmm.From = resF - rt.vmsg = &vmm - - return rt -} - -type VM struct { - cstate *state.StateTree - base cid.Cid - cst *cbor.BasicIpldStore - buf *bufbstore.BufferedBS - blockHeight abi.ChainEpoch - inv *Invoker - rand Rand - - Syscalls runtime.Syscalls -} - -func NewVM(base cid.Cid, height abi.ChainEpoch, r Rand, cbs blockstore.Blockstore, syscalls runtime.Syscalls) (*VM, error) { - buf := bufbstore.NewBufferedBstore(cbs) - cst := cbor.NewCborStore(buf) - state, err := state.LoadStateTree(cst, base) - if err != nil { - return nil, err - } - - return &VM{ - cstate: state, - base: base, - cst: cst, - buf: buf, - blockHeight: height, - inv: NewInvoker(), - rand: r, // TODO: Probably should be a syscall - Syscalls: syscalls, - }, nil 
-} - -type Rand interface { - GetRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) -} - -type ApplyRet struct { - types.MessageReceipt - ActorErr aerrors.ActorError - Penalty types.BigInt - ExecutionTrace types.ExecutionTrace - Duration time.Duration -} - -func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, - gasCharge *GasCharge, start time.Time) ([]byte, aerrors.ActorError, *Runtime) { - - st := vm.cstate - - origin := msg.From - on := msg.Nonce - var nac uint64 = 0 - var gasUsed int64 - if parent != nil { - gasUsed = parent.gasUsed - origin = parent.origin - on = parent.originNonce - nac = parent.numActorsCreated - } - - rt := vm.makeRuntime(ctx, msg, origin, on, gasUsed, nac) - rt.lastGasChargeTime = start - if parent != nil { - rt.lastGasChargeTime = parent.lastGasChargeTime - rt.lastGasCharge = parent.lastGasCharge - defer func() { - parent.gasUsed = rt.gasUsed - parent.lastGasChargeTime = rt.lastGasChargeTime - parent.lastGasCharge = rt.lastGasCharge - }() - } - - if gasCharge != nil { - if err := rt.chargeGasSafe(*gasCharge); err != nil { - // this should never happen - return nil, aerrors.Wrap(err, "not enough gas for initial message charge, this should not happen"), rt - } - } - - ret, err := func() ([]byte, aerrors.ActorError) { - if aerr := rt.chargeGasSafe(rt.Pricelist().OnMethodInvocation(msg.Value, msg.Method)); aerr != nil { - return nil, aerrors.Wrap(aerr, "not enough gas for method invocation") - } - - toActor, err := st.GetActor(msg.To) - if err != nil { - if xerrors.Is(err, init_.ErrAddressNotFound) { - a, err := TryCreateAccountActor(rt, msg.To) - if err != nil { - return nil, aerrors.Wrapf(err, "could not create account") - } - toActor = a - } else { - return nil, aerrors.Escalate(err, "getting actor") - } - } - - if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { - if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil { - return nil, 
aerrors.Wrap(err, "failed to transfer funds") - } - } - - if msg.Method != 0 { - var ret []byte - _ = rt.chargeGasSafe(gasOnActorExec) - ret, err := vm.Invoke(toActor, rt, msg.Method, msg.Params) - _ = rt.chargeGasSafe(newGasCharge("OnActorExecDone", 0, 0)) - return ret, err - } - return nil, nil - }() - - mr := types.MessageReceipt{ - ExitCode: aerrors.RetCode(err), - Return: ret, - GasUsed: rt.gasUsed, - } - rt.executionTrace.MsgRct = &mr - rt.executionTrace.Duration = time.Since(start) - if err != nil { - rt.executionTrace.Error = err.Error() - } - - return ret, err, rt -} - -func checkMessage(msg *types.Message) error { - if msg.GasLimit == 0 { - return xerrors.Errorf("message has no gas limit set") - } - if msg.GasLimit < 0 { - return xerrors.Errorf("message has negative gas limit") - } - - if msg.GasPrice == types.EmptyInt { - return xerrors.Errorf("message gas no gas price set") - } - - if msg.Value == types.EmptyInt { - return xerrors.Errorf("message no value set") - } - - return nil -} - -func (vm *VM) ApplyImplicitMessage(ctx context.Context, msg *types.Message) (*ApplyRet, error) { - start := time.Now() - ret, actorErr, rt := vm.send(ctx, msg, nil, nil, start) - rt.finilizeGasTracing() - return &ApplyRet{ - MessageReceipt: types.MessageReceipt{ - ExitCode: aerrors.RetCode(actorErr), - Return: ret, - GasUsed: 0, - }, - ActorErr: actorErr, - ExecutionTrace: rt.executionTrace, - Penalty: types.NewInt(0), - Duration: time.Since(start), - }, actorErr -} - -func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, error) { - start := time.Now() - ctx, span := trace.StartSpan(ctx, "vm.ApplyMessage") - defer span.End() - msg := cmsg.VMMessage() - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("to", msg.To.String()), - trace.Int64Attribute("method", int64(msg.Method)), - trace.StringAttribute("value", msg.Value.String()), - ) - } - - if err := checkMessage(msg); err != nil { - return nil, err - } - - pl := 
PricelistByEpoch(vm.blockHeight) - - msgGas := pl.OnChainMessage(cmsg.ChainLength()) - msgGasCost := msgGas.Total() - // this should never happen, but is currently still exercised by some tests - if msgGasCost > msg.GasLimit { - return &ApplyRet{ - MessageReceipt: types.MessageReceipt{ - ExitCode: exitcode.SysErrOutOfGas, - GasUsed: 0, - }, - Penalty: types.BigMul(msg.GasPrice, types.NewInt(uint64(msgGasCost))), - Duration: time.Since(start), - }, nil - } - - st := vm.cstate - - minerPenaltyAmount := types.BigMul(msg.GasPrice, types.NewInt(uint64(msgGasCost))) - fromActor, err := st.GetActor(msg.From) - // this should never happen, but is currently still exercised by some tests - if err != nil { - if xerrors.Is(err, types.ErrActorNotFound) { - return &ApplyRet{ - MessageReceipt: types.MessageReceipt{ - ExitCode: exitcode.SysErrSenderInvalid, - GasUsed: 0, - }, - Penalty: minerPenaltyAmount, - Duration: time.Since(start), - }, nil - } - return nil, xerrors.Errorf("failed to look up from actor: %w", err) - } - - // this should never happen, but is currently still exercised by some tests - if !fromActor.Code.Equals(builtin.AccountActorCodeID) { - return &ApplyRet{ - MessageReceipt: types.MessageReceipt{ - ExitCode: exitcode.SysErrSenderInvalid, - GasUsed: 0, - }, - Penalty: minerPenaltyAmount, - Duration: time.Since(start), - }, nil - } - - // TODO: We should remove this, we might punish miners for no fault of their own - if msg.Nonce != fromActor.Nonce { - return &ApplyRet{ - MessageReceipt: types.MessageReceipt{ - ExitCode: exitcode.SysErrSenderStateInvalid, - GasUsed: 0, - }, - Penalty: minerPenaltyAmount, - Duration: time.Since(start), - }, nil - } - - gascost := types.BigMul(types.NewInt(uint64(msg.GasLimit)), msg.GasPrice) - totalCost := types.BigAdd(gascost, msg.Value) - if fromActor.Balance.LessThan(totalCost) { - return &ApplyRet{ - MessageReceipt: types.MessageReceipt{ - ExitCode: exitcode.SysErrSenderStateInvalid, - GasUsed: 0, - }, - Penalty: 
minerPenaltyAmount, - Duration: time.Since(start), - }, nil - } - - gasHolder := &types.Actor{Balance: types.NewInt(0)} - if err := vm.transferToGasHolder(msg.From, gasHolder, gascost); err != nil { - return nil, xerrors.Errorf("failed to withdraw gas funds: %w", err) - } - - if err := vm.incrementNonce(msg.From); err != nil { - return nil, err - } - - if err := st.Snapshot(ctx); err != nil { - return nil, xerrors.Errorf("snapshot failed: %w", err) - } - defer st.ClearSnapshot() - - ret, actorErr, rt := vm.send(ctx, msg, nil, &msgGas, start) - if aerrors.IsFatal(actorErr) { - return nil, xerrors.Errorf("[from=%s,to=%s,n=%d,m=%d,h=%d] fatal error: %w", msg.From, msg.To, msg.Nonce, msg.Method, vm.blockHeight, actorErr) - } - - if actorErr != nil { - log.Warnw("Send actor error", "from", msg.From, "to", msg.To, "nonce", msg.Nonce, "method", msg.Method, "height", vm.blockHeight, "error", fmt.Sprintf("%+v", actorErr)) - } - - if actorErr != nil && len(ret) != 0 { - // This should not happen, something is wonky - return nil, xerrors.Errorf("message invocation errored, but had a return value anyway: %w", actorErr) - } - - if rt == nil { - return nil, xerrors.Errorf("send returned nil runtime, send error was: %s", actorErr) - } - - if len(ret) != 0 { - // safely override actorErr since it must be nil - actorErr = rt.chargeGasSafe(rt.Pricelist().OnChainReturnValue(len(ret))) - if actorErr != nil { - ret = nil - } - } - - var errcode exitcode.ExitCode - var gasUsed int64 - - if errcode = aerrors.RetCode(actorErr); errcode != 0 { - // revert all state changes since snapshot - if err := st.Revert(); err != nil { - return nil, xerrors.Errorf("revert state failed: %w", err) - } - } - gasUsed = rt.gasUsed - if gasUsed < 0 { - gasUsed = 0 - } - // refund unused gas - refund := types.BigMul(types.NewInt(uint64(msg.GasLimit-gasUsed)), msg.GasPrice) - if err := vm.transferFromGasHolder(msg.From, gasHolder, refund); err != nil { - return nil, xerrors.Errorf("failed to refund gas") - } 
- - gasReward := types.BigMul(msg.GasPrice, types.NewInt(uint64(gasUsed))) - if err := vm.transferFromGasHolder(builtin.RewardActorAddr, gasHolder, gasReward); err != nil { - return nil, xerrors.Errorf("failed to give miner gas reward: %w", err) - } - - if types.BigCmp(types.NewInt(0), gasHolder.Balance) != 0 { - return nil, xerrors.Errorf("gas handling math is wrong") - } - - rt.finilizeGasTracing() - - return &ApplyRet{ - MessageReceipt: types.MessageReceipt{ - ExitCode: errcode, - Return: ret, - GasUsed: gasUsed, - }, - ActorErr: actorErr, - ExecutionTrace: rt.executionTrace, - Penalty: types.NewInt(0), - Duration: time.Since(start), - }, nil -} - -func (vm *VM) ActorBalance(addr address.Address) (types.BigInt, aerrors.ActorError) { - act, err := vm.cstate.GetActor(addr) - if err != nil { - return types.EmptyInt, aerrors.Absorb(err, 1, "failed to find actor") - } - - return act.Balance, nil -} - -func (vm *VM) Flush(ctx context.Context) (cid.Cid, error) { - _, span := trace.StartSpan(ctx, "vm.Flush") - defer span.End() - - from := vm.buf - to := vm.buf.Read() - - root, err := vm.cstate.Flush(ctx) - if err != nil { - return cid.Undef, xerrors.Errorf("flushing vm: %w", err) - } - - if err := Copy(from, to, root); err != nil { - return cid.Undef, xerrors.Errorf("copying tree: %w", err) - } - - return root, nil -} - -// MutateState usage: MutateState(ctx, idAddr, func(cst cbor.IpldStore, st *ActorStateType) error {...}) -func (vm *VM) MutateState(ctx context.Context, addr address.Address, fn interface{}) error { - act, err := vm.cstate.GetActor(addr) - if err != nil { - return xerrors.Errorf("actor not found: %w", err) - } - - st := reflect.New(reflect.TypeOf(fn).In(1).Elem()) - if err := vm.cst.Get(ctx, act.Head, st.Interface()); err != nil { - return xerrors.Errorf("read actor head: %w", err) - } - - out := reflect.ValueOf(fn).Call([]reflect.Value{reflect.ValueOf(vm.cst), st}) - if !out[0].IsNil() && out[0].Interface().(error) != nil { - return 
out[0].Interface().(error) - } - - head, err := vm.cst.Put(ctx, st.Interface()) - if err != nil { - return xerrors.Errorf("put new actor head: %w", err) - } - - act.Head = head - - if err := vm.cstate.SetActor(addr, act); err != nil { - return xerrors.Errorf("set actor: %w", err) - } - - return nil -} - -func linksForObj(blk block.Block) ([]cid.Cid, error) { - switch blk.Cid().Prefix().Codec { - case cid.DagCBOR: - return cbg.ScanForLinks(bytes.NewReader(blk.RawData())) - default: - return nil, xerrors.Errorf("vm flush copy method only supports dag cbor") - } -} - -func Copy(from, to blockstore.Blockstore, root cid.Cid) error { - var batch []block.Block - batchCp := func(blk block.Block) error { - batch = append(batch, blk) - if len(batch) > 100 { - if err := to.PutMany(batch); err != nil { - return xerrors.Errorf("batch put in copy: %w", err) - } - batch = batch[:0] - } - return nil - } - - if err := copyRec(from, to, root, batchCp); err != nil { - return err - } - - if len(batch) > 0 { - if err := to.PutMany(batch); err != nil { - return xerrors.Errorf("batch put in copy: %w", err) - } - } - - return nil -} - -func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { - if root.Prefix().MhType == 0 { - // identity cid, skip - return nil - } - - blk, err := from.Get(root) - if err != nil { - return xerrors.Errorf("get %s failed: %w", root, err) - } - - links, err := linksForObj(blk) - if err != nil { - return err - } - - for _, link := range links { - if link.Prefix().MhType == mh.IDENTITY || link.Prefix().MhType == uint64(commcid.FC_SEALED_V1) || link.Prefix().MhType == uint64(commcid.FC_UNSEALED_V1) { - continue - } - - has, err := to.Has(link) - if err != nil { - return err - } - if has { - continue - } - - if err := copyRec(from, to, link, cp); err != nil { - return err - } - } - - if err := cp(blk); err != nil { - return err - } - return nil -} - -func (vm *VM) StateTree() types.StateTree { - return vm.cstate -} - -func (vm 
*VM) SetBlockHeight(h abi.ChainEpoch) { - vm.blockHeight = h -} - -func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { - ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke") - defer span.End() - if span.IsRecordingEvents() { - span.AddAttributes( - trace.StringAttribute("to", rt.Message().Receiver().String()), - trace.Int64Attribute("method", int64(method)), - trace.StringAttribute("value", rt.Message().ValueReceived().String()), - ) - } - - var oldCtx context.Context - oldCtx, rt.ctx = rt.ctx, ctx - defer func() { - rt.ctx = oldCtx - }() - ret, err := vm.inv.Invoke(act.Code, rt, method, params) - if err != nil { - return nil, err - } - return ret, nil -} - -func (vm *VM) SetInvoker(i *Invoker) { - vm.inv = i -} - -func (vm *VM) incrementNonce(addr address.Address) error { - return vm.cstate.MutateActor(addr, func(a *types.Actor) error { - a.Nonce++ - return nil - }) -} - -func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.ActorError { - if from == to { - return nil - } - - fromID, err := vm.cstate.LookupID(from) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) - } - - toID, err := vm.cstate.LookupID(to) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) - } - - if fromID == toID { - return nil - } - - if amt.LessThan(types.NewInt(0)) { - return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) - } - - f, err := vm.cstate.GetActor(fromID) - if err != nil { - return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) - } - - t, err := vm.cstate.GetActor(toID) - if err != nil { - return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err) - } - - if err := deductFunds(f, amt); err != nil { - return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds: %s", err) - } - 
depositFunds(t, amt) - - if err := vm.cstate.SetActor(fromID, f); err != nil { - return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) - } - - if err := vm.cstate.SetActor(toID, t); err != nil { - return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) - } - - return nil -} - -func (vm *VM) transferToGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { - if amt.LessThan(types.NewInt(0)) { - return xerrors.Errorf("attempted to transfer negative value to gas holder") - } - - return vm.cstate.MutateActor(addr, func(a *types.Actor) error { - if err := deductFunds(a, amt); err != nil { - return err - } - depositFunds(gasHolder, amt) - return nil - }) -} - -func (vm *VM) transferFromGasHolder(addr address.Address, gasHolder *types.Actor, amt types.BigInt) error { - if amt.LessThan(types.NewInt(0)) { - return xerrors.Errorf("attempted to transfer negative value from gas holder") - } - - return vm.cstate.MutateActor(addr, func(a *types.Actor) error { - if err := deductFunds(gasHolder, amt); err != nil { - return err - } - depositFunds(a, amt) - return nil - }) -} - -func deductFunds(act *types.Actor, amt types.BigInt) error { - if act.Balance.LessThan(amt) { - return fmt.Errorf("not enough funds") - } - - act.Balance = types.BigSub(act.Balance, amt) - return nil -} - -func depositFunds(act *types.Actor, amt types.BigInt) { - act.Balance = types.BigAdd(act.Balance, amt) -} diff --git a/vendor/github.com/filecoin-project/lotus/chain/wallet/memkeystore.go b/vendor/github.com/filecoin-project/lotus/chain/wallet/memkeystore.go deleted file mode 100644 index 9844083c4a..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/wallet/memkeystore.go +++ /dev/null @@ -1,48 +0,0 @@ -package wallet - -import ( - "github.com/filecoin-project/lotus/chain/types" -) - -type MemKeyStore struct { - m map[string]types.KeyInfo -} - -func NewMemKeyStore() *MemKeyStore { - return &MemKeyStore{ - 
make(map[string]types.KeyInfo), - } -} - -// List lists all the keys stored in the KeyStore -func (mks *MemKeyStore) List() ([]string, error) { - var out []string - for k := range mks.m { - out = append(out, k) - } - return out, nil -} - -// Get gets a key out of keystore and returns KeyInfo corresponding to named key -func (mks *MemKeyStore) Get(k string) (types.KeyInfo, error) { - ki, ok := mks.m[k] - if !ok { - return types.KeyInfo{}, types.ErrKeyInfoNotFound - } - - return ki, nil -} - -// Put saves a key info under given name -func (mks *MemKeyStore) Put(k string, ki types.KeyInfo) error { - mks.m[k] = ki - return nil -} - -// Delete removes a key from keystore -func (mks *MemKeyStore) Delete(k string) error { - delete(mks.m, k) - return nil -} - -var _ (types.KeyStore) = (*MemKeyStore)(nil) diff --git a/vendor/github.com/filecoin-project/lotus/chain/wallet/wallet.go b/vendor/github.com/filecoin-project/lotus/chain/wallet/wallet.go deleted file mode 100644 index 9c069d8192..0000000000 --- a/vendor/github.com/filecoin-project/lotus/chain/wallet/wallet.go +++ /dev/null @@ -1,307 +0,0 @@ -package wallet - -import ( - "context" - "sort" - "strings" - "sync" - - "github.com/filecoin-project/specs-actors/actors/crypto" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-address" - - _ "github.com/filecoin-project/lotus/lib/sigs/bls" // enable bls signatures - _ "github.com/filecoin-project/lotus/lib/sigs/secp" // enable secp signatures - - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/lib/sigs" -) - -var log = logging.Logger("wallet") - -const ( - KNamePrefix = "wallet-" - KTrashPrefix = "trash-" - KDefault = "default" - KTBLS = "bls" - KTSecp256k1 = "secp256k1" -) - -type Wallet struct { - keys map[address.Address]*Key - keystore types.KeyStore - - lk sync.Mutex -} - -func NewWallet(keystore types.KeyStore) (*Wallet, error) { - w := &Wallet{ - keys: make(map[address.Address]*Key), 
- keystore: keystore, - } - - return w, nil -} - -func KeyWallet(keys ...*Key) *Wallet { - m := make(map[address.Address]*Key) - for _, key := range keys { - m[key.Address] = key - } - - return &Wallet{ - keys: m, - } -} - -func (w *Wallet) Sign(ctx context.Context, addr address.Address, msg []byte) (*crypto.Signature, error) { - ki, err := w.findKey(addr) - if err != nil { - return nil, err - } - if ki == nil { - return nil, xerrors.Errorf("signing using key '%s': %w", addr.String(), types.ErrKeyInfoNotFound) - } - - return sigs.Sign(ActSigType(ki.Type), ki.PrivateKey, msg) -} - -func (w *Wallet) findKey(addr address.Address) (*Key, error) { - w.lk.Lock() - defer w.lk.Unlock() - - k, ok := w.keys[addr] - if ok { - return k, nil - } - if w.keystore == nil { - log.Warn("findKey didn't find the key in in-memory wallet") - return nil, nil - } - - ki, err := w.keystore.Get(KNamePrefix + addr.String()) - if err != nil { - if xerrors.Is(err, types.ErrKeyInfoNotFound) { - return nil, nil - } - return nil, xerrors.Errorf("getting from keystore: %w", err) - } - k, err = NewKey(ki) - if err != nil { - return nil, xerrors.Errorf("decoding from keystore: %w", err) - } - w.keys[k.Address] = k - return k, nil -} - -func (w *Wallet) Export(addr address.Address) (*types.KeyInfo, error) { - k, err := w.findKey(addr) - if err != nil { - return nil, xerrors.Errorf("failed to find key to export: %w", err) - } - - return &k.KeyInfo, nil -} - -func (w *Wallet) Import(ki *types.KeyInfo) (address.Address, error) { - w.lk.Lock() - defer w.lk.Unlock() - - k, err := NewKey(*ki) - if err != nil { - return address.Undef, xerrors.Errorf("failed to make key: %w", err) - } - - if err := w.keystore.Put(KNamePrefix+k.Address.String(), k.KeyInfo); err != nil { - return address.Undef, xerrors.Errorf("saving to keystore: %w", err) - } - - return k.Address, nil -} - -func (w *Wallet) ListAddrs() ([]address.Address, error) { - all, err := w.keystore.List() - if err != nil { - return nil, 
xerrors.Errorf("listing keystore: %w", err) - } - - sort.Strings(all) - - out := make([]address.Address, 0, len(all)) - for _, a := range all { - if strings.HasPrefix(a, KNamePrefix) { - name := strings.TrimPrefix(a, KNamePrefix) - addr, err := address.NewFromString(name) - if err != nil { - return nil, xerrors.Errorf("converting name to address: %w", err) - } - out = append(out, addr) - } - } - - return out, nil -} - -func (w *Wallet) GetDefault() (address.Address, error) { - w.lk.Lock() - defer w.lk.Unlock() - - ki, err := w.keystore.Get(KDefault) - if err != nil { - return address.Undef, xerrors.Errorf("failed to get default key: %w", err) - } - - k, err := NewKey(ki) - if err != nil { - return address.Undef, xerrors.Errorf("failed to read default key from keystore: %w", err) - } - - return k.Address, nil -} - -func (w *Wallet) SetDefault(a address.Address) error { - w.lk.Lock() - defer w.lk.Unlock() - - ki, err := w.keystore.Get(KNamePrefix + a.String()) - if err != nil { - return err - } - - if err := w.keystore.Delete(KDefault); err != nil { - if !xerrors.Is(err, types.ErrKeyInfoNotFound) { - log.Warnf("failed to unregister current default key: %s", err) - } - } - - if err := w.keystore.Put(KDefault, ki); err != nil { - return err - } - - return nil -} - -func GenerateKey(typ crypto.SigType) (*Key, error) { - pk, err := sigs.Generate(typ) - if err != nil { - return nil, err - } - ki := types.KeyInfo{ - Type: kstoreSigType(typ), - PrivateKey: pk, - } - return NewKey(ki) -} - -func (w *Wallet) GenerateKey(typ crypto.SigType) (address.Address, error) { - w.lk.Lock() - defer w.lk.Unlock() - - k, err := GenerateKey(typ) - if err != nil { - return address.Undef, err - } - - if err := w.keystore.Put(KNamePrefix+k.Address.String(), k.KeyInfo); err != nil { - return address.Undef, xerrors.Errorf("saving to keystore: %w", err) - } - w.keys[k.Address] = k - - _, err = w.keystore.Get(KDefault) - if err != nil { - if !xerrors.Is(err, types.ErrKeyInfoNotFound) { - return 
address.Undef, err - } - - if err := w.keystore.Put(KDefault, k.KeyInfo); err != nil { - return address.Undef, xerrors.Errorf("failed to set new key as default: %w", err) - } - } - - return k.Address, nil -} - -func (w *Wallet) HasKey(addr address.Address) (bool, error) { - k, err := w.findKey(addr) - if err != nil { - return false, err - } - return k != nil, nil -} - -func (w *Wallet) DeleteKey(addr address.Address) error { - k, err := w.findKey(addr) - if err != nil { - return xerrors.Errorf("failed to delete key %s : %w", addr, err) - } - - if err := w.keystore.Put(KTrashPrefix+k.Address.String(), k.KeyInfo); err != nil { - return xerrors.Errorf("failed to mark key %s as trashed: %w", addr, err) - } - - if err := w.keystore.Delete(KNamePrefix + k.Address.String()); err != nil { - return xerrors.Errorf("failed to delete key %s: %w", addr, err) - } - - return nil -} - -type Key struct { - types.KeyInfo - - PublicKey []byte - Address address.Address -} - -func NewKey(keyinfo types.KeyInfo) (*Key, error) { - k := &Key{ - KeyInfo: keyinfo, - } - - var err error - k.PublicKey, err = sigs.ToPublic(ActSigType(k.Type), k.PrivateKey) - if err != nil { - return nil, err - } - - switch k.Type { - case KTSecp256k1: - k.Address, err = address.NewSecp256k1Address(k.PublicKey) - if err != nil { - return nil, xerrors.Errorf("converting Secp256k1 to address: %w", err) - } - case KTBLS: - k.Address, err = address.NewBLSAddress(k.PublicKey) - if err != nil { - return nil, xerrors.Errorf("converting BLS to address: %w", err) - } - default: - return nil, xerrors.Errorf("unknown key type") - } - return k, nil - -} - -func kstoreSigType(typ crypto.SigType) string { - switch typ { - case crypto.SigTypeBLS: - return KTBLS - case crypto.SigTypeSecp256k1: - return KTSecp256k1 - default: - return "" - } -} - -func ActSigType(typ string) crypto.SigType { - switch typ { - case KTBLS: - return crypto.SigTypeBLS - case KTSecp256k1: - return crypto.SigTypeSecp256k1 - default: - return 0 - } -} 
diff --git a/vendor/github.com/filecoin-project/lotus/cli/auth.go b/vendor/github.com/filecoin-project/lotus/cli/auth.go deleted file mode 100644 index d59ac37a56..0000000000 --- a/vendor/github.com/filecoin-project/lotus/cli/auth.go +++ /dev/null @@ -1,135 +0,0 @@ -package cli - -import ( - "fmt" - - "github.com/urfave/cli/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-jsonrpc/auth" - - "github.com/filecoin-project/lotus/api/apistruct" - "github.com/filecoin-project/lotus/node/repo" -) - -var authCmd = &cli.Command{ - Name: "auth", - Usage: "Manage RPC permissions", - Subcommands: []*cli.Command{ - authCreateAdminToken, - authApiInfoToken, - }, -} - -var authCreateAdminToken = &cli.Command{ - Name: "create-token", - Usage: "Create token", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "perm", - Usage: "permission to assign to the token, one of: read, write, sign, admin", - }, - }, - - Action: func(cctx *cli.Context) error { - napi, closer, err := GetAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := ReqContext(cctx) - - if !cctx.IsSet("perm") { - return xerrors.New("--perm flag not set") - } - - perm := cctx.String("perm") - idx := 0 - for i, p := range apistruct.AllPermissions { - if auth.Permission(perm) == p { - idx = i + 1 - } - } - - if idx == 0 { - return fmt.Errorf("--perm flag has to be one of: %s", apistruct.AllPermissions) - } - - // slice on [:idx] so for example: 'sign' gives you [read, write, sign] - token, err := napi.AuthNew(ctx, apistruct.AllPermissions[:idx]) - if err != nil { - return err - } - - // TODO: Log in audit log when it is implemented - - fmt.Println(string(token)) - return nil - }, -} - -var authApiInfoToken = &cli.Command{ - Name: "api-info", - Usage: "Get token with API info required to connect to this node", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "perm", - Usage: "permission to assign to the token, one of: read, write, sign, admin", - }, - }, - - Action: func(cctx *cli.Context) error 
{ - napi, closer, err := GetAPI(cctx) - if err != nil { - return err - } - defer closer() - - ctx := ReqContext(cctx) - - if !cctx.IsSet("perm") { - return xerrors.New("--perm flag not set") - } - - perm := cctx.String("perm") - idx := 0 - for i, p := range apistruct.AllPermissions { - if auth.Permission(perm) == p { - idx = i + 1 - } - } - - if idx == 0 { - return fmt.Errorf("--perm flag has to be one of: %s", apistruct.AllPermissions) - } - - // slice on [:idx] so for example: 'sign' gives you [read, write, sign] - token, err := napi.AuthNew(ctx, apistruct.AllPermissions[:idx]) - if err != nil { - return err - } - - ti, ok := cctx.App.Metadata["repoType"] - if !ok { - log.Errorf("unknown repo type, are you sure you want to use GetAPI?") - ti = repo.FullNode - } - t, ok := ti.(repo.RepoType) - if !ok { - log.Errorf("repoType type does not match the type of repo.RepoType") - } - - ainfo, err := GetAPIInfo(cctx, t) - if err != nil { - return xerrors.Errorf("could not get API info: %w", err) - } - - envVar := envForRepo(t) - - // TODO: Log in audit log when it is implemented - - fmt.Printf("%s=%s:%s\n", envVar, string(token), ainfo.Addr) - return nil - }, -} diff --git a/vendor/github.com/filecoin-project/lotus/cli/chain.go b/vendor/github.com/filecoin-project/lotus/cli/chain.go deleted file mode 100644 index c2acee5e5e..0000000000 --- a/vendor/github.com/filecoin-project/lotus/cli/chain.go +++ /dev/null @@ -1,952 +0,0 @@ -package cli - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "os" - "os/exec" - "path" - "strconv" - "strings" - "time" - - "github.com/filecoin-project/go-address" - cborutil "github.com/filecoin-project/go-cbor-util" - "github.com/filecoin-project/specs-actors/actors/abi" - "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/actors/builtin/account" - "github.com/filecoin-project/specs-actors/actors/builtin/market" - "github.com/filecoin-project/specs-actors/actors/builtin/miner" - 
"github.com/filecoin-project/specs-actors/actors/builtin/power" - "github.com/filecoin-project/specs-actors/actors/util/adt" - cid "github.com/ipfs/go-cid" - "github.com/urfave/cli/v2" - cbg "github.com/whyrusleeping/cbor-gen" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/actors" - types "github.com/filecoin-project/lotus/chain/types" -) - -var chainCmd = &cli.Command{ - Name: "chain", - Usage: "Interact with filecoin blockchain", - Subcommands: []*cli.Command{ - chainHeadCmd, - chainGetBlock, - chainReadObjCmd, - chainStatObjCmd, - chainGetMsgCmd, - chainSetHeadCmd, - chainListCmd, - chainGetCmd, - chainBisectCmd, - chainExportCmd, - slashConsensusFault, - }, -} - -var chainHeadCmd = &cli.Command{ - Name: "head", - Usage: "Print chain head", - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - head, err := api.ChainHead(ctx) - if err != nil { - return err - } - - for _, c := range head.Cids() { - fmt.Println(c) - } - return nil - }, -} - -var chainGetBlock = &cli.Command{ - Name: "getblock", - Usage: "Get a block and print its details", - ArgsUsage: "[blockCid]", - Flags: []cli.Flag{ - &cli.BoolFlag{ - Name: "raw", - Usage: "print just the raw block header", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - if !cctx.Args().Present() { - return fmt.Errorf("must pass cid of block to print") - } - - bcid, err := cid.Decode(cctx.Args().First()) - if err != nil { - return err - } - - blk, err := api.ChainGetBlock(ctx, bcid) - if err != nil { - return xerrors.Errorf("get block failed: %w", err) - } - - if cctx.Bool("raw") { - out, err := json.MarshalIndent(blk, "", " ") - if err != nil { - return err - } - - fmt.Println(string(out)) - return nil - } - - msgs, err := 
api.ChainGetBlockMessages(ctx, bcid) - if err != nil { - return xerrors.Errorf("failed to get messages: %w", err) - } - - pmsgs, err := api.ChainGetParentMessages(ctx, bcid) - if err != nil { - return xerrors.Errorf("failed to get parent messages: %w", err) - } - - recpts, err := api.ChainGetParentReceipts(ctx, bcid) - if err != nil { - log.Warn(err) - //return xerrors.Errorf("failed to get receipts: %w", err) - } - - cblock := struct { - types.BlockHeader - BlsMessages []*types.Message - SecpkMessages []*types.SignedMessage - ParentReceipts []*types.MessageReceipt - ParentMessages []cid.Cid - }{} - - cblock.BlockHeader = *blk - cblock.BlsMessages = msgs.BlsMessages - cblock.SecpkMessages = msgs.SecpkMessages - cblock.ParentReceipts = recpts - cblock.ParentMessages = apiMsgCids(pmsgs) - - out, err := json.MarshalIndent(cblock, "", " ") - if err != nil { - return err - } - - fmt.Println(string(out)) - return nil - - }, -} - -func apiMsgCids(in []api.Message) []cid.Cid { - out := make([]cid.Cid, len(in)) - for k, v := range in { - out[k] = v.Cid - } - return out -} - -var chainReadObjCmd = &cli.Command{ - Name: "read-obj", - Usage: "Read the raw bytes of an object", - ArgsUsage: "[objectCid]", - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - c, err := cid.Decode(cctx.Args().First()) - if err != nil { - return fmt.Errorf("failed to parse cid input: %s", err) - } - - obj, err := api.ChainReadObj(ctx, c) - if err != nil { - return err - } - - fmt.Printf("%x\n", obj) - return nil - }, -} - -var chainStatObjCmd = &cli.Command{ - Name: "stat-obj", - Usage: "Collect size and ipld link counts for objs", - ArgsUsage: "[cid]", - Description: `Collect object size and ipld link count for an object. - - When a base is provided it will be walked first, and all links visisted - will be ignored when the passed in object is walked. 
-`, - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "base", - Usage: "ignore links found in this obj", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - obj, err := cid.Decode(cctx.Args().First()) - if err != nil { - return fmt.Errorf("failed to parse cid input: %s", err) - } - - base := cid.Undef - if cctx.IsSet("base") { - base, err = cid.Decode(cctx.String("base")) - if err != nil { - return err - } - } - - stats, err := api.ChainStatObj(ctx, obj, base) - if err != nil { - return err - } - - fmt.Printf("Links: %d\n", stats.Links) - fmt.Printf("Size: %s (%d)\n", types.SizeStr(types.NewInt(stats.Size)), stats.Size) - return nil - }, -} - -var chainGetMsgCmd = &cli.Command{ - Name: "getmessage", - Usage: "Get and print a message by its cid", - ArgsUsage: "[messageCid]", - Action: func(cctx *cli.Context) error { - if !cctx.Args().Present() { - return fmt.Errorf("must pass a cid of a message to get") - } - - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - c, err := cid.Decode(cctx.Args().First()) - if err != nil { - return xerrors.Errorf("failed to parse cid input: %w", err) - } - - mb, err := api.ChainReadObj(ctx, c) - if err != nil { - return xerrors.Errorf("failed to read object: %w", err) - } - - var i interface{} - m, err := types.DecodeMessage(mb) - if err != nil { - sm, err := types.DecodeSignedMessage(mb) - if err != nil { - return xerrors.Errorf("failed to decode object as a message: %w", err) - } - i = sm - } else { - i = m - } - - enc, err := json.MarshalIndent(i, "", " ") - if err != nil { - return err - } - - fmt.Println(string(enc)) - return nil - }, -} - -var chainSetHeadCmd = &cli.Command{ - Name: "sethead", - Usage: "manually set the local nodes head tipset (Caution: normally only used for recovery)", - ArgsUsage: "[tipsetkey]", - Flags: []cli.Flag{ - 
&cli.BoolFlag{ - Name: "genesis", - Usage: "reset head to genesis", - }, - &cli.Uint64Flag{ - Name: "epoch", - Usage: "reset head to given epoch", - }, - }, - Action: func(cctx *cli.Context) error { - api, closer, err := GetFullNodeAPI(cctx) - if err != nil { - return err - } - defer closer() - ctx := ReqContext(cctx) - - var ts *types.TipSet - - if cctx.Bool("genesis") { - ts, err = api.ChainGetGenesis(ctx) - } - if ts == nil && cctx.IsSet("epoch") { - ts, err = api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(cctx.Uint64("epoch")), types.EmptyTSK) - } - if ts == nil { - ts, err = parseTipSet(api, ctx, cctx.Args().Slice()) - } - if err != nil { - return err - } - - if ts == nil { - return fmt.Errorf("must pass cids for tipset to set as head") - } - - if err := api.ChainSetHead(ctx, ts.Key()); err != nil { - return err - } - - return nil - }, -} - -func parseTipSet(api api.FullNode, ctx context.Context, vals []string) (*types.TipSet, error) { - var headers []*types.BlockHeader - for _, c := range vals { - blkc, err := cid.Decode(c) - if err != nil { - return nil, err - } - - bh, err := api.ChainGetBlock(ctx, blkc) - if err != nil { - return nil, err - } - - headers = append(headers, bh) - } - - return types.NewTipSet(headers) -} - -var chainListCmd = &cli.Command{ - Name: "list", - Usage: "View a segment of the chain", - Flags: []cli.Flag{ - &cli.Uint64Flag{Name: "height"}, - &cli.IntFlag{Name: "count", Value: 30}, - &cli.StringFlag{ - Name: "format", - Usage: "specify the format to print out tipsets", - Value: ": (