diff --git a/Cargo.lock b/Cargo.lock index c2b5d446e..7d8e3ebfa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,9 +15,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27" +checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" [[package]] name = "arrayref" @@ -178,9 +178,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" [[package]] name = "base64" @@ -320,16 +320,16 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.8" +version = "3.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" +checksum = "7c167e37342afc5f33fd87bbc870cedd020d2a6dffa05d45ccd9241fbdd146db" dependencies = [ "atty", "bitflags", "clap_derive", + "clap_lex", "indexmap", "lazy_static", - "os_str_bytes", "strsim", "termcolor", "textwrap", @@ -348,6 +348,15 @@ dependencies = [ "syn", ] +[[package]] +name = "clap_lex" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189ddd3b5d32a70b35e7686054371742a937b0d99128e76dde6340210e966669" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "concurrent-queue" version = "1.2.2" @@ -594,7 +603,6 @@ dependencies = [ name = "fil_actor_init" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", @@ -612,7 +620,6 @@ name = "fil_actor_market" version = "8.0.0-alpha.1" dependencies = [ "ahash", - "anyhow", "cid", "fil_actor_power", 
"fil_actor_reward", @@ -636,7 +643,6 @@ dependencies = [ name = "fil_actor_miner" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "byteorder", "cid", "fil_actor_account", @@ -658,13 +664,13 @@ dependencies = [ "num-traits", "rand", "serde", + "thiserror", ] [[package]] name = "fil_actor_multisig" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", @@ -699,7 +705,6 @@ dependencies = [ name = "fil_actor_power" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", @@ -750,7 +755,6 @@ dependencies = [ name = "fil_actor_verifreg" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", @@ -774,6 +778,7 @@ dependencies = [ "cid", "derive_builder", "fvm_ipld_amt", + "fvm_ipld_bitfield", "fvm_ipld_blockstore", "fvm_ipld_encoding", "fvm_ipld_hamt", @@ -937,11 +942,8 @@ dependencies = [ [[package]] name = "fvm_ipld_amt" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3394e5f9c2adb4d586519bc24bbfd659366e01e7ffa6cda676be94a62bab474" dependencies = [ "ahash", - "anyhow", "cid", "fvm_ipld_blockstore", "fvm_ipld_encoding", @@ -954,8 +956,6 @@ dependencies = [ [[package]] name = "fvm_ipld_bitfield" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9011349297962982b8ab2663c220034525ec0f95f408c2b561d3d98867f1a803" dependencies = [ "cs_serde_bytes", "fvm_ipld_encoding", @@ -967,10 +967,7 @@ dependencies = [ [[package]] name = "fvm_ipld_blockstore" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1985eae58ec2fbf54535ce115c72a2141459fb7fb4ff7379e17bffae0e302578" dependencies = [ - "anyhow", "cid", ] @@ -992,10 +989,7 @@ dependencies = [ [[package]] name = "fvm_ipld_encoding" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"43bd635987aac46a753ec81767713af35cb50f182c7cc49d3a429643ede0e709" dependencies = [ - "anyhow", "cid", "cs_serde_bytes", "fvm_ipld_blockstore", @@ -1008,11 +1002,8 @@ dependencies = [ [[package]] name = "fvm_ipld_hamt" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a03c6ae361a882360bc0c0f47265b294429f096baa8d9467247bbd62c6a6683c" +version = "0.5.0" dependencies = [ - "anyhow", "byteorder", "cid", "cs_serde_bytes", @@ -1030,8 +1021,6 @@ dependencies = [ [[package]] name = "fvm_sdk" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cd152ce62acadd75bdb461dcb8009389a3a7583c0832a122537da8fc17d73e7" dependencies = [ "cid", "fvm_ipld_encoding", @@ -1076,10 +1065,7 @@ dependencies = [ [[package]] name = "fvm_shared" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "857cf6a95b35d583e8db25d38a939af335cb2eba2f8a5e1f1c0be58f77d52f5b" dependencies = [ - "anyhow", "bimap", "blake2b_simd", "byteorder", @@ -1129,9 +1115,9 @@ dependencies = [ [[package]] name = "gloo-timers" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d12a7f4e95cfe710f1d624fb1210b7d961a5fb05c4fd942f4feab06e61f590e" +checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" dependencies = [ "futures-channel", "futures-core", @@ -1225,9 +1211,9 @@ checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" dependencies = [ "wasm-bindgen", ] @@ -1255,9 +1241,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = 
"0.2.121" +version = "0.2.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" +checksum = "21a41fed9d98f27ab1c6d161da622a4fa35e8a54a8adc24bbf3ddd0ef70b0e50" [[package]] name = "libipld-core" @@ -1391,9 +1377,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", @@ -1442,9 +1428,6 @@ name = "os_str_bytes" version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] [[package]] name = "parking" @@ -1454,9 +1437,9 @@ checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1519,18 +1502,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" dependencies = [ "unicode-xid", ] [[package]] name = "quote" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" +checksum = 
"a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ "proc-macro2", ] @@ -1708,9 +1691,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.90" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704df27628939572cd88d33f171cd6f896f4eaca85252c6e0a72d8d8287ee86f" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" dependencies = [ "proc-macro2", "quote", @@ -1807,9 +1790,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] @@ -1862,9 +1845,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -1872,9 +1855,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" dependencies = [ "bumpalo", "lazy_static", @@ -1887,9 +1870,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = 
"6f741de44b75e14c35df886aff5f1eb73aa114fa5d4d00dcd37b5e01259bf3b2" dependencies = [ "cfg-if", "js-sys", @@ -1899,9 +1882,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1909,9 +1892,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" dependencies = [ "proc-macro2", "quote", @@ -1922,15 +1905,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/Cargo.toml b/Cargo.toml index 237a58179..dfdd8b193 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,14 +70,14 @@ members = [ ## Uncomment entries below when working locally on ref-fvm and this repo simultaneously. ## Assumes the ref-fvm checkout is in a sibling directory with the same name. 
## (Valid once FVM modules are published to crates.io) -# [patch.crates-io] -# fvm_shared = { path = "../ref-fvm/shared" } -# fvm_sdk = { path = "../ref-fvm/sdk" } -# fvm_ipld_hamt = { path = "../ref-fvm/ipld/hamt" } -# fvm_ipld_amt = { path = "../ref-fvm/ipld/amt" } -# fvm_ipld_bitfield = { path = "../ref-fvm/ipld/bitfield"} -# fvm_ipld_encoding = { path = "../ref-fvm/ipld/encoding"} -# fvm_ipld_blockstore = { path = "../ref-fvm/ipld/blockstore"} +[patch.crates-io] +fvm_shared = { path = "../ref-fvm/shared" } +fvm_sdk = { path = "../ref-fvm/sdk" } +fvm_ipld_hamt = { path = "../ref-fvm/ipld/hamt" } +fvm_ipld_amt = { path = "../ref-fvm/ipld/amt" } +fvm_ipld_bitfield = { path = "../ref-fvm/ipld/bitfield"} +fvm_ipld_encoding = { path = "../ref-fvm/ipld/encoding"} +fvm_ipld_blockstore = { path = "../ref-fvm/ipld/blockstore"} [profile.wasm] inherits = "release" diff --git a/actors/account/src/lib.rs b/actors/account/src/lib.rs index f2838ba74..64da02615 100644 --- a/actors/account/src/lib.rs +++ b/actors/account/src/lib.rs @@ -4,14 +4,15 @@ use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::address::{Address, Protocol}; +use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; use fil_actors_runtime::builtin::singletons::SYSTEM_ACTOR_ADDR; -use fil_actors_runtime::cbor; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{actor_error, ActorError}; +use fil_actors_runtime::{cbor, ActorContext2}; pub use self::state::State; @@ -80,7 +81,7 @@ impl ActorCode for Actor { } Some(Method::PubkeyAddress) => { let addr = Self::pubkey_address(rt)?; - Ok(RawBytes::serialize(addr)?) + Ok(RawBytes::serialize(addr).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/init/Cargo.toml b/actors/init/Cargo.toml index 51bda4e5d..e252dfac6 100644 --- a/actors/init/Cargo.toml +++ b/actors/init/Cargo.toml @@ -16,12 +16,11 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } fvm_shared = { version = "0.6.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } num-traits = "0.2.14" num-derive = "0.3.3" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } -anyhow = "1.0.56" log = "0.4.14" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/init/src/lib.rs b/actors/init/src/lib.rs index d602368b4..e3a67874a 100644 --- a/actors/init/src/lib.rs +++ b/actors/init/src/lib.rs @@ -3,7 +3,7 @@ use cid::Cid; use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, cbor, ActorDowncast, ActorError, SYSTEM_ACTOR_ADDR}; +use fil_actors_runtime::{actor_error, cbor, ActorContext2, ActorError, SYSTEM_ACTOR_ADDR}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; @@ -43,9 +43,7 @@ impl Actor { { let sys_ref: &Address = &SYSTEM_ACTOR_ADDR; rt.validate_immediate_caller_is(std::iter::once(sys_ref))?; - let state = State::new(rt.store(), params.network_name).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct init actor state") - })?; + let state = State::new(rt.store(), params.network_name)?; rt.create(&state)?; @@ -86,9 +84,8 @@ impl Actor { // Allocate an ID for this actor. 
// Store mapping of pubkey or actor address to actor ID let id_address: ActorID = rt.transaction(|s: &mut State, rt| { - s.map_address_to_new_id(rt.store(), &robust_address).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to allocate ID address") - }) + s.map_address_to_new_id(rt.store(), &robust_address) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to allocate ID address") })?; // Create an empty actor @@ -124,7 +121,7 @@ impl ActorCode for Actor { } Some(Method::Exec) => { let res = Self::exec(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/init/src/state.rs b/actors/init/src/state.rs index a08b75e53..4abb6a208 100644 --- a/actors/init/src/state.rs +++ b/actors/init/src/state.rs @@ -1,16 +1,17 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::anyhow; use cid::Cid; +use fil_actors_runtime::ActorError; use fil_actors_runtime::{ - make_empty_map, make_map_with_root_and_bitwidth, FIRST_NON_SINGLETON_ADDR, + make_empty_map, make_map_with_root_and_bitwidth, ActorContext2, FIRST_NON_SINGLETON_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; use fvm_ipld_hamt::Error as HamtError; use fvm_shared::address::{Address, Protocol}; +use fvm_shared::error::ExitCode; use fvm_shared::{ActorID, HAMT_BIT_WIDTH}; /// State is reponsible for creating @@ -22,10 +23,11 @@ pub struct State { } impl State { - pub fn new(store: &BS, network_name: String) -> anyhow::Result { + pub fn new(store: &BS, network_name: String) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .map_err(|e| anyhow!("failed to create empty map: {}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create empty map")?; + Ok(Self { address_map: 
empty_map, next_id: FIRST_NON_SINGLETON_ADDR, network_name }) } @@ -35,7 +37,7 @@ impl State { &mut self, store: &BS, addr: &Address, - ) -> Result { + ) -> Result> { let id = self.next_id; self.next_id += 1; @@ -60,14 +62,19 @@ impl State { &self, store: &BS, addr: &Address, - ) -> anyhow::Result> { + ) -> Result, ActorError> { if addr.protocol() == Protocol::ID { return Ok(Some(*addr)); } - let map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH)?; + let map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; - Ok(map.get(&addr.to_bytes())?.copied().map(Address::new_id)) + Ok(map + .get(&addr.to_bytes()) + .exit_code(ExitCode::USR_ILLEGAL_STATE)? + .copied() + .map(Address::new_id)) } } diff --git a/actors/market/Cargo.toml b/actors/market/Cargo.toml index f7b9c6a3b..58ca56ab2 100644 --- a/actors/market/Cargo.toml +++ b/actors/market/Cargo.toml @@ -15,7 +15,7 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" fvm_shared = { version = "0.6.0", default-features = false } fvm_ipld_bitfield = "0.5.0" num-traits = "0.2.14" @@ -24,7 +24,6 @@ ahash = "0.7.6" serde = { version = "1.0.136", features = ["derive"] } cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } log = "0.4.14" -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" libipld-core = { version = "0.13.1", features = ["serde-codec"] } diff --git a/actors/market/src/balance_table.rs b/actors/market/src/balance_table.rs index 2f4de73a1..4a7a41f1b 100644 --- a/actors/market/src/balance_table.rs +++ b/actors/market/src/balance_table.rs @@ -4,12 +4,14 @@ use cid::Cid; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_hamt::Error as HamtError; -use fvm_shared::address::Address; use 
fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::econ::TokenAmount; +use fvm_shared::{address::Address, error::ExitCode}; use num_traits::{Signed, Zero}; -use fil_actors_runtime::{make_empty_map, make_map_with_root_and_bitwidth, Map}; +use fil_actors_runtime::{ + actor_error, make_empty_map, make_map_with_root_and_bitwidth, ActorContext2, ActorError, Map, +}; pub const BALANCE_TABLE_BITWIDTH: u32 = 6; @@ -25,17 +27,17 @@ where } /// Initializes a balance table from a root Cid - pub fn from_root(bs: &'a BS, cid: &Cid) -> Result { + pub fn from_root(bs: &'a BS, cid: &Cid) -> Result> { Ok(Self(make_map_with_root_and_bitwidth(cid, bs, BALANCE_TABLE_BITWIDTH)?)) } /// Retrieve root from balance table - pub fn root(&mut self) -> Result { + pub fn root(&mut self) -> Result> { self.0.flush() } /// Gets token amount for given address in balance table - pub fn get(&self, key: &Address) -> Result { + pub fn get(&self, key: &Address) -> Result> { if let Some(v) = self.0.get(&key.to_bytes())? { Ok(v.0.clone()) } else { @@ -44,16 +46,23 @@ where } /// Adds token amount to previously initialized account. 
- pub fn add(&mut self, key: &Address, value: &TokenAmount) -> Result<(), HamtError> { - let prev = self.get(key)?; + pub fn add(&mut self, key: &Address, value: &TokenAmount) -> Result<(), ActorError> { + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; let sum = &prev + value; if sum.is_negative() { - Err(format!("New balance in table cannot be negative: {}", sum).into()) - } else if sum.is_zero() && !prev.is_zero() { - self.0.delete(&key.to_bytes())?; + return Err(actor_error!( + illegal_argument, + "new balance in table cannot be negative: {}", + sum + )); + } + if sum.is_zero() && !prev.is_zero() { + self.0.delete(&key.to_bytes()).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } else { - self.0.set(key.to_bytes().into(), BigIntDe(sum))?; + self.0 + .set(key.to_bytes().into(), BigIntDe(sum)) + .exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } } @@ -66,8 +75,8 @@ where key: &Address, req: &TokenAmount, floor: &TokenAmount, - ) -> Result { - let prev = self.get(key)?; + ) -> Result { + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; let available = std::cmp::max(TokenAmount::zero(), prev - floor); let sub: TokenAmount = std::cmp::min(&available, req).clone(); @@ -79,24 +88,24 @@ where } /// Subtracts value from a balance, and errors if full amount was not substracted. 
- pub fn must_subtract(&mut self, key: &Address, req: &TokenAmount) -> Result<(), HamtError> { - let prev = self.get(key)?; + pub fn must_subtract(&mut self, key: &Address, req: &TokenAmount) -> Result<(), ActorError> { + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; if req > &prev { - Err("couldn't subtract the requested amount".into()) - } else { - self.add(key, &-req) + return Err(actor_error!(illegal_argument, "couldn't subtract the requested amount")); } + self.add(key, &-req)?; + + Ok(()) } /// Returns total balance held by this balance table #[allow(dead_code)] - pub fn total(&self) -> Result { + pub fn total(&self) -> Result> { let mut total = TokenAmount::default(); self.0.for_each(|_, v: &BigIntDe| { total += &v.0; - Ok(()) })?; Ok(total) diff --git a/actors/market/src/lib.rs b/actors/market/src/lib.rs index 8c5757147..4b1abce45 100644 --- a/actors/market/src/lib.rs +++ b/actors/market/src/lib.rs @@ -24,8 +24,9 @@ use num_traits::{FromPrimitive, Signed, Zero}; use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorDowncast, ActorError, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, - REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + VERIFIED_REGISTRY_ACTOR_ADDR, }; use crate::ext::verifreg::UseBytesParams; @@ -60,7 +61,8 @@ where RawBytes::default(), TokenAmount::zero(), )?; - let addrs: ext::miner::GetControlAddressesReturnParams = ret.deserialize()?; + let addrs: ext::miner::GetControlAddressesReturnParams = + ret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok((addrs.owner, addrs.worker, addrs.control_addresses)) } @@ -92,9 +94,7 @@ impl Actor { { 
rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let st = State::new(rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create market state") - })?; + let st = State::new(rt.store()).context("Failed to create market state")?; rt.create(&st)?; Ok(()) } @@ -125,20 +125,15 @@ impl Actor { msm.with_escrow_table(Permission::Write) .with_locked_table(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; - msm.escrow_table.as_mut().unwrap().add(&nominal, &msg_value).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to add balance to escrow table", - ) - })?; + msm.escrow_table + .as_mut() + .unwrap() + .add(&nominal, &msg_value) + .context("failed to add balance to escrow table")?; - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; @@ -170,32 +165,26 @@ impl Actor { msm.with_escrow_table(Permission::Write) .with_locked_table(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; // The withdrawable amount might be slightly less than nominal // depending on whether or not all relevant entries have been processed // by cron - let min_balance = msm.locked_table.as_ref().unwrap().get(&nominal).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance") - })?; + let min_balance = msm + .locked_table + .as_ref() + .unwrap() + .get(&nominal) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; let ex = msm .escrow_table .as_mut() .unwrap() .subtract_with_minimum(&nominal, ¶ms.amount, &min_balance) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to subtract 
from escrow table", - ) - })?; + .context("failed to subtract from escrow table")?; - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(ex) })?; @@ -275,7 +264,7 @@ impl Actor { .with_escrow_table(Permission::ReadOnly) .with_locked_table(Permission::ReadOnly) .build() - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load msm"))?; + .context("failed to load msm")?; for (di, mut deal) in params.deals.into_iter().enumerate() { // drop malformed deals @@ -307,25 +296,18 @@ impl Actor { let lockup = total_client_lockup.entry(client_id).or_default(); *lockup += deal.proposal.client_balance_requirement(); - let client_balance_ok = msm.balance_covered(client, lockup).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to check client balance coverage", - ) - })?; + let client_balance_ok = msm + .balance_covered(client, lockup) + .context("failed to check client balance coverage")?; if !client_balance_ok { info!("invalid deal: {}: insufficient client funds to cover proposal cost", di); continue; } total_provider_lockup += &deal.proposal.provider_collateral; - let provider_balance_ok = - msm.balance_covered(provider, &total_provider_lockup).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to check provider balance coverage", - ) - })?; + let provider_balance_ok = msm + .balance_covered(provider, &total_provider_lockup) + .context("failed to check provider balance coverage")?; if !provider_balance_ok { info!("invalid deal: {}: insufficient provider funds to cover proposal cost", di); @@ -345,12 +327,10 @@ impl Actor { // check proposalCids for duplication within message batch // check state PendingProposals for duplication across messages let duplicate_in_state = - msm.pending_deals.as_ref().unwrap().has(&pcid.to_bytes()).map_err(|e| { - e.downcast_default( - 
ExitCode::USR_ILLEGAL_STATE, - "failed to check for existence of deal proposal", - ) - })?; + msm.pending_deals.as_ref().unwrap().has(&pcid.to_bytes()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to check for existence of deal proposal", + )?; let duplicate_in_message = proposal_cid_lookup.contains(&pcid); if duplicate_in_state || duplicate_in_message { info!("invalid deal {}: cannot publish duplicate deal proposal", di); @@ -366,7 +346,8 @@ impl Actor { RawBytes::serialize(UseBytesParams { address: client, deal_size: BigInt::from(deal.proposal.piece_size.0), - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ) { info!("invalid deal {}: failed to acquire datacap exitcode: {}", di, e); @@ -410,9 +391,7 @@ impl Actor { .with_escrow_table(Permission::Write) .with_locked_table(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; // All storage dealProposals will be added in an atomic transaction; this operation will be unrolled if any of them fails. // This should only fail on programmer error because all expected invalid conditions should be filtered in the first set of checks. 
for (vid, valid_deal) in valid_deals.iter().enumerate() { @@ -422,31 +401,32 @@ impl Actor { let pcid = valid_proposal_cids[vid]; - msm.pending_deals.as_mut().unwrap().put(pcid.to_bytes().into()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set pending deal") - })?; - msm.deal_proposals.as_mut().unwrap().set(id, valid_deal.proposal.clone()).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set deal"), - )?; + msm.pending_deals + .as_mut() + .unwrap() + .put(pcid.to_bytes().into()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set pending deal")?; + msm.deal_proposals + .as_mut() + .unwrap() + .set(id, valid_deal.proposal.clone()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal")?; // We randomize the first epoch for when the deal will be processed so an attacker isn't able to // schedule too many deals for the same tick. let process_epoch = gen_rand_next_epoch(rt.policy(), valid_deal.proposal.start_epoch, id); - msm.deals_by_epoch.as_mut().unwrap().put(process_epoch, id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to set deal ops by epoch", - ) - })?; + msm.deals_by_epoch + .as_mut() + .unwrap() + .put(process_epoch, id) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal ops by epoch")?; new_deal_ids.push(id); } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; @@ -470,9 +450,8 @@ impl Actor { let curr_epoch = rt.curr_epoch(); let st: State = rt.state()?; - let proposals = DealArray::load(&st.proposals, rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals") - })?; + let proposals = DealArray::load(&st.proposals, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals")?; let mut weights = 
Vec::with_capacity(params.sectors.len()); for sector in params.sectors.iter() { @@ -483,12 +462,7 @@ impl Actor { sector.sector_expiry, curr_epoch, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to validate deal proposals for activation", - ) - })?; + .context("failed to validate deal proposals for activation")?; weights.push(SectorWeights { deal_space, deal_weight, verified_deal_weight }); } @@ -516,31 +490,26 @@ impl Actor { params.sector_expiry, curr_epoch, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to validate deal proposals for activation", - ) - })?; + .context("failed to validate deal proposals for activation")?; let mut msm = st.mutator(rt.store()); msm.with_deal_states(Permission::Write) .with_pending_proposals(Permission::ReadOnly) .with_deal_proposals(Permission::ReadOnly) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; for deal_id in params.deal_ids { // This construction could be replaced with a single "update deal state" // state method, possibly batched over all deal ids at once. - let s = msm.deal_states.as_ref().unwrap().get(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get state for deal_id ({})", deal_id), - ) - })?; + let s = msm + .deal_states + .as_ref() + .unwrap() + .get(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get state for deal_id ({})", deal_id) + })?; if s.is_some() { return Err(actor_error!( illegal_argument, @@ -554,24 +523,23 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) })? 
.ok_or_else(|| actor_error!(not_found, "no such deal_id: {}", deal_id))?; - let propc = proposal - .cid() - .map_err(|e| ActorError::from(e).wrap("failed to calculate proposal Cid"))?; + let propc = proposal.cid().context_code( + ExitCode::USR_SERIALIZATION, + "failed to calculate proposal Cid", + )?; - let has = - msm.pending_deals.as_ref().unwrap().has(&propc.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get pending proposal ({})", propc), - ) + let has = msm + .pending_deals + .as_ref() + .unwrap() + .has(&propc.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get pending proposal ({})", propc) })?; if !has { @@ -593,17 +561,13 @@ impl Actor { slash_epoch: EPOCH_UNDEFINED, }, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to set deal state {}", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set deal state {}", deal_id) })?; } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush state")?; Ok(()) })?; @@ -629,14 +593,15 @@ impl Actor { msm.with_deal_states(Permission::Write) .with_deal_proposals(Permission::ReadOnly) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; for id in params.deal_ids { - let deal = msm.deal_proposals.as_ref().unwrap().get(id).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get deal proposal") - })?; + let deal = msm + .deal_proposals + .as_ref() + .unwrap() + .get(id) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal proposal")?; // The deal may have expired and been deleted before the sector is terminated. // Nothing to do, but continue execution for the other deals. 
if deal.is_none() { @@ -666,9 +631,7 @@ impl Actor { .as_ref() .unwrap() .get(id) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state") - })? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state")? // A deal with a proposal but no state is not activated, but then it should not be // part of a sector that is terminating. .ok_or_else(|| actor_error!(illegal_argument, "no state for deal {}", id))?; @@ -683,17 +646,16 @@ impl Actor { // and slashing of provider collateral happens in cron_tick. state.slash_epoch = params.epoch; - msm.deal_states.as_mut().unwrap().set(id, state).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to set deal state ({})", id), - ) - })?; + msm.deal_states + .as_mut() + .unwrap() + .set(id, state) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set deal state ({})", id) + })?; } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; Ok(()) @@ -711,20 +673,16 @@ impl Actor { let st: State = rt.state()?; - let proposals = DealArray::load(&st.proposals, rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals") - })?; + let proposals = DealArray::load(&st.proposals, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals")?; let mut commds = Vec::with_capacity(params.inputs.len()); for comm_input in params.inputs.iter() { let mut pieces: Vec = Vec::with_capacity(comm_input.deal_ids.len()); for deal_id in &comm_input.deal_ids { let deal = proposals .get(*deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) })?
.ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) @@ -732,12 +690,10 @@ impl Actor { pieces.push(PieceInfo { cid: deal.piece_cid, size: deal.piece_size }); } let commd = - rt.compute_unsealed_sector_cid(comm_input.sector_type, &pieces).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - "failed to compute unsealed sector CID", - ) - })?; + rt.compute_unsealed_sector_cid(comm_input.sector_type, &pieces).context_code( + ExitCode::USR_ILLEGAL_ARGUMENT, + "failed to compute unsealed sector CID", + )?; commds.push(commd); } @@ -766,9 +722,7 @@ impl Actor { .with_deal_proposals(Permission::Write) .with_pending_proposals(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; for i in (last_cron + 1)..=rt.curr_epoch() { // TODO specs-actors modifies msm as it's iterated through, which is memory unsafe @@ -782,11 +736,8 @@ impl Actor { .unwrap() .for_each(i, |deal_id| { deal_ids.push(deal_id); - Ok(()) }) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set deal state") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal state")?; for deal_id in deal_ids { let deal = msm .deal_proposals .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) })? .ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) })?
.clone(); - let dcid = deal.cid().map_err(|e| { - ActorError::from(e) - .wrap(format!("failed to calculate cid for proposal {}", deal_id)) + let dcid = deal.cid().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to calculate cid for proposal {}", deal_id) })?; let state = msm @@ -815,12 +762,7 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to get deal state", - ) - })? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state")? .cloned(); // deal has been published but not activated yet -> terminate it @@ -845,12 +787,13 @@ impl Actor { } // Delete the proposal (but not state, which doesn't exist). - let deleted = - msm.deal_proposals.as_mut().unwrap().delete(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete deal proposal {}", deal_id), - ) + let deleted = msm + .deal_proposals + .as_mut() + .unwrap() + .delete(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete deal proposal {}", deal_id) })?; if deleted.is_none() { return Err(actor_error!( @@ -865,11 +808,8 @@ impl Actor { .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete pending proposal {}", deal_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete pending proposal {}", deal_id) })? .ok_or_else(|| { actor_error!( @@ -887,11 +827,8 @@ impl Actor { .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete pending proposal {}", dcid), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete pending proposal {}", dcid) })? .ok_or_else(|| { actor_error!( @@ -928,12 +865,10 @@ impl Actor { // Delete proposal and state simultaneously. 
let deleted = - msm.deal_states.as_mut().unwrap().delete(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to delete deal state", - ) - })?; + msm.deal_states.as_mut().unwrap().delete(deal_id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete deal state", + )?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -942,12 +877,10 @@ impl Actor { } let deleted = - msm.deal_proposals.as_mut().unwrap().delete(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to delete deal proposal", - ) - })?; + msm.deal_proposals.as_mut().unwrap().delete(deal_id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete deal proposal", + )?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -972,12 +905,10 @@ impl Actor { } state.last_updated_epoch = curr_epoch; - msm.deal_states.as_mut().unwrap().set(deal_id, state).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to set deal state", - ) - })?; + msm.deal_states.as_mut().unwrap().set(deal_id, state).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to set deal state", + )?; if let Some(ev) = updates_needed.get_mut(&next_epoch) { ev.push(deal_id); @@ -986,29 +917,29 @@ impl Actor { } } } - msm.deals_by_epoch.as_mut().unwrap().remove_all(i).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete deal ops for epoch {}", i), - ) - })?; + msm.deals_by_epoch + .as_mut() + .unwrap() + .remove_all(i) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete deal ops for epoch {}", i) + })?; } // updates_needed is already sorted by epoch. 
for (epoch, deals) in updates_needed { - msm.deals_by_epoch.as_mut().unwrap().put_many(epoch, &deals).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to reinsert deal IDs for epoch {}", epoch), - ) - })?; + msm.deals_by_epoch + .as_mut() + .unwrap() + .put_many(epoch, &deals) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to reinsert deal IDs for epoch {}", epoch) + })?; } msm.st.last_cron = rt.curr_epoch(); - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; @@ -1019,7 +950,8 @@ impl Actor { RawBytes::serialize(ext::verifreg::RestoreBytesParams { address: d.client, deal_size: BigInt::from(d.piece_size.0), - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); if let Err(e) = res { @@ -1051,11 +983,11 @@ pub fn validate_deals_for_activation( miner_addr: &Address, sector_expiry: ChainEpoch, curr_epoch: ChainEpoch, -) -> anyhow::Result<(BigInt, BigInt, u64)> +) -> Result<(BigInt, BigInt, u64), ActorError> where BS: Blockstore, { - let proposals = DealArray::load(&st.proposals, store)?; + let proposals = DealArray::load(&st.proposals, store).exit_code(ExitCode::USR_SERIALIZATION)?; validate_and_compute_deal_weight(&proposals, deal_ids, miner_addr, sector_expiry, curr_epoch) } @@ -1066,7 +998,7 @@ pub fn validate_and_compute_deal_weight( miner_addr: &Address, sector_expiry: ChainEpoch, sector_activation: ChainEpoch, -) -> anyhow::Result<(BigInt, BigInt, u64)> +) -> Result<(BigInt, BigInt, u64), ActorError> where BS: Blockstore, { @@ -1080,11 +1012,11 @@ where illegal_argument, "deal id {} present multiple times", deal_id - ) - .into()); + )); } let proposal = proposals - .get(*deal_id)? + .get(*deal_id) + .exit_code(ExitCode::USR_SERIALIZATION)? 
.ok_or_else(|| actor_error!(not_found, "no such deal {}", deal_id))?; validate_deal_can_activate(proposal, miner_addr, sector_expiry, sector_activation) @@ -1243,9 +1175,8 @@ where { // Generate unsigned bytes let sv_bz = serialize_vec(&proposal.proposal, "deal proposal")?; - rt.verify_signature(&proposal.client_signature, &proposal.proposal.client, &sv_bz).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "signature proposal invalid"), - )?; + rt.verify_signature(&proposal.client_signature, &proposal.proposal.client, &sv_bz)
        .context_code(ExitCode::USR_ILLEGAL_ARGUMENT, "signature proposal invalid")?; Ok(()) } @@ -1290,7 +1221,7 @@ where RawBytes::default(), 0.into(), )?; - let ret: ThisEpochRewardReturn = rwret.deserialize()?; + let ret: ThisEpochRewardReturn = rwret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(ret.this_epoch_baseline_power) } @@ -1309,7 +1240,8 @@ where RawBytes::default(), 0.into(), )?; - let ret: ext::power::CurrentTotalPowerReturnParams = rwret.deserialize()?; + let ret: ext::power::CurrentTotalPowerReturnParams = + rwret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok((ret.raw_byte_power, ret.quality_adj_power)) } @@ -1334,15 +1266,15 @@ impl ActorCode for Actor { } Some(Method::WithdrawBalance) => { let res = Self::withdraw_balance(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::PublishStorageDeals) => { let res = Self::publish_storage_deals(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::VerifyDealsForActivation) => { let res = Self::verify_deals_for_activation(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?)
} Some(Method::ActivateDeals) => { Self::activate_deals(rt, cbor::deserialize_params(params)?)?; @@ -1354,7 +1286,7 @@ impl ActorCode for Actor { } Some(Method::ComputeDataCommitment) => { let res = Self::compute_data_commitment(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::CronTick) => { Self::cron_tick(rt)?; diff --git a/actors/market/src/state.rs b/actors/market/src/state.rs index 08e73e9ff..cd3e80412 100644 --- a/actors/market/src/state.rs +++ b/actors/market/src/state.rs @@ -2,11 +2,10 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::balance_table::BalanceTable; -use anyhow::anyhow; use cid::Cid; -use fil_actors_runtime::runtime::Policy; +use fil_actors_runtime::ActorContext2; use fil_actors_runtime::{ - actor_error, make_empty_map, ActorDowncast, ActorError, Array, Set, SetMultimap, + actor_error, make_empty_map, runtime::Policy, ActorContext, ActorError, Array, Set, SetMultimap, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -69,25 +68,32 @@ pub struct State { } impl State { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let empty_proposals_array = Array::<(), BS>::new_with_bit_width(store, PROPOSALS_AMT_BITWIDTH) .flush() - .map_err(|e| anyhow!("Failed to create empty proposals array: {}", e))?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty proposals array", + )?; let empty_states_array = Array::<(), BS>::new_with_bit_width(store, STATES_AMT_BITWIDTH) .flush() - .map_err(|e| anyhow!("Failed to create empty states array: {}", e))?; - - let empty_pending_proposals_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) - .flush() - .map_err(|e| anyhow!("Failed to create empty pending proposals map state: {}", e))?; - let empty_balance_table = BalanceTable::new(store) - .root() - .map_err(|e| anyhow!("Failed to create empty balance table map: {}", e))?; 
+ .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; + + let empty_pending_proposals_map = + make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH).flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty pending proposals map state", + )?; + let empty_balance_table = BalanceTable::new(store).root().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty balance table map", + )?; let empty_deal_ops_hamt = SetMultimap::new(store) .root() - .map_err(|e| anyhow!("Failed to create empty multiset: {}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty multiset")?; + Ok(Self { proposals: empty_proposals_array, states: empty_states_array, @@ -216,17 +222,26 @@ where } } - pub(super) fn build(&mut self) -> anyhow::Result<&mut Self> { + pub(super) fn build(&mut self) -> Result<&mut Self, ActorError> { if self.proposal_permit != Permission::Invalid { - self.deal_proposals = Some(DealArray::load(&self.st.proposals, self.store)?); + self.deal_proposals = Some( + DealArray::load(&self.st.proposals, self.store) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.state_permit != Permission::Invalid { - self.deal_states = Some(DealMetaArray::load(&self.st.states, self.store)?); + self.deal_states = Some( + DealMetaArray::load(&self.st.states, self.store) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.locked_permit != Permission::Invalid { - self.locked_table = Some(BalanceTable::from_root(self.store, &self.st.locked_table)?); + self.locked_table = Some( + BalanceTable::from_root(self.store, &self.st.locked_table) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); self.total_client_locked_collateral = Some(self.st.total_client_locked_collateral.clone()); self.total_client_storage_fee = Some(self.st.total_client_storage_fee.clone()); @@ -235,16 +250,24 @@ where } if self.escrow_permit != Permission::Invalid { - self.escrow_table = Some(BalanceTable::from_root(self.store, 
&self.st.escrow_table)?); + self.escrow_table = Some( + BalanceTable::from_root(self.store, &self.st.escrow_table) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.pending_permit != Permission::Invalid { - self.pending_deals = Some(Set::from_root(self.store, &self.st.pending_proposals)?); + self.pending_deals = Some( + Set::from_root(self.store, &self.st.pending_proposals) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.dpe_permit != Permission::Invalid { - self.deals_by_epoch = - Some(SetMultimap::from_root(self.store, &self.st.deal_ops_by_epoch)?); + self.deals_by_epoch = Some( + SetMultimap::from_root(self.store, &self.st.deal_ops_by_epoch) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } self.next_deal_id = self.st.next_id; @@ -282,25 +305,28 @@ where self } - pub(super) fn commit_state(&mut self) -> anyhow::Result<()> { + pub(super) fn commit_state(&mut self) -> Result<(), ActorError> { if self.proposal_permit == Permission::Write { if let Some(s) = &mut self.deal_proposals { - self.st.proposals = - s.flush().map_err(|e| e.downcast_wrap("failed to flush deal proposals"))?; + self.st.proposals = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal proposals")?; } } if self.state_permit == Permission::Write { if let Some(s) = &mut self.deal_states { - self.st.states = - s.flush().map_err(|e| e.downcast_wrap("failed to flush deal states"))?; + self.st.states = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal states")?; } } if self.locked_permit == Permission::Write { if let Some(s) = &mut self.locked_table { - self.st.locked_table = - s.root().map_err(|e| e.downcast_wrap("failed to flush locked table"))?; + self.st.locked_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush locked table")?; } if let Some(s) = &mut self.total_client_locked_collateral { self.st.total_client_locked_collateral = s.clone(); @@ -315,22 +341,25 @@ where if self.escrow_permit == 
Permission::Write { if let Some(s) = &mut self.escrow_table { - self.st.escrow_table = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.escrow_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } if self.pending_permit == Permission::Write { if let Some(s) = &mut self.pending_deals { - self.st.pending_proposals = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.pending_proposals = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } if self.dpe_permit == Permission::Write { if let Some(s) = &mut self.deals_by_epoch { - self.st.deal_ops_by_epoch = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.deal_ops_by_epoch = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } @@ -408,26 +437,16 @@ where // Unlock remaining storage fee self.unlock_balance(&deal.client, &payment_remaining, Reason::ClientStorageFee) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to unlock remaining client storage fee", - ) - })?; + .context("failed to unlock remaining client storage fee")?; // Unlock client collateral self.unlock_balance(&deal.client, &deal.client_collateral, Reason::ClientCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to unlock client collateral", - ) - })?; + .context("failed to unlock client collateral")?; // slash provider collateral let slashed = deal.provider_collateral.clone(); self.slash_balance(&deal.provider, &slashed, Reason::ProviderCollateral) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "slashing balance"))?; + .context("slashing balance")?; return Ok((slashed, EPOCH_UNDEFINED, true)); } @@ -453,36 +472,20 @@ where deal: &DealProposal, ) -> Result { self.unlock_balance(&deal.client, &deal.total_storage_fee(), Reason::ClientStorageFee) 
- .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failure unlocking client storage fee", - ) - })?; + .context("failure unlocking client storage fee")?; self.unlock_balance(&deal.client, &deal.client_collateral, Reason::ClientCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failure unlocking client collateral", - ) - })?; + .context("failure unlocking client collateral")?; let amount_slashed = collateral_penalty_for_deal_activation_missed(deal.provider_collateral.clone()); let amount_remaining = deal.provider_balance_requirement() - &amount_slashed; - self.slash_balance(&deal.provider, &amount_slashed, Reason::ProviderCollateral).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to slash balance"), - )?; + self.slash_balance(&deal.provider, &amount_slashed, Reason::ProviderCollateral) + .context("failed to slash balance")?; self.unlock_balance(&deal.provider, &amount_remaining, Reason::ProviderCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to unlock deal provider balance", - ) - })?; + .context("failed to unlock deal provider balance")?; Ok(amount_slashed) } @@ -501,20 +504,10 @@ where } self.unlock_balance(&deal.provider, &deal.provider_collateral, Reason::ProviderCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed unlocking deal provider balance", - ) - })?; + .context("failed unlocking deal provider balance")?; self.unlock_balance(&deal.client, &deal.client_collateral, Reason::ClientCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed unlocking deal client balance", - ) - })?; + .context("failed unlocking deal client balance")?; Ok(()) } @@ -530,13 +523,19 @@ where &self, addr: Address, amount_to_lock: &TokenAmount, - ) -> anyhow::Result { - let prev_locked = self.locked_table.as_ref().unwrap().get(&addr).map_err(|e| { - 
e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance") - })?; - let escrow_balance = self.escrow_table.as_ref().unwrap().get(&addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance") - })?; + ) -> Result { + let prev_locked = self + .locked_table + .as_ref() + .unwrap() + .get(&addr) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; + let escrow_balance = self + .escrow_table + .as_ref() + .unwrap() + .get(&addr) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance")?; Ok((prev_locked + amount_to_lock) <= escrow_balance) } @@ -549,13 +548,19 @@ where return Err(actor_error!(illegal_state, "cannot lock negative amount {}", amount)); } - let prev_locked = self.locked_table.as_ref().unwrap().get(addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance") - })?; + let prev_locked = self + .locked_table + .as_ref() + .unwrap() + .get(addr) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; - let escrow_balance = self.escrow_table.as_ref().unwrap().get(addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance") - })?; + let escrow_balance = self + .escrow_table + .as_ref() + .unwrap() + .get(addr) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance")?; if &prev_locked + amount > escrow_balance { return Err(actor_error!(insufficient_funds; @@ -564,9 +569,11 @@ where addr, escrow_balance, prev_locked, amount)); } - self.locked_table.as_mut().unwrap().add(addr, amount).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add locked balance") - })?; + self.locked_table + .as_mut() + .unwrap() + .add(addr, amount) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add locked balance")?; Ok(()) } @@ -597,9 +604,9 @@ where addr: &Address, amount: &TokenAmount, lock_reason: Reason, - ) -> 
anyhow::Result<()> { + ) -> Result<(), ActorError> { if amount.is_negative() { - return Err(actor_error!(illegal_state, "unlock negative amount: {}", amount).into()); + return Err(actor_error!(illegal_state, "unlock negative amount: {}", amount)); } self.locked_table.as_mut().unwrap().must_subtract(addr, amount)?; @@ -634,17 +641,13 @@ where .as_mut() .unwrap() .must_subtract(from_addr, amount) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "subtract from escrow"))?; + .context("subtract from escrow")?; self.unlock_balance(from_addr, amount, Reason::ClientStorageFee) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "subtract from locked"))?; + .context("subtract from locked")?; // Add subtracted amount to the recipient - self.escrow_table - .as_mut() - .unwrap() - .add(to_addr, amount) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "add to escrow"))?; + self.escrow_table.as_mut().unwrap().add(to_addr, amount).context("add to escrow")?; Ok(()) } @@ -654,9 +657,9 @@ where addr: &Address, amount: &TokenAmount, lock_reason: Reason, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if amount.is_negative() { - return Err(actor_error!(illegal_state, "negative amount to slash: {}", amount).into()); + return Err(actor_error!(illegal_state, "negative amount to slash: {}", amount)); } // Subtract from locked and escrow tables diff --git a/actors/market/tests/market_actor_test.rs b/actors/market/tests/market_actor_test.rs index 7ae9ea9b8..0fa11df93 100644 --- a/actors/market/tests/market_actor_test.rs +++ b/actors/market/tests/market_actor_test.rs @@ -18,12 +18,12 @@ use fil_actor_verifreg::UseBytesParams; use fil_actors_runtime::cbor::deserialize; use fil_actors_runtime::network::EPOCHS_IN_DAY; use fil_actors_runtime::runtime::{Policy, Runtime}; -use fil_actors_runtime::test_utils::*; use fil_actors_runtime::{ make_empty_map, ActorError, SetMultimap, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, 
STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; +use fil_actors_runtime::{test_utils::*, ActorContext2}; use fvm_ipld_amt::Amt; use fvm_ipld_encoding::{to_vec, RawBytes}; use fvm_shared::address::Address; @@ -122,12 +122,12 @@ fn simple_construction() { fn label_cbor() { let label = Label::String("i_am_random_string____i_am_random_string____".parse().unwrap()); let _ = to_vec(&label) - .map_err(|e| ActorError::from(e).wrap("failed to serialize DealProposal")) + .context_code(ExitCode::USR_SERIALIZATION, "failed to serialize DealProposal") .unwrap(); let label2 = Label::Bytes(b"i_am_random_____i_am_random_____".to_vec()); let _ = to_vec(&label2) - .map_err(|e| ActorError::from(e).wrap("failed to serialize DealProposal")) + .context_code(ExitCode::USR_SERIALIZATION, "failed to serialize DealProposal") .unwrap(); let empty_string_label = Label::String("".parse().unwrap()); @@ -2475,7 +2475,6 @@ where dobe.for_each(epoch, |id| { assert_eq!(epoch % deal_updates_interval, (id as i64) % deal_updates_interval); count += 1; - Ok(()) }) .unwrap(); assert_eq!(n, count, "unexpected deal count at epoch {}", epoch); diff --git a/actors/miner/Cargo.toml b/actors/miner/Cargo.toml index ef2c10f47..525e498ca 100644 --- a/actors/miner/Cargo.toml +++ b/actors/miner/Cargo.toml @@ -18,7 +18,7 @@ fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features fvm_shared = { version = "0.6.0", default-features = false } fvm_ipld_bitfield = "0.5.0" fvm_ipld_amt = { version = "0.4.0", features = ["go-interop"] } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } num-traits = "0.2.14" @@ -26,10 +26,10 @@ num-derive = "0.3.3" lazy_static = "1.4.0" log = "0.4.14" byteorder = "1.4.3" -anyhow = "1.0.56" itertools = "0.10.3" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = 
"0.1.0" +thiserror = "1.0" [dev-dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["test_utils", "sector-default"] } diff --git a/actors/miner/src/bitfield_queue.rs b/actors/miner/src/bitfield_queue.rs index 6d3c1c969..f4c215df6 100644 --- a/actors/miner/src/bitfield_queue.rs +++ b/actors/miner/src/bitfield_queue.rs @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0, MIT use std::convert::TryInto; +use std::num::TryFromIntError; use cid::Cid; -use fil_actors_runtime::{ActorDowncast, Array}; +use fil_actors_runtime::Array; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -18,13 +19,27 @@ pub struct BitFieldQueue<'db, BS> { quant: QuantSpec, } +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("amt {0}")] + Amt(#[from] AmtError), + #[error("conversion failure {0}")] + Int(#[from] TryFromIntError), + #[error("bitfield {0}")] + Bitfield(#[from] fvm_ipld_bitfield::OutOfRangeError), +} + impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { - pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result { + pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { Ok(Self { amt: Array::load(root, store)?, quant }) } /// Adds values to the queue entry for an epoch. - pub fn add_to_queue(&mut self, raw_epoch: ChainEpoch, values: &BitField) -> anyhow::Result<()> { + pub fn add_to_queue( + &mut self, + raw_epoch: ChainEpoch, + values: &BitField, + ) -> Result<(), Error> { if values.is_empty() { // nothing to do. return Ok(()); @@ -32,16 +47,9 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { let epoch: u64 = self.quant.quantize_up(raw_epoch).try_into()?; - let bitfield = self - .amt - .get(epoch) - .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", epoch)))? 
- .cloned() - .unwrap_or_default(); + let bitfield = self.amt.get(epoch)?.cloned().unwrap_or_default(); - self.amt - .set(epoch, &bitfield | values) - .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch)))?; + self.amt.set(epoch, &bitfield | values)?; Ok(()) } @@ -50,7 +58,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { &mut self, epoch: ChainEpoch, values: impl IntoIterator, - ) -> anyhow::Result<()> { + ) -> Result<(), Error> { self.add_to_queue(epoch, &BitField::try_from_bits(values)?) } @@ -58,26 +66,20 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { /// shifting other bits down and removing any newly empty entries. /// /// See the docs on `BitField::cut` to better understand what it does. - pub fn cut(&mut self, to_cut: &BitField) -> anyhow::Result<()> { + pub fn cut(&mut self, to_cut: &BitField) -> Result<(), Error> { let mut epochs_to_remove = Vec::::new(); - self.amt - .for_each_mut(|epoch, bitfield| { - let bf = bitfield.cut(to_cut); + self.amt.for_each_mut(|epoch, bitfield| { + let bf = bitfield.cut(to_cut); - if bf.is_empty() { - epochs_to_remove.push(epoch); - } else { - **bitfield = bf; - } - - Ok(()) - }) - .map_err(|e| e.downcast_wrap("failed to cut from bitfield queue"))?; + if bf.is_empty() { + epochs_to_remove.push(epoch); + } else { + **bitfield = bf; + } + })?; - self.amt - .batch_delete(epochs_to_remove, true) - .map_err(|e| e.downcast_wrap("failed to remove empty epochs from bitfield queue"))?; + self.amt.batch_delete(epochs_to_remove, true)?; Ok(()) } @@ -85,7 +87,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { pub fn add_many_to_queue_values( &mut self, values: impl IntoIterator, - ) -> anyhow::Result<()> { + ) -> Result<(), Error> { // Pre-quantize to reduce the number of updates. let mut quantized_values: Vec<_> = values .into_iter() @@ -110,19 +112,19 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { /// Removes and returns all values with keys less than or equal to until. 
/// Modified return value indicates whether this structure has been changed by the call. - pub fn pop_until(&mut self, until: ChainEpoch) -> anyhow::Result<(BitField, bool)> { + pub fn pop_until(&mut self, until: ChainEpoch) -> Result<(BitField, bool), Error> { let mut popped_values = BitField::new(); let mut popped_keys = Vec::::new(); self.amt.for_each_while(|epoch, bitfield| { if epoch as ChainEpoch > until { // break - return Ok(false); + return false; } popped_keys.push(epoch); popped_values |= bitfield; - Ok(true) + true })?; if popped_keys.is_empty() { diff --git a/actors/miner/src/deadline_assignment.rs b/actors/miner/src/deadline_assignment.rs index 72fb73023..9f3ffa86e 100644 --- a/actors/miner/src/deadline_assignment.rs +++ b/actors/miner/src/deadline_assignment.rs @@ -4,9 +4,7 @@ use std::cmp::Ordering; use std::collections::BinaryHeap; -use anyhow::anyhow; - -use fil_actors_runtime::runtime::Policy; +use fil_actors_runtime::{actor_error, runtime::Policy, ActorError}; use super::{Deadline, SectorOnChainInfo}; @@ -140,7 +138,7 @@ pub fn assign_deadlines( partition_size: u64, deadlines: &[Option], sectors: Vec, -) -> anyhow::Result>> { +) -> Result>, ActorError> { struct Entry { partition_size: u64, info: DeadlineAssignmentInfo, @@ -189,7 +187,8 @@ pub fn assign_deadlines( let info = &mut heap.peek_mut().unwrap().info; if info.max_partitions_reached(partition_size, max_partitions) { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "max partitions limit {} reached for all deadlines", max_partitions )); diff --git a/actors/miner/src/deadline_state.rs b/actors/miner/src/deadline_state.rs index 40816d52f..a275f362f 100644 --- a/actors/miner/src/deadline_state.rs +++ b/actors/miner/src/deadline_state.rs @@ -4,11 +4,10 @@ use std::cmp; use std::collections::BTreeSet; -use anyhow::anyhow; use cid::multihash::Code; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorDowncast, ActorError, Array}; +use 
fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -55,26 +54,25 @@ impl Deadlines { policy: &Policy, store: &BS, deadline_idx: u64, - ) -> anyhow::Result { + ) -> Result { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!(actor_error!( - illegal_argument, - "invalid deadline {}", - deadline_idx - ))); + return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } - store.get_cbor(&self.due[deadline_idx as usize])?.ok_or_else(|| { - anyhow!(actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx)) - }) + store + .get_cbor(&self.due[deadline_idx as usize]) + .exit_code(ExitCode::USR_SERIALIZATION)? + .ok_or_else(|| { + actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx) + }) } pub fn for_each( &self, policy: &Policy, store: &BS, - mut f: impl FnMut(u64, Deadline) -> anyhow::Result<()>, - ) -> anyhow::Result<()> { + mut f: impl FnMut(u64, Deadline) -> Result<(), ActorError>, + ) -> Result<(), ActorError> { for i in 0..(self.due.len() as u64) { let index = i; let deadline = self.load_deadline(policy, store, index)?; @@ -89,14 +87,15 @@ impl Deadlines { store: &BS, deadline_idx: u64, deadline: &Deadline, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!("invalid deadline {}", deadline_idx)); + return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } deadline.validate_state()?; - self.due[deadline_idx as usize] = store.put_cbor(deadline, Code::Blake2b256)?; + self.due[deadline_idx as usize] = + store.put_cbor(deadline, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } } @@ -181,24 +180,26 @@ pub struct DisputeInfo { } impl Deadline { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let 
empty_partitions_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_PARTITIONS_AMT_BITWIDTH) .flush() - .map_err(|e| e.downcast_wrap("Failed to create empty states array"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; let empty_deadline_expiration_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_EXPIRATIONS_AMT_BITWIDTH) .flush() - .map_err(|e| e.downcast_wrap("Failed to create empty states array"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; let empty_post_submissions_array = Array::<(), BS>::new_with_bit_width( store, DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .map_err(|e| e.downcast_wrap("Failed to create empty states array"))?; - let empty_sectors_array = Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) - .flush() - .map_err(|e| e.downcast_wrap("Failed to construct empty sectors snapshot array"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; + let empty_sectors_array = + Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH).flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to construct empty sectors snapshot array", + )?; Ok(Self { partitions: empty_partitions_array, expirations_epochs: empty_deadline_expiration_array, @@ -217,45 +218,45 @@ impl Deadline { pub fn partitions_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { - Ok(Array::load(&self.partitions, store)?) + ) -> Result, ActorError> { + Ok(Array::load(&self.partitions, store).exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn optimistic_proofs_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { - Ok(Array::load(&self.optimistic_post_submissions, store)?) + ) -> Result, ActorError> { + Ok(Array::load(&self.optimistic_post_submissions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?) 
} pub fn partitions_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { - Ok(Array::load(&self.partitions_snapshot, store)?) + ) -> Result, ActorError> { + Ok(Array::load(&self.partitions_snapshot, store).exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn optimistic_proofs_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { - Ok(Array::load(&self.optimistic_post_submissions_snapshot, store)?) + ) -> Result, ActorError> { + Ok(Array::load(&self.optimistic_post_submissions_snapshot, store) + .exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn load_partition( &self, store: &BS, partition_idx: u64, - ) -> anyhow::Result { - let partitions = Array::::load(&self.partitions, store)?; + ) -> Result { + let partitions = Array::::load(&self.partitions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to lookup partition {}", partition_idx), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup partition {}", partition_idx) })? .ok_or_else(|| actor_error!(not_found, "no partition {}", partition_idx))?; @@ -266,16 +267,14 @@ impl Deadline { &self, store: &BS, partition_idx: u64, - ) -> anyhow::Result { - let partitions = Array::::load(&self.partitions_snapshot, store)?; + ) -> Result { + let partitions = Array::::load(&self.partitions_snapshot, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to lookup partition snapshot {}", partition_idx), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup partition snapshot {}", partition_idx) })? 
.ok_or_else(|| actor_error!(not_found, "no partition snapshot {}", partition_idx))?; @@ -289,19 +288,23 @@ impl Deadline { expiration_epoch: ChainEpoch, partitions: &[u64], quant: QuantSpec, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Avoid doing any work if there's nothing to reschedule. if partitions.is_empty() { return Ok(()); } let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; + .exit_code(ExitCode::USR_SERIALIZATION)?; + queue .add_to_queue_values(expiration_epoch, partitions.iter().copied()) - .map_err(|e| e.downcast_wrap("failed to mutate expiration queue"))?; - self.expirations_epochs = - queue.amt.flush().map_err(|e| e.downcast_wrap("failed to save expiration queue"))?; + .exit_code(ExitCode::USR_SERIALIZATION)?; + + self.expirations_epochs = queue + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save expiration queue")?; Ok(()) } @@ -313,7 +316,7 @@ impl Deadline { store: &BS, until: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let (expired_partitions, modified) = self.pop_expired_partitions(store, until, quant)?; if !modified { @@ -334,16 +337,16 @@ impl Deadline { for i in expired_partitions.iter() { let partition_idx = i; let mut partition = partitions - .get(partition_idx)? + .get(partition_idx) + .exit_code(ExitCode::USR_SERIALIZATION)? 
.cloned() - .ok_or_else(|| anyhow!("missing expected partition {}", partition_idx))?; + .ok_or_else(|| { + actor_error!(illegal_state, "missing expected partition {}", partition_idx) + })?; let partition_expiration = - partition.pop_expired_sectors(store, until, quant).map_err(|e| { - e.downcast_wrap(format!( - "failed to pop expired sectors from partition {}", - partition_idx - )) + partition.pop_expired_sectors(store, until, quant).with_context(|| { + format!("failed to pop expired sectors from partition {}", partition_idx) })?; if !partition_expiration.early_sectors.is_empty() { @@ -356,10 +359,10 @@ impl Deadline { all_faulty_power += &partition_expiration.faulty_power; all_on_time_pledge += &partition_expiration.on_time_pledge; - partitions.set(partition_idx, partition)?; + partitions.set(partition_idx, partition).exit_code(ExitCode::USR_SERIALIZATION)?; } - self.partitions = partitions.flush()?; + self.partitions = partitions.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update early expiration bitmap. let new_early_terminations = BitField::try_from_bits(partitions_with_early_terminations) @@ -397,7 +400,7 @@ impl Deadline { mut sectors: &[SectorOnChainInfo], sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let mut total_power = PowerPair::zero(); if sectors.is_empty() { return Ok(total_power); @@ -417,15 +420,16 @@ impl Deadline { } // Get/create partition to update. - let mut partition = match partitions.get(partition_idx)? { - Some(partition) => partition.clone(), - None => { - // This case will usually happen zero times. - // It would require adding more than a full partition in one go - // to happen more than once. - Partition::new(store)? - } - }; + let mut partition = + match partitions.get(partition_idx).exit_code(ExitCode::USR_SERIALIZATION)? { + Some(partition) => partition.clone(), + None => { + // This case will usually happen zero times. 
+ // It would require adding more than a full partition in one go + // to happen more than once. + Partition::new(store)? + } + }; // Figure out which (if any) sectors we want to add to this partition. let sector_count = partition.sectors.len(); @@ -445,7 +449,7 @@ impl Deadline { total_power += &partition_power; // Save partition back. - partitions.set(partition_idx, partition)?; + partitions.set(partition_idx, partition).exit_code(ExitCode::USR_SERIALIZATION)?; // Record deadline -> partition mapping so we can later update the deadlines. partition_deadline_updates @@ -453,16 +457,17 @@ impl Deadline { } // Save partitions back. - self.partitions = partitions.flush()?; + self.partitions = partitions.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Next, update the expiration queue. let mut deadline_expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration epochs"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration epochs")?; deadline_expirations .add_many_to_queue_values(partition_deadline_updates.iter().copied()) - .map_err(|e| e.downcast_wrap("failed to add expirations for new deadlines"))?; - self.expirations_epochs = deadline_expirations.amt.flush()?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add expirations for new deadlines")?; + self.expirations_epochs = + deadline_expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(total_power) } @@ -472,7 +477,7 @@ impl Deadline { store: &BS, max_partitions: u64, max_sectors: u64, - ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { let mut partitions = self.partitions_amt(store)?; let mut partitions_finished = Vec::::new(); @@ -481,9 +486,11 @@ impl Deadline { for i in self.early_terminations.iter() { let partition_idx = i; - let mut partition = match partitions.get(partition_idx).map_err(|e| { - 
e.downcast_wrap(format!("failed to load partition {}", partition_idx)) - })? { + let mut partition = match partitions + .get(partition_idx) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? { Some(partition) => partition.clone(), None => { partitions_finished.push(partition_idx); @@ -494,7 +501,7 @@ impl Deadline { // Pop early terminations. let (partition_result, more) = partition .pop_early_terminations(store, max_sectors - result.sectors_processed) - .map_err(|e| e.downcast_wrap("failed to pop terminations from partition"))?; + .context("failed to pop terminations from partition")?; result += partition_result; @@ -504,9 +511,11 @@ impl Deadline { } // Save partition - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_wrap(format!("failed to store partition {}", partition_idx)) - })?; + partitions + .set(partition_idx, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; if !result.below_limit(max_partitions, max_sectors) { break; @@ -519,8 +528,9 @@ impl Deadline { } // Save deadline's partitions - self.partitions = - partitions.flush().map_err(|e| e.downcast_wrap("failed to update partitions"))?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to update partitions")?; // Update global early terminations bitfield. 
let no_early_terminations = self.early_terminations.is_empty(); @@ -532,14 +542,16 @@ impl Deadline { store: &BS, until: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result<(BitField, bool)> { - let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; + ) -> Result<(BitField, bool), ActorError> { + let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .exit_code(ExitCode::USR_SERIALIZATION)?; let (popped, modified) = expirations .pop_until(until) - .map_err(|e| e.downcast_wrap("failed to pop expiring partitions"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to pop expiring partitions")?; if modified { - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = + expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; } Ok((popped, modified)) @@ -555,15 +567,15 @@ impl Deadline { partition_sectors: &mut PartitionSectorMap, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let mut partitions = self.partitions_amt(store)?; let mut power_lost = PowerPair::zero(); for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_wrap(format!("failed to load partition {}", partition_idx)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) })? 
.ok_or_else( || actor_error!(not_found; "failed to find partition {}", partition_idx), @@ -580,16 +592,15 @@ impl Deadline { sector_size, quant, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to terminate sectors in partition {}", - partition_idx - )) + .with_context(|| { + format!("failed to terminate sectors in partition {}", partition_idx) })?; - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_wrap(format!("failed to store updated partition {}", partition_idx)) - })?; + partitions + .set(partition_idx, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store updated partition {}", partition_idx) + })?; if !removed.is_empty() { // Record that partition now has pending early terminations. @@ -606,8 +617,9 @@ impl Deadline { } // save partitions back - self.partitions = - partitions.flush().map_err(|e| e.downcast_wrap("failed to persist partitions"))?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions")?; Ok(power_lost) } @@ -628,10 +640,9 @@ impl Deadline { BitField, // dead PowerPair, // removed power ), - anyhow::Error, + ActorError, > { - let old_partitions = - self.partitions_amt(store).map_err(|e| e.downcast_wrap("failed to load partitions"))?; + let old_partitions = self.partitions_amt(store).context("failed to load partitions")?; let partition_count = old_partitions.count(); let to_remove_set: BTreeSet<_> = to_remove @@ -644,7 +655,7 @@ impl Deadline { if let Some(&max_partition) = to_remove_set.iter().max() { if max_partition > partition_count { return Err( - actor_error!(illegal_argument; "partition index {} out of range [0, {})", max_partition, partition_count).into() + actor_error!(illegal_argument; "partition index {} out of range [0, {})", max_partition, partition_count), ); } } else { @@ -655,7 +666,7 @@ impl Deadline { // Should already be checked earlier, but we might as well check again. 
if !self.early_terminations.is_empty() { return Err( - actor_error!(illegal_argument; "cannot remove partitions from deadline with early terminations").into(), + actor_error!(illegal_argument; "cannot remove partitions from deadline with early terminations"), ); } @@ -669,10 +680,12 @@ impl Deadline { // corresponding index, like the Go impl does old_partitions - .for_each(|partition_idx, partition| { + .try_for_each::<_, ActorError>(|partition_idx, partition| { // If we're keeping the partition as-is, append it to the new partitions array. if !to_remove_set.contains(&partition_idx) { - new_partitions.set(new_partitions.count(), partition.clone())?; + new_partitions + .set(new_partitions.count(), partition.clone()) + .exit_code(ExitCode::USR_SERIALIZATION)?; return Ok(()); } @@ -683,8 +696,7 @@ impl Deadline { illegal_argument, "cannot remove partition {}: has faults", partition_idx - ) - .into()); + )); } // Don't allow removing partitions with unproven sectors @@ -694,8 +706,7 @@ impl Deadline { illegal_argument, "cannot remove partition {}: has unproven sectors", partition_idx - ) - .into()); + )); } // Get the live sectors. @@ -707,11 +718,11 @@ impl Deadline { Ok(()) }) - .map_err(|e| e.downcast_wrap("while removing partitions"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "while removing partitions")?; self.partitions = new_partitions .flush() - .map_err(|e| e.downcast_wrap("failed to persist new partition table"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist new partition table")?; let dead = BitField::union(&all_dead_sectors); let live = BitField::union(&all_live_sectors); @@ -725,16 +736,17 @@ impl Deadline { // Update expiration bitfields. 
let mut expiration_epochs = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration queue")?; - expiration_epochs.cut(to_remove).map_err(|e| { - e.downcast_wrap("failed cut removed partitions from deadline expiration queue") - })?; + expiration_epochs.cut(to_remove).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed cut removed partitions from deadline expiration queue", + )?; - self.expirations_epochs = expiration_epochs - .amt - .flush() - .map_err(|e| e.downcast_wrap("failed persist deadline expiration queue"))?; + self.expirations_epochs = expiration_epochs.amt.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed persist deadline expiration queue", + )?; Ok((live, dead, removed_power)) } @@ -747,7 +759,7 @@ impl Deadline { quant: QuantSpec, fault_expiration_epoch: ChainEpoch, partition_sectors: &mut PartitionSectorMap, - ) -> anyhow::Result { + ) -> Result { let mut partitions = self.partitions_amt(store)?; // Record partitions with some fault, for subsequently indexing in the deadline. @@ -758,11 +770,8 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {}", partition_idx), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))? 
.clone(); @@ -776,11 +785,8 @@ impl Deadline { sector_size, quant, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to declare faults in partition {}", - partition_idx - )) + .with_context(|| { + format!("failed to declare faults in partition {}", partition_idx) })?; self.faulty_power += &partition_new_faulty_power; @@ -789,17 +795,16 @@ impl Deadline { partitions_with_fault.push(partition_idx); } - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store partition {}", partition_idx), - ) - })?; + partitions + .set(partition_idx, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; } - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root") - })?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root")?; self.add_expiration_partitions( store, @@ -807,12 +812,7 @@ impl Deadline { &partitions_with_fault, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update expirations for partitions with faults", - ) - })?; + .context("failed to update expirations for partitions with faults")?; Ok(power_delta) } @@ -823,38 +823,34 @@ impl Deadline { sectors: &Sectors<'_, BS>, sector_size: SectorSize, partition_sectors: &mut PartitionSectorMap, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut partitions = self.partitions_amt(store)?; for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {}", partition_idx), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) })? 
.ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))? .clone(); partition .declare_faults_recovered(sectors, sector_size, sector_numbers) - .map_err(|e| e.downcast_wrap("failed to add recoveries"))?; + .context("failed to add recoveries")?; - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update partition {}", partition_idx), - ) - })?; + partitions + .set(partition_idx, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", partition_idx) + })?; } // Power is not regained until the deadline end, when the recovery is confirmed. - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root") - })?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root")?; Ok(()) } @@ -869,9 +865,7 @@ impl Deadline { fault_expiration_epoch: ChainEpoch, sectors: Cid, ) -> Result<(PowerPair, PowerPair), ActorError> { - let mut partitions = self.partitions_amt(store).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load partitions") - })?; + let mut partitions = self.partitions_amt(store).context("failed to load partitions")?; let mut detected_any = false; let mut rescheduled_partitions = Vec::::new(); @@ -886,11 +880,8 @@ impl Deadline { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {}", partition_idx), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) })? .ok_or_else(|| actor_error!(illegal_state; "no partition {}", partition_idx))? .clone(); @@ -906,14 +897,10 @@ impl Deadline { // Ok, we actually need to process this partition. Make sure we save the partition state back. 
detected_any = true; - let (part_power_delta, part_penalized_power, part_new_faulty_power) = partition - .record_missed_post(store, fault_expiration_epoch, quant) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to record missed PoSt for partition {}", partition_idx), - ) - })?; + let (part_power_delta, part_penalized_power, part_new_faulty_power) = + partition.record_missed_post(store, fault_expiration_epoch, quant).with_context( + || format!("failed to record missed PoSt for partition {}", partition_idx), + )?; // We marked some sectors faulty, we need to record the new // expiration. We don't want to do this if we're just penalizing @@ -923,12 +910,11 @@ impl Deadline { } // Save new partition state. - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update partition {}", partition_idx), - ) - })?; + partitions + .set(partition_idx, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", partition_idx) + })?; self.faulty_power += &part_new_faulty_power; @@ -938,9 +924,9 @@ impl Deadline { // Save modified deadline state. if detected_any { - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions") - })?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions")?; } self.add_expiration_partitions( @@ -949,12 +935,7 @@ impl Deadline { &rescheduled_partitions, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update deadline expiration queue", - ) - })?; + .context("failed to update deadline expiration queue")?; // Reset PoSt submissions. 
self.partitions_posted = BitField::new(); @@ -965,43 +946,43 @@ impl Deadline { DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to clear pending proofs array") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to clear pending proofs array")?; // only snapshot sectors if there's a proof that might be disputed (this is equivalent to asking if the OptimisticPoStSubmissionsSnapshot is empty) if self.optimistic_post_submissions != self.optimistic_post_submissions_snapshot { self.sectors_snapshot = sectors; } else { self.sectors_snapshot = - Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH).flush().map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to clear sectors snapshot array", - ) - }, - )?; + Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) + .flush() + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to clear sectors snapshot array", + )?; } Ok((power_delta, penalized_power)) } + pub fn for_each( &self, store: &BS, - f: impl FnMut(u64, &Partition) -> anyhow::Result<()>, - ) -> anyhow::Result<()> { + f: impl FnMut(u64, &Partition) -> Result<(), ActorError>, + ) -> Result<(), ActorError> { let parts = self.partitions_amt(store)?; - parts.for_each(f)?; + parts.try_for_each(f).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } - pub fn validate_state(&self) -> anyhow::Result<()> { + pub fn validate_state(&self) -> Result<(), ActorError> { if self.live_sectors > self.total_sectors { - return Err(anyhow!("deadline left with more live sectors than total")); + return Err(actor_error!( + illegal_state, + "deadline left with more live sectors than total" + )); } if self.faulty_power.raw.is_negative() || self.faulty_power.qa.is_negative() { - return Err(anyhow!("deadline left with negative faulty power")); + return Err(actor_error!(illegal_state, "deadline left with negative faulty power")); } Ok(()) @@ -1011,10 
+992,9 @@ impl Deadline { &self, store: &BS, partitions: BitField, - ) -> anyhow::Result { - let partitions_snapshot = self - .partitions_snapshot_amt(store) - .map_err(|e| e.downcast_wrap("failed to load partitions {}"))?; + ) -> Result { + let partitions_snapshot = + self.partitions_snapshot_amt(store).context("failed to load partitions {}")?; let mut all_sectors = Vec::new(); let mut all_ignored = Vec::new(); @@ -1022,8 +1002,11 @@ impl Deadline { let mut disputed_power = PowerPair::zero(); for part_idx in partitions.iter() { let partition_snapshot = partitions_snapshot - .get(part_idx)? - .ok_or_else(|| anyhow!("failed to find partition {}", part_idx))?; + .get(part_idx) + .exit_code(ExitCode::USR_SERIALIZATION)? + .ok_or_else(|| { + actor_error!(illegal_state, "failed to find partition {}", part_idx) + })?; // Record sectors for proof verification all_sectors.push(partition_snapshot.sectors.clone()); @@ -1116,24 +1099,24 @@ impl Deadline { quant: QuantSpec, fault_expiration: ChainEpoch, post_partitions: &mut [PoStPartition], - ) -> anyhow::Result { + ) -> Result { let partition_indexes = BitField::try_from_bits(post_partitions.iter().map(|p| p.index)) .map_err(|_| actor_error!(illegal_argument; "partition index out of bitfield range"))?; let num_partitions = partition_indexes.len(); if num_partitions != post_partitions.len() as u64 { - return Err(anyhow!(actor_error!(illegal_argument, "duplicate partitions proven"))); + return Err(actor_error!(illegal_argument, "duplicate partitions proven")); } // First check to see if we're proving any already proven partitions. // This is faster than checking one by one. 
let already_proven = &self.partitions_posted & &partition_indexes; if !already_proven.is_empty() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_argument, "partition already proven: {:?}", already_proven - ))); + )); } let mut partitions = self.partitions_amt(store)?; @@ -1150,7 +1133,9 @@ impl Deadline { for post in post_partitions { let mut partition = partitions .get(post.index) - .map_err(|e| e.downcast_wrap(format!("failed to load partition {}", post.index)))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", post.index) + })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", post.index))? .clone(); @@ -1166,11 +1151,8 @@ impl Deadline { fault_expiration, &mut post.skipped, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to add skipped faults to partition {}", - post.index - )) + .with_context(|| { + format!("failed to add skipped faults to partition {}", post.index) })?; // If we have new faulty power, we've added some faults. We need @@ -1180,12 +1162,9 @@ impl Deadline { } let recovered_power = - partition.recover_faults(store, sectors, sector_size, quant).map_err(|e| { - e.downcast_wrap(format!( - "failed to recover faulty sectors for partition {}", - post.index - )) - })?; + partition.recover_faults(store, sectors, sector_size, quant).with_context( + || format!("failed to recover faulty sectors for partition {}", post.index), + )?; new_power_delta += &partition.activate_unproven(); @@ -1197,12 +1176,11 @@ impl Deadline { all_ignored.push(partition.terminated.clone()); // This will be rolled back if the method aborts with a failed proof. 
- partitions.set(post.index, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update partition {}", post.index), - ) - })?; + partitions + .set(post.index, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", post.index) + })?; new_faulty_power_total += &new_fault_power; retracted_recovery_power_total += &retracted_recovery_power; @@ -1215,20 +1193,15 @@ impl Deadline { } self.add_expiration_partitions(store, fault_expiration, &rescheduled_partitions, quant) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update expirations for partitions with faults", - ) - })?; + .context("failed to update expirations for partitions with faults")?; // Save everything back. self.faulty_power -= &recovered_power_total; self.faulty_power += &new_faulty_power_total; - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions") - })?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions")?; // Collect all sectors, faults, and recoveries for proof verification. let all_sector_numbers = BitField::union(&all_sectors); @@ -1252,18 +1225,18 @@ impl Deadline { store: &BS, partitions: &BitField, proofs: &[PoStProof], - ) -> anyhow::Result<()> { - let mut proof_arr = self - .optimistic_proofs_amt(store) - .map_err(|e| e.downcast_wrap("failed to load post proofs"))?; + ) -> Result<(), ActorError> { + let mut proof_arr = + self.optimistic_proofs_amt(store).context("failed to load post proofs")?; proof_arr .set( proof_arr.count(), // TODO: Can we do this with out cloning? 
WindowedPoSt { partitions: partitions.clone(), proofs: proofs.to_vec() }, ) - .map_err(|e| e.downcast_wrap("failed to store proof"))?; - let root = proof_arr.flush().map_err(|e| e.downcast_wrap("failed to save proofs"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store proof")?; + let root = + proof_arr.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save proofs")?; self.optimistic_post_submissions = root; Ok(()) } @@ -1275,18 +1248,21 @@ impl Deadline { &mut self, store: &BS, idx: u64, - ) -> anyhow::Result<(BitField, Vec)> { + ) -> Result<(BitField, Vec), ActorError> { let mut proof_arr = self .optimistic_proofs_snapshot_amt(store) - .map_err(|e| e.downcast_wrap("failed to load post proofs snapshot amt"))?; + .context("failed to load post proofs snapshot amt")?; // Extract and remove the proof from the proofs array, leaving a hole. // This will not affect concurrent attempts to refute other proofs. let post = proof_arr .delete(idx) - .map_err(|e| e.downcast_wrap(format!("failed to retrieve proof {}", idx)))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to retrieve proof {}", idx) + })? .ok_or_else(|| actor_error!(illegal_argument, "proof {} not found", idx))?; - let root = proof_arr.flush().map_err(|e| e.downcast_wrap("failed to save proofs"))?; + let root = + proof_arr.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save proofs")?; self.optimistic_post_submissions_snapshot = root; Ok((post.partitions, post.proofs)) } @@ -1307,7 +1283,7 @@ impl Deadline { partition_sectors: &mut PartitionSectorMap, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut partitions = self.partitions_amt(store)?; // track partitions with moved expirations. 
@@ -1315,9 +1291,11 @@ impl Deadline { let mut all_replaced = Vec::new(); for (partition_idx, sector_numbers) in partition_sectors.iter() { - let mut partition = match partitions.get(partition_idx).map_err(|e| { - e.downcast_wrap(format!("failed to load partition {}", partition_idx)) - })? { + let mut partition = match partitions + .get(partition_idx) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? { Some(partition) => partition.clone(), None => { // We failed to find the partition, it could have moved @@ -1336,11 +1314,8 @@ impl Deadline { sector_size, quant, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to reschedule expirations in partition {}", - partition_idx - )) + .with_context(|| { + format!("failed to reschedule expirations in partition {}", partition_idx) })?; if replaced.is_empty() { @@ -1350,17 +1325,20 @@ impl Deadline { all_replaced.extend(replaced); rescheduled_partitions.push(partition_idx); - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_wrap(format!("failed to store partition {}", partition_idx)) - })?; + partitions + .set(partition_idx, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; } if !rescheduled_partitions.is_empty() { - self.partitions = - partitions.flush().map_err(|e| e.downcast_wrap("failed to save partitions"))?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save partitions")?; self.add_expiration_partitions(store, expiration, &rescheduled_partitions, quant) - .map_err(|e| e.downcast_wrap("failed to reschedule partition expirations"))?; + .context("failed to reschedule partition expirations")?; } Ok(all_replaced) diff --git a/actors/miner/src/deadlines.rs b/actors/miner/src/deadlines.rs index 3e625f908..625873d8b 100644 --- a/actors/miner/src/deadlines.rs +++ b/actors/miner/src/deadlines.rs @@ -2,10 +2,11 
@@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::Array; +use fil_actors_runtime::{actor_error, ActorContext2, ActorError, Array}; use fvm_ipld_blockstore::Blockstore; use fvm_shared::clock::{ChainEpoch, QuantSpec}; +use fvm_shared::error::ExitCode; use fvm_shared::sector::SectorNumber; use super::{DeadlineInfo, Deadlines, Partition}; @@ -36,29 +37,32 @@ impl Deadlines { policy: &Policy, store: &BS, sector_number: SectorNumber, - ) -> anyhow::Result<(u64, u64)> { + ) -> Result<(u64, u64), ActorError> { for i in 0..self.due.len() { let deadline_idx = i as u64; let deadline = self.load_deadline(policy, store, deadline_idx)?; - let partitions = Array::::load(&deadline.partitions, store)?; + let partitions = Array::::load(&deadline.partitions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut partition_idx = None; - partitions.for_each_while(|i, partition| { - if partition.sectors.get(sector_number) { - partition_idx = Some(i); - Ok(false) - } else { - Ok(true) - } - })?; + partitions + .for_each_while(|i, partition| { + if partition.sectors.get(sector_number) { + partition_idx = Some(i); + false + } else { + true + } + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; if let Some(partition_idx) = partition_idx { return Ok((deadline_idx, partition_idx)); } } - Err(anyhow::anyhow!("sector {} not due at any deadline", sector_number)) + Err(actor_error!(illegal_state, "sector {} not due at any deadline", sector_number)) } } diff --git a/actors/miner/src/expiration_queue.rs b/actors/miner/src/expiration_queue.rs index 8f7bb977e..620f4bee2 100644 --- a/actors/miner/src/expiration_queue.rs +++ b/actors/miner/src/expiration_queue.rs @@ -4,10 +4,9 @@ use std::collections::{BTreeMap, BTreeSet}; use std::convert::TryInto; -use anyhow::{anyhow, Context}; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{ActorDowncast, Array}; +use fil_actors_runtime::{actor_error, ActorContext, 
ActorContext2, ActorError, Array}; use fvm_ipld_amt::{Error as AmtError, ValueMut}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -15,6 +14,7 @@ use fvm_ipld_encoding::tuple::*; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{SectorNumber, SectorSize}; use num_traits::{Signed, Zero}; @@ -59,7 +59,7 @@ impl ExpirationSet { on_time_pledge: &TokenAmount, active_power: &PowerPair, faulty_power: &PowerPair, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.on_time_sectors |= on_time_sectors; self.early_sectors |= early_sectors; self.on_time_pledge += on_time_pledge; @@ -78,17 +78,19 @@ impl ExpirationSet { on_time_pledge: &TokenAmount, active_power: &PowerPair, faulty_power: &PowerPair, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Check for sector intersection. This could be cheaper with a combined intersection/difference method used below. if !self.on_time_sectors.contains_all(on_time_sectors) { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "removing on-time sectors {:?} not contained in {:?}", on_time_sectors, self.on_time_sectors )); } if !self.early_sectors.contains_all(early_sectors) { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "removing early sectors {:?} not contained in {:?}", early_sectors, self.early_sectors @@ -103,10 +105,10 @@ impl ExpirationSet { // Check underflow. 
if self.on_time_pledge.is_negative() { - return Err(anyhow!("expiration set pledge underflow: {:?}", self)); + return Err(actor_error!(illegal_state, "expiration set pledge underflow: {:?}", self)); } if self.active_power.qa.is_negative() || self.faulty_power.qa.is_negative() { - return Err(anyhow!("expiration set power underflow: {:?}", self)); + return Err(actor_error!(illegal_state, "expiration set power underflow: {:?}", self)); } self.validate_state()?; Ok(()) @@ -124,25 +126,37 @@ impl ExpirationSet { } /// validates a set of assertions that must hold for expiration sets - pub fn validate_state(&self) -> anyhow::Result<()> { + pub fn validate_state(&self) -> Result<(), ActorError> { if self.on_time_pledge.is_negative() { - return Err(anyhow!("ExpirationSet left with negative pledge")); + return Err(actor_error!(illegal_state, "ExpirationSet left with negative pledge")); } if self.active_power.raw.is_negative() { - return Err(anyhow!("ExpirationSet left with negative raw active power")); + return Err(actor_error!( + illegal_state, + "ExpirationSet left with negative raw active power" + )); } if self.active_power.qa.is_negative() { - return Err(anyhow!("ExpirationSet left with negative qa active power")); + return Err(actor_error!( + illegal_state, + "ExpirationSet left with negative qa active power" + )); } if self.faulty_power.raw.is_negative() { - return Err(anyhow!("ExpirationSet left with negative raw faulty power")); + return Err(actor_error!( + illegal_state, + "ExpirationSet left with negative raw faulty power" + )); } if self.faulty_power.qa.is_negative() { - return Err(anyhow!("ExpirationSet left with negative qa faulty power")); + return Err(actor_error!( + illegal_state, + "ExpirationSet left with negative qa faulty power" + )); } Ok(()) @@ -162,7 +176,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { /// /// Epochs provided to subsequent method calls will be quantized upwards to quanta mod offsetSeed before being /// written to/read from 
queue entries. - pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result { + pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { Ok(Self { amt: Array::load(root, store)?, quant }) } @@ -173,13 +187,14 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, sectors: impl IntoIterator, sector_size: SectorSize, - ) -> anyhow::Result<(BitField, PowerPair, TokenAmount)> { + ) -> Result<(BitField, PowerPair, TokenAmount), ActorError> { let mut total_power = PowerPair::zero(); let mut total_pledge = TokenAmount::zero(); let mut total_sectors = Vec::::new(); for group in group_new_sectors_by_declared_expiration(sector_size, sectors, self.quant) { - let sector_numbers = BitField::try_from_bits(group.sectors)?; + let sector_numbers = + BitField::try_from_bits(group.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; self.add( group.epoch, @@ -189,7 +204,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &PowerPair::zero(), &group.pledge, ) - .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + .context("failed to record new sector expirations")?; total_sectors.push(sector_numbers); total_power += &group.power; @@ -209,14 +224,14 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { new_expiration: ChainEpoch, sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if sectors.is_empty() { return Ok(()); } let (sector_numbers, power, pledge) = self .remove_active_sectors(sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove sector expirations"))?; + .context("failed to remove sector expirations")?; self.add( new_expiration, @@ -226,7 +241,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &PowerPair::zero(), &pledge, ) - .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + .context("failed to record new sector expirations")?; Ok(()) } @@ -240,7 +255,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, 
BS> { new_expiration: ChainEpoch, sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result { + ) -> Result { let mut sectors_total = Vec::new(); let mut expiring_power = PowerPair::zero(); let mut rescheduled_power = PowerPair::zero(); @@ -259,7 +274,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { } else { // Remove sectors from on-time expiry and active power. let sectors_bitfield = - BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; group.expiration_set.on_time_sectors -= §ors_bitfield; group.expiration_set.on_time_pledge -= &group.sector_epoch_set.pledge; group.expiration_set.active_power -= &group.sector_epoch_set.power; @@ -276,7 +292,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { if !sectors_total.is_empty() { // Add sectors to new expiration as early-terminating and faulty. - let early_sectors = BitField::try_from_bits(sectors_total)?; + let early_sectors = + BitField::try_from_bits(sectors_total).exit_code(ExitCode::USR_SERIALIZATION)?; self.add( new_expiration, &BitField::new(), @@ -291,39 +308,47 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { } /// Re-schedules *all* sectors to expire at an early expiration epoch, if they wouldn't expire before then anyway. 
- pub fn reschedule_all_as_faults(&mut self, fault_expiration: ChainEpoch) -> anyhow::Result<()> { + pub fn reschedule_all_as_faults( + &mut self, + fault_expiration: ChainEpoch, + ) -> Result<(), ActorError> { let mut rescheduled_epochs = Vec::::new(); let mut rescheduled_sectors = BitField::new(); let mut rescheduled_power = PowerPair::zero(); let mut mutated_expiration_sets = Vec::<(ChainEpoch, ExpirationSet)>::new(); - self.amt.for_each(|e, expiration_set| { - let epoch: ChainEpoch = e.try_into()?; - - if epoch <= self.quant.quantize_up(fault_expiration) { - let mut expiration_set = expiration_set.clone(); - - // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. - // Pledge is still on-time. - expiration_set.faulty_power += &expiration_set.active_power; - expiration_set.active_power = PowerPair::zero(); - mutated_expiration_sets.push((epoch, expiration_set)); - } else { - rescheduled_epochs.push(e); - // sanity check to make sure we're not trying to re-schedule already faulty sectors. - if !expiration_set.early_sectors.is_empty() { - return Err(anyhow!( - "attempted to re-schedule early expirations to an earlier epoch" - )); + self.amt + .try_for_each(|e, expiration_set| { + let epoch: ChainEpoch = + e.try_into().map_err(|e| actor_error!(illegal_state, "{}", e))?; + + if epoch <= self.quant.quantize_up(fault_expiration) { + let mut expiration_set = expiration_set.clone(); + + // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. + // Pledge is still on-time. + expiration_set.faulty_power += &expiration_set.active_power; + expiration_set.active_power = PowerPair::zero(); + mutated_expiration_sets.push((epoch, expiration_set)); + } else { + rescheduled_epochs.push(e); + // sanity check to make sure we're not trying to re-schedule already faulty sectors. + if !expiration_set.early_sectors.is_empty() { + // TODO: correct exit code? 
+ return Err(actor_error!( + illegal_state, + "attempted to re-schedule early expirations to an earlier epoch" + )); + } + rescheduled_sectors |= &expiration_set.on_time_sectors; + rescheduled_power += &expiration_set.active_power; + rescheduled_power += &expiration_set.faulty_power; } - rescheduled_sectors |= &expiration_set.on_time_sectors; - rescheduled_power += &expiration_set.active_power; - rescheduled_power += &expiration_set.faulty_power; - } - Ok(()) - })?; + Ok(()) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; for (epoch, expiration_set) in mutated_expiration_sets { let res = expiration_set.validate_state(); @@ -347,7 +372,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { )?; // Trim the rescheduled epochs from the queue. - self.amt.batch_delete(rescheduled_epochs, true)?; + self.amt.batch_delete(rescheduled_epochs, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -361,7 +386,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, sectors: Vec, sector_size: SectorSize, - ) -> anyhow::Result { + ) -> Result { let mut remaining: BTreeSet = sectors.iter().map(|sector| sector.sector_number).collect(); @@ -375,14 +400,14 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let on_time_sectors: BTreeSet = expiration_set .on_time_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many sectors to reschedule")? + .ok_or_else(|| actor_error!(illegal_argument, "too many sectors to reschedule"))? .map(|i| i as SectorNumber) .collect(); let early_sectors: BTreeSet = expiration_set .early_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many sectors to reschedule")? + .ok_or_else(|| actor_error!(illegal_argument, "too many sectors to reschedule"))? 
.map(|i| i as SectorNumber) .collect(); @@ -423,7 +448,11 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { })?; if !remaining.is_empty() { - return Err(anyhow!("sectors not found in expiration queue: {:?}", remaining)); + return Err(actor_error!( + not_found, + "sectors not found in expiration queue: {:?}", + remaining + )); } // Re-schedule the removed sectors to their target expiration. @@ -441,14 +470,14 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { old_sectors: &[SectorOnChainInfo], new_sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result<(BitField, BitField, PowerPair, TokenAmount)> { + ) -> Result<(BitField, BitField, PowerPair, TokenAmount), ActorError> { let (old_sector_numbers, old_power, old_pledge) = self .remove_active_sectors(old_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove replaced sectors"))?; + .context("failed to remove replaced sectors")?; let (new_sector_numbers, new_power, new_pledge) = self .add_active_sectors(new_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to add replacement sectors"))?; + .context("failed to add replacement sectors")?; Ok(( old_sector_numbers, @@ -469,20 +498,20 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { faults: &BitField, recovering: &BitField, sector_size: SectorSize, - ) -> anyhow::Result<(ExpirationSet, PowerPair)> { + ) -> Result<(ExpirationSet, PowerPair), ActorError> { let mut remaining: BTreeSet<_> = sectors.iter().map(|sector| sector.sector_number).collect(); // ADDRESSED_SECTORS_MAX is defined as 25000, so this will not error. let faults_map: BTreeSet<_> = faults .bounded_iter(policy.addressed_sectors_max) - .context("too many faults to expand")? + .ok_or_else(|| actor_error!(illegal_argument, "too many faults to expand"))? .map(|i| i as SectorNumber) .collect(); let recovering_map: BTreeSet<_> = recovering .bounded_iter(policy.addressed_sectors_max) - .context("too many recoveries to expand")? 
+ .ok_or_else(|| actor_error!(illegal_argument, "too many recoveries to expand"))? .map(|i| i as SectorNumber) .collect(); @@ -511,7 +540,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { // Remove non-faulty sectors. let (removed_sector_numbers, removed_power, removed_pledge) = self .remove_active_sectors(&non_faulty_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove on-time recoveries"))?; + .context("failed to remove on-time recoveries")?; removed.on_time_sectors = removed_sector_numbers; removed.active_power = removed_power; removed.on_time_pledge = removed_pledge; @@ -524,14 +553,16 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let on_time_sectors: BTreeSet = expiration_set .on_time_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many on-time sectors to expand")? + .ok_or_else(|| { + actor_error!(illegal_argument, "too many on-time sectors to expand") + })? .map(|i| i as SectorNumber) .collect(); let early_sectors: BTreeSet = expiration_set .early_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many early sectors to expand")? + .ok_or_else(|| actor_error!(illegal_argument, "too many early sectors to expand"))? .map(|i| i as SectorNumber) .collect(); @@ -580,14 +611,18 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { })?; if !remaining.is_empty() { - return Err(anyhow!("sectors not found in expiration queue: {:?}", remaining)); + return Err(actor_error!( + not_found, + "sectors not found in expiration queue: {:?}", + remaining + )); } Ok((removed, recovering_power)) } /// Removes and aggregates entries from the queue up to and including some epoch. 
- pub fn pop_until(&mut self, until: ChainEpoch) -> anyhow::Result { + pub fn pop_until(&mut self, until: ChainEpoch) -> Result { let mut on_time_sectors = BitField::new(); let mut early_sectors = BitField::new(); let mut active_power = PowerPair::zero(); @@ -595,22 +630,24 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let mut on_time_pledge = TokenAmount::zero(); let mut popped_keys = Vec::::new(); - self.amt.for_each_while(|i, this_value| { - if i as ChainEpoch > until { - return Ok(false); - } + self.amt + .for_each_while(|i, this_value| { + if i as ChainEpoch > until { + return false; + } - popped_keys.push(i); - on_time_sectors |= &this_value.on_time_sectors; - early_sectors |= &this_value.early_sectors; - active_power += &this_value.active_power; - faulty_power += &this_value.faulty_power; - on_time_pledge += &this_value.on_time_pledge; + popped_keys.push(i); + on_time_sectors |= &this_value.on_time_sectors; + early_sectors |= &this_value.early_sectors; + active_power += &this_value.active_power; + faulty_power += &this_value.faulty_power; + on_time_pledge += &this_value.on_time_pledge; - Ok(true) - })?; + true + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.amt.batch_delete(popped_keys, true)?; + self.amt.batch_delete(popped_keys, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(ExpirationSet { on_time_sectors, @@ -629,13 +666,13 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { active_power: &PowerPair, faulty_power: &PowerPair, pledge: &TokenAmount, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let epoch = self.quant.quantize_up(raw_epoch); let mut expiration_set = self.may_get(epoch)?; expiration_set .add(on_time_sectors, early_sectors, pledge, active_power, faulty_power) - .map_err(|e| anyhow!("failed to add expiration values for epoch {}: {}", epoch, e))?; + .with_context(|| format!("failed to add expiration values for epoch {}", epoch))?; self.must_update(epoch, expiration_set)?; Ok(()) @@ -649,18 +686,22 @@ 
impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { active_power: &PowerPair, faulty_power: &PowerPair, pledge: &TokenAmount, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let epoch = self.quant.quantize_up(raw_epoch); let mut expiration_set = self .amt - .get(epoch.try_into()?) - .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", epoch)))? - .ok_or_else(|| anyhow!("missing expected expiration set at epoch {}", epoch))? + .get(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup queue epoch {}", epoch) + })? + .ok_or_else(|| { + actor_error!(illegal_state, "missing expected expiration set at epoch {}", epoch) + })? .clone(); expiration_set .remove(on_time_sectors, early_sectors, pledge, active_power, faulty_power) - .map_err(|e| { - anyhow!("failed to remove expiration values for queue epoch {}: {}", epoch, e) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to remove expiration values for queue epoch {}", epoch) })?; self.must_update_or_delete(epoch, expiration_set)?; @@ -671,7 +712,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result<(BitField, PowerPair, TokenAmount)> { + ) -> Result<(BitField, PowerPair, TokenAmount), ActorError> { let mut removed_sector_numbers = Vec::::new(); let mut removed_power = PowerPair::zero(); let mut removed_pledge = TokenAmount::zero(); @@ -680,7 +721,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let groups = self.find_sectors_by_expiration(sector_size, sectors)?; for group in groups { let sectors_bitfield = - BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; self.remove( group.sector_epoch_set.epoch, §ors_bitfield, @@ -696,7 +738,12 @@ 
impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { removed_pledge += &group.sector_epoch_set.pledge; } - Ok((BitField::try_from_bits(removed_sector_numbers)?, removed_power, removed_pledge)) + Ok(( + BitField::try_from_bits(removed_sector_numbers) + .exit_code(ExitCode::USR_SERIALIZATION)?, + removed_power, + removed_pledge, + )) } /// Traverses the entire queue with a callback function that may mutate entries. @@ -707,32 +754,37 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { mut f: impl FnMut( ChainEpoch, &mut ValueMut<'_, ExpirationSet>, - ) -> anyhow::Result, - ) -> anyhow::Result<()> { + ) -> Result, + ) -> Result<(), ActorError> { let mut epochs_emptied = Vec::::new(); - self.amt.for_each_while_mut(|e, expiration_set| { - let keep_going = f(e.try_into()?, expiration_set)?; - - if expiration_set.is_empty() { - // Mark expiration set as unchanged, it will be removed after the iteration. - expiration_set.mark_unchanged(); - epochs_emptied.push(e); - } + self.amt + .try_for_each_while_mut::<_, ActorError>(|e, expiration_set| { + let keep_going = + f(e.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set)?; + + if expiration_set.is_empty() { + // Mark expiration set as unchanged, it will be removed after the iteration. + expiration_set.mark_unchanged(); + epochs_emptied.push(e); + } - Ok(keep_going) - })?; + Ok(keep_going) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.amt.batch_delete(epochs_emptied, true)?; + self.amt.batch_delete(epochs_emptied, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } - fn may_get(&self, key: ChainEpoch) -> anyhow::Result { + fn may_get(&self, key: ChainEpoch) -> Result { Ok(self .amt - .get(key.try_into()?) - .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", key)))? + .get(key.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup queue epoch {}", key) + })? 
.cloned() .unwrap_or_default()) } @@ -741,10 +793,12 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, epoch: ChainEpoch, expiration_set: ExpirationSet, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.amt - .set(epoch.try_into()?, expiration_set) - .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch))) + .set(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set queue epoch {}", epoch) + }) } /// Since this might delete the node, it's not safe for use inside an iteration. @@ -752,15 +806,19 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, epoch: ChainEpoch, expiration_set: ExpirationSet, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if expiration_set.is_empty() { self.amt - .delete(epoch.try_into()?) - .map_err(|e| e.downcast_wrap(format!("failed to delete queue epoch {}", epoch)))?; + .delete(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete queue epoch {}", epoch) + })?; } else { self.amt - .set(epoch.try_into()?, expiration_set) - .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch)))?; + .set(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set queue epoch {}", epoch) + })?; } Ok(()) @@ -775,7 +833,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &self, sector_size: SectorSize, sectors: &[SectorOnChainInfo], - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut declared_expirations = BTreeMap::::new(); let mut sectors_by_number = BTreeMap::::new(); let mut all_remaining = BTreeSet::::new(); @@ -807,38 +865,40 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { // If sectors remain, traverse next in epoch order. 
Remaining sectors should be // rescheduled to expire soon, so this traversal should exit early. if !all_remaining.is_empty() { - self.amt.for_each_while(|epoch, es| { - let epoch = epoch as ChainEpoch; - // If this set's epoch is one of our declared epochs, we've already processed it - // in the loop above, so skip processing here. Sectors rescheduled to this epoch - // would have been included in the earlier processing. - if declared_expirations.contains_key(&epoch) { - return Ok(true); - } + self.amt + .try_for_each_while::<_, ActorError>(|epoch, es| { + let epoch = epoch as ChainEpoch; + // If this set's epoch is one of our declared epochs, we've already processed it + // in the loop above, so skip processing here. Sectors rescheduled to this epoch + // would have been included in the earlier processing. + if declared_expirations.contains_key(&epoch) { + return Ok(true); + } - // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption - // of grouping is that it only returns sectors with active power. ExpirationQueue should not - // provide operations that allow this to happen. - check_no_early_sectors(&all_remaining, es)?; - - let group = group_expiration_set( - sector_size, - §ors_by_number, - &mut all_remaining, - es.clone(), - epoch, - ); - - if !group.sector_epoch_set.sectors.is_empty() { - expiration_groups.push(group); - } + // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption + // of grouping is that it only returns sectors with active power. ExpirationQueue should not + // provide operations that allow this to happen. 
+ check_no_early_sectors(&all_remaining, es)?; + + let group = group_expiration_set( + sector_size, + §ors_by_number, + &mut all_remaining, + es.clone(), + epoch, + ); + + if !group.sector_epoch_set.sectors.is_empty() { + expiration_groups.push(group); + } - Ok(!all_remaining.is_empty()) - })?; + Ok(!all_remaining.is_empty()) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; } if !all_remaining.is_empty() { - return Err(anyhow!("some sectors not found in expiration queue")); + return Err(actor_error!(not_found, "some sectors not found in expiration queue")); } // The built-in stable sort is timsort. It will find the two sorted runs and merge them. @@ -939,10 +999,14 @@ fn group_expiration_set( } /// Checks for invalid overlap between bitfield and a set's early sectors. -fn check_no_early_sectors(set: &BTreeSet, es: &ExpirationSet) -> anyhow::Result<()> { +fn check_no_early_sectors(set: &BTreeSet, es: &ExpirationSet) -> Result<(), ActorError> { for u in es.early_sectors.iter() { if set.contains(&(u as u64)) { - return Err(anyhow!("Invalid attempt to group sector {} with an early expiration", u)); + return Err(actor_error!( + illegal_argument, + "Invalid attempt to group sector {} with an early expiration", + u + )); } } Ok(()) diff --git a/actors/miner/src/lib.rs b/actors/miner/src/lib.rs index 2722108f7..3ff93c2e1 100644 --- a/actors/miner/src/lib.rs +++ b/actors/miner/src/lib.rs @@ -6,7 +6,6 @@ use std::collections::BTreeMap; use std::iter; use std::ops::Neg; -use anyhow::{anyhow, Error}; pub use bitfield_queue::*; use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; use cid::multihash::Code; @@ -18,8 +17,8 @@ pub use deadlines::*; pub use expiration_queue::*; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorDowncast, ActorError, BURNT_FUNDS_ACTOR_ADDR, INIT_ACTOR_ADDR, - REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorContext2, 
ActorError, BURNT_FUNDS_ACTOR_ADDR, + INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, }; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use fvm_ipld_blockstore::Blockstore; @@ -151,12 +150,7 @@ impl Actor { let blake2b = |b: &[u8]| rt.hash_blake2b(b); let offset = assign_proving_period_offset(policy, rt.message().receiver(), current_epoch, blake2b) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_SERIALIZATION, - "failed to assign proving period offset", - ) - })?; + .context("failed to assign proving period offset")?; let period_start = current_proving_period_start(policy, current_epoch, offset); if period_start > current_epoch { @@ -185,14 +179,14 @@ impl Actor { params.multi_addresses, params.window_post_proof_type, )?; - let info_cid = rt.store().put_cbor(&info, Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + let info_cid = rt + .store() + .put_cbor(&info, Blake2b256) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; + + let st = State::new(policy, rt.store(), info_cid, period_start, deadline_idx) + .context("failed to construct state")?; - let st = - State::new(policy, rt.store(), info_cid, period_start, deadline_idx).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct state") - })?; rt.create(&st)?; Ok(()) @@ -250,9 +244,7 @@ impl Actor { }) } - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "could not save miner info") - })?; + state.save_info(rt.store(), &info).context("could not save miner info")?; Ok(()) })?; @@ -321,9 +313,7 @@ impl Actor { } } - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save miner info") - })?; + state.save_info(rt.store(), &info).context("failed to save miner info")?; Ok(()) }) @@ -345,9 +335,7 @@ impl Actor { )?; 
info.peer_id = params.new_id; - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "could not save miner info") - })?; + state.save_info(rt.store(), &info).context("could not save miner info")?; Ok(()) })?; @@ -373,9 +361,7 @@ impl Actor { )?; info.multi_address = params.new_multi_addrs; - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "could not save miner info") - })?; + state.save_info(rt.store(), &info).context("could not save miner info")?; Ok(()) })?; @@ -538,20 +524,15 @@ impl Actor { return Err(actor_error!(illegal_argument, "post commit randomness mismatched")); } - let sectors = Sectors::load(rt.store(), &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors") - })?; + let sectors = Sectors::load(rt.store(), &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; let mut deadlines = - state.load_deadlines(rt.store()).map_err(|e| e.wrap("failed to load deadlines"))?; + state.load_deadlines(rt.store()).context("failed to load deadlines")?; - let mut deadline = - deadlines.load_deadline(rt.policy(), rt.store(), params.deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", params.deadline), - ) - })?; + let mut deadline = deadlines + .load_deadline(rt.policy(), rt.store(), params.deadline) + .with_context(|| format!("failed to load deadline {}", params.deadline))?; // Record proven sectors/partitions, returning updates to power and the final set of sectors // proven/skipped. 
@@ -573,14 +554,8 @@ impl Actor { fault_expiration, &mut params.partitions, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "failed to process post submission for deadline {}", - params.deadline - ), - ) + .with_context(|| { + format!("failed to process post submission for deadline {}", params.deadline) })?; // Make sure we actually proved something. @@ -600,41 +575,25 @@ impl Actor { if post_result.recovered_power.is_zero() { deadline .record_post_proofs(rt.store(), &post_result.partitions, ¶ms.proofs) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to record proof for optimistic verification", - ) - })? + .context("failed to record proof for optimistic verification")? } else { // Load sector infos for proof, substituting a known-good sector for known-faulty sectors. // Note: this is slightly sub-optimal, loading info for the recovering sectors again after they were already // loaded above. let sector_infos = sectors .load_for_proof(&post_result.sectors, &post_result.ignored_sectors) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load sectors for post verification", - ) - })?; + .context("failed to load sectors for post verification")?; + verify_windowed_post(rt, current_deadline.challenge, §or_infos, params.proofs) - .map_err(|e| e.wrap("window post failed"))?; + .context("window post failed")?; } let deadline_idx = params.deadline; - deadlines.update_deadline(policy, rt.store(), params.deadline, &deadline).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", deadline_idx), - ) - }, - )?; + deadlines + .update_deadline(policy, rt.store(), params.deadline, &deadline) + .with_context(|| format!("failed to update deadline {}", deadline_idx))?; - state.save_deadlines(rt.store(), deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + 
state.save_deadlines(rt.store(), deadlines).context("failed to save deadlines")?; Ok(post_result) })?; @@ -646,7 +605,7 @@ impl Actor { request_update_power(rt, post_result.power_delta)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -699,10 +658,9 @@ impl Actor { info.control_addresses.iter().chain(&[info.worker, info.owner]), )?; let store = rt.store(); - let precommits = - state.get_all_precommitted_sectors(store, sector_numbers).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get precommits") - })?; + let precommits = state + .get_all_precommitted_sectors(store, sector_numbers) + .context("failed to get precommits")?; // compute data commitments and validate each precommit let mut compute_data_commitments_inputs = Vec::with_capacity(precommits.len()); @@ -804,9 +762,7 @@ impl Actor { proof: params.aggregate_proof, infos: svis, }) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "aggregate seal verify failed") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "aggregate seal verify failed")?; let rew = request_current_epoch_block_reward(rt)?; let pwr = request_current_total_power(rt)?; @@ -835,7 +791,7 @@ impl Actor { )); } burn_funds(rt, aggregate_fee)?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -868,9 +824,8 @@ impl Actor { )?; let sector_store = rt.store().clone(); - let mut sectors = Sectors::load(§or_store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let mut sectors = Sectors::load(§or_store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = 
TokenAmount::zero(); @@ -984,7 +939,8 @@ impl Actor { RawBytes::serialize(ext::market::ActivateDealsParams { deal_ids: update.deals.clone(), sector_expiry: sector_info.expiration, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); @@ -1072,21 +1028,11 @@ impl Actor { for &dl_idx in deadlines_to_load.iter() { let mut deadline = deadlines .load_deadline(rt.policy(),rt.store(), dl_idx) - .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", dl_idx), - ) - )?; + .with_context(|| format!("failed to load deadline {}", dl_idx))?; let mut partitions = deadline .partitions_amt(rt.store()) - .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partitions for deadline {}", dl_idx), - ) - )?; + .with_context(||format!("failed to load partitions for deadline {}", dl_idx))?; let quant = state.quant_spec_for_deadline(rt.policy(),dl_idx); @@ -1114,13 +1060,9 @@ impl Actor { new_unsealed_cid: with_details.unsealed_cid, proof: with_details.update.replica_proof.clone(), } - ) - .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!("failed to verify replica proof for sector {}", with_details.sector_info.sector_number), - ) - )?; + ).with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to verify replica proof for sector {}", with_details.sector_info.sector_number) + })?; let mut new_sector_info = with_details.sector_info.clone(); @@ -1201,12 +1143,7 @@ impl Actor { let mut partition = partitions .get(with_details.update.partition) - .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {} partition {}", with_details.update.deadline, with_details.update.partition), - ) - )? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || format!("failed to load deadline {} partition {}", with_details.update.deadline, with_details.update.partition))? 
.cloned() .ok_or_else(|| actor_error!(not_found, "no such deadline {} partition {}", dl_idx, with_details.update.partition))?; @@ -1217,44 +1154,28 @@ impl Actor { info.sector_size, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to replace sector at deadline {} partition {}", with_details.update.deadline, with_details.update.partition), - ) - })?; + .with_context(|| format!("failed to replace sector at deadline {} partition {}", with_details.update.deadline, with_details.update.partition))?; power_delta += &partition_power_delta; pledge_delta += &partition_pledge_delta; partitions .set(with_details.update.partition, partition) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {} partition {}", with_details.update.deadline, with_details.update.partition), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save deadline {} partition {}", with_details.update.deadline, with_details.update.partition) })?; succeeded.push(new_sector_info.sector_number); new_sectors.push(new_sector_info); } - deadline.partitions = partitions.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save partitions for deadline {}", dl_idx), - ) + deadline.partitions = partitions.flush().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save partitions for deadline {}", dl_idx) })?; deadlines .update_deadline(rt.policy(), rt.store(), dl_idx, &deadline) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {}", dl_idx), - ) - })?; + .with_context(|| format!("failed to save deadline {}", dl_idx))?; } let success_len = succeeded.len(); @@ -1276,19 +1197,10 @@ impl Actor { } // Overwrite sector infos. 
- sectors.store(new_sectors).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update sector infos", - ) - })?; + sectors.store(new_sectors).context("failed to update sector infos")?; - state.sectors = sectors.amt.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors") - })?; - state.save_deadlines(rt.store(), deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.sectors = sectors.amt.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors")?; + state.save_deadlines(rt.store(), deadlines).context("failed to save deadlines")?; BitField::try_from_bits(succeeded).map_err(|_|{ actor_error!(illegal_argument; "invalid sector number") @@ -1369,52 +1281,34 @@ impl Actor { let mut dl_current = deadlines_current .load_deadline(policy, rt.store(), params.deadline) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deadline") - })?; + .context("failed to load deadline")?; // Take the post from the snapshot for dispute. // This operation REMOVES the PoSt from the snapshot so // it can't be disputed again. If this method fails, // this operation must be rolled back. - let (partitions, proofs) = - dl_current.take_post_proofs(rt.store(), params.post_index).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load proof for dispute", - ) - })?; + let (partitions, proofs) = dl_current + .take_post_proofs(rt.store(), params.post_index) + .context("failed to load proof for dispute")?; // Load the partition info we need for the dispute. let mut dispute_info = dl_current .load_partitions_for_dispute(rt.store(), partitions) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load partition for dispute", - ) - })?; + .context("failed to load partition for dispute")?; // This includes power that is no longer active (e.g., due to sector terminations). 
// It must only be used for penalty calculations, not power adjustments. let penalised_power = dispute_info.disputed_power.clone(); // Load sectors for the dispute. - let sectors = - Sectors::load(rt.store(), &dl_current.sectors_snapshot).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load sectors array", - ) - })?; + let sectors = Sectors::load(rt.store(), &dl_current.sectors_snapshot) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let sector_infos = sectors .load_for_proof(&dispute_info.all_sector_nos, &dispute_info.ignored_sector_nos) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load sectors to dispute window post", - ) - })?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load sectors to dispute window post", + )?; // Check proof, we fail if validation succeeds. if verify_windowed_post(rt, target_deadline.challenge, §or_infos, proofs)? { @@ -1439,22 +1333,14 @@ impl Actor { fault_expiration_epoch, &mut dispute_info.disputed_sectors, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to declare faults") - })?; + .context("failed to declare faults")?; deadlines_current .update_deadline(policy, rt.store(), params.deadline, &dl_current) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", params.deadline), - ) - })?; + .with_context(|| format!("failed to update deadline {}", params.deadline))?; - st.save_deadlines(rt.store(), deadlines_current).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + st.save_deadlines(rt.store(), deadlines_current) + .context("failed to save deadlines")?; // --- penalties --- @@ -1483,9 +1369,7 @@ impl Actor { current_epoch, &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to pay debt") - })?; + .context("failed to pay debt")?; let to_burn 
= &penalty_from_vesting + &penalty_from_balance; @@ -1510,7 +1394,7 @@ impl Actor { notify_pledge_changed(rt, &pledge_delta)?; let st: State = rt.state()?; - st.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + st.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -1752,13 +1636,9 @@ impl Actor { e.wrap("failed to allocate sector numbers") )?; state.put_precommitted_sectors(store, chain_infos) - .map_err(|e| - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to write pre-committed sectors") - )?; + .context("failed to write pre-committed sectors")?; state.add_pre_commit_clean_ups(rt.policy(), store, clean_up_events) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add pre-commit expiry to queue") - })?; + .context("failed to add pre-commit expiry to queue")?; // Activate miner cron needs_cron = !state.deadline_cron_active; state.deadline_cron_active = true; @@ -1766,7 +1646,7 @@ impl Actor { })?; burn_funds(rt, fee_to_burn)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; if needs_cron { let new_dl_info = state.deadline_info(rt.policy(), curr_epoch); enroll_cron_event( @@ -1800,11 +1680,8 @@ impl Actor { let st: State = rt.state()?; let precommit = st .get_precommitted_sector(rt.store(), sector_number) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load pre-committed sector {}", sector_number), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load pre-committed sector {}", sector_number) })? 
.ok_or_else(|| actor_error!(not_found, "no pre-commited sector {}", sector_number))?; @@ -1861,7 +1738,7 @@ impl Actor { rt.send( *STORAGE_POWER_ACTOR_ADDR, ext::power::SUBMIT_POREP_FOR_BULK_VERIFY_METHOD, - RawBytes::serialize(&svi)?, + RawBytes::serialize(&svi).exit_code(ExitCode::USR_ILLEGAL_STATE)?, BigInt::zero(), )?; @@ -1890,13 +1767,9 @@ impl Actor { let st: State = rt.state()?; let store = rt.store(); // This skips missing pre-commits. - let precommited_sectors = - st.find_precommitted_sectors(store, ¶ms.sectors).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pre-committed sectors", - ) - })?; + let precommited_sectors = st + .find_precommitted_sectors(store, ¶ms.sectors) + .context("failed to load pre-committed sectors")?; confirm_sector_proofs_valid_internal( rt, precommited_sectors, @@ -2034,28 +1907,20 @@ impl Actor { decls.push(decl); } - let mut sectors = Sectors::load(rt.store(), &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let mut sectors = Sectors::load(rt.store(), &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = TokenAmount::zero(); for deadline_idx in deadlines_to_load { let policy = rt.policy(); - let mut deadline = - deadlines.load_deadline(policy, store, deadline_idx).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; - let mut partitions = deadline.partitions_amt(store).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partitions for deadline {}", deadline_idx), - ) + let mut partitions = deadline.partitions_amt(store).with_context(|| { + 
format!("failed to load partitions for deadline {}", deadline_idx) })?; let quant = state.quant_spec_for_deadline(policy, deadline_idx); @@ -2069,11 +1934,8 @@ impl Actor { let mut partition = partitions .get(decl.partition) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {:?}", key), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {:?}", key) })? .cloned() .ok_or_else(|| actor_error!(not_found, "no such partition {:?}", key))?; @@ -2143,32 +2005,25 @@ impl Actor { .collect::>()?; // Overwrite sector infos. - sectors.store(new_sectors.clone()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update sectors {:?}", decl.sectors), - ) - })?; + sectors + .store(new_sectors.clone()) + .with_context(|| format!("failed to update sectors {:?}", decl.sectors))?; // Remove old sectors from partition and assign new sectors. let (partition_power_delta, partition_pledge_delta) = partition .replace_sectors(store, &old_sectors, &new_sectors, info.sector_size, quant) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to replace sector expirations at {:?}", key), - ) + .with_context(|| { + format!("failed to replace sector expirations at {:?}", key) })?; power_delta += &partition_power_delta; pledge_delta += partition_pledge_delta; // expected to be zero, see note below. - partitions.set(decl.partition, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save partition {:?}", key), - ) - })?; + partitions + .set(decl.partition, partition) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save partition {:?}", key) + })?; // Record the new partition expiration epoch for setting outside this loop // over declarations. 
@@ -2183,44 +2038,34 @@ impl Actor { } } - deadline.partitions = partitions.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save partitions for deadline {}", deadline_idx), - ) - })?; + deadline.partitions = + partitions.flush().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save partitions for deadline {}", deadline_idx) + })?; // Record partitions in deadline expiration queue for epoch in epochs_to_reschedule { let p_idxs = partitions_by_new_epoch.get(&epoch).unwrap(); - deadline.add_expiration_partitions(store, epoch, p_idxs, quant).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "failed to add expiration partitions to \ - deadline {} epoch {}", - deadline_idx, epoch - ), + deadline.add_expiration_partitions(store, epoch, p_idxs, quant).with_context( + || { + format!( + "failed to add expiration partitions to deadline {} epoch {}", + deadline_idx, epoch ) }, )?; } - deadlines.update_deadline(policy, store, deadline_idx, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {}", deadline_idx), - ) - })?; + deadlines + .update_deadline(policy, store, deadline_idx, &deadline) + .with_context(|| format!("failed to save deadline {}", deadline_idx))?; } - state.sectors = sectors.amt.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors") - })?; - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.sectors = sectors + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors")?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok((power_delta, pledge_delta)) })?; @@ -2317,9 +2162,8 @@ impl Actor { // We're only reading the sectors, so there's no need to save this back. 
// However, we still want to avoid re-loading this array per-partition. - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors") - })?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; for (deadline_idx, partition_sectors) in to_process.iter() { // If the deadline is the current or next deadline to prove, don't allow terminating sectors. @@ -2338,13 +2182,9 @@ impl Actor { } let quant = state.quant_spec_for_deadline(rt.policy(), deadline_idx); - let mut deadline = - deadlines.load_deadline(rt.policy(), store, deadline_idx).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(rt.policy(), store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; let removed_power = deadline .terminate_sectors( @@ -2356,29 +2196,19 @@ impl Actor { info.sector_size, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to terminate sectors in deadline {}", deadline_idx), - ) + .with_context(|| { + format!("failed to terminate sectors in deadline {}", deadline_idx) })?; state.early_terminations.set(deadline_idx); power_delta -= &removed_power; - deadlines.update_deadline(rt.policy(), store, deadline_idx, &deadline).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", deadline_idx), - ) - }, - )?; + deadlines + .update_deadline(rt.policy(), store, deadline_idx, &deadline) + .with_context(|| format!("failed to update deadline {}", deadline_idx))?; } - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; 
Ok((had_early_terminations, power_delta)) })?; @@ -2401,7 +2231,7 @@ impl Actor { schedule_early_termination_work(rt)?; } let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; request_update_power(rt, power_delta)?; Ok(TerminateSectorsReturn { done: !more }) @@ -2462,9 +2292,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut new_fault_power_total = PowerPair::zero(); let curr_epoch = rt.curr_epoch(); @@ -2494,13 +2323,9 @@ impl Actor { ) })?; - let mut deadline = - deadlines.load_deadline(policy, store, deadline_idx).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; let fault_expiration_epoch = target_deadline.last() + policy.fault_max_age; @@ -2513,26 +2338,18 @@ impl Actor { fault_expiration_epoch, partition_map, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to declare faults for deadline {}", deadline_idx), - ) + .with_context(|| { + format!("failed to declare faults for deadline {}", deadline_idx) })?; - deadlines.update_deadline(policy, store, deadline_idx, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store deadline {} partitions", deadline_idx), - ) - })?; + deadlines.update_deadline(policy, store, deadline_idx, &deadline).with_context( + || 
format!("failed to store deadline {} partitions", deadline_idx), + )?; new_fault_power_total += &deadline_power_delta; } - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok(new_fault_power_total) })?; @@ -2616,9 +2433,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let curr_epoch = rt.curr_epoch(); for (deadline_idx, partition_map) in to_process.iter() { let policy = rt.policy(); @@ -2646,41 +2462,29 @@ impl Actor { ) })?; - let mut deadline = - deadlines.load_deadline(policy, store, deadline_idx).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; deadline .declare_faults_recovered(store, §ors, info.sector_size, partition_map) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to declare recoveries for deadline {}", deadline_idx), - ) + .with_context(|| { + format!("failed to declare recoveries for deadline {}", deadline_idx) })?; - deadlines.update_deadline(policy, store, deadline_idx, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store deadline {}", deadline_idx), - ) - })?; + deadlines + .update_deadline(policy, store, deadline_idx, &deadline) + .with_context(|| format!("failed to store deadline {}", deadline_idx))?; } - 
state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok(fee_to_burn) })?; burn_funds(rt, fee_to_burn)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; // Power is not restored yet, but when the recovered sectors are successfully PoSted. Ok(()) @@ -2755,29 +2559,19 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let mut deadline = - deadlines.load_deadline(policy, store, params_deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", params_deadline), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, params_deadline) + .with_context(|| format!("failed to load deadline {}", params_deadline))?; let (live, dead, removed_power) = - deadline.remove_partitions(store, partitions, quant).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to remove partitions from deadline {}", params_deadline), - ) + deadline.remove_partitions(store, partitions, quant).with_context(|| { + format!("failed to remove partitions from deadline {}", params_deadline) })?; - state.delete_sectors(store, &dead).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to delete dead sectors") - })?; + state.delete_sectors(store, &dead).context("failed to delete dead sectors")?; - let sectors = state.load_sector_infos(store, &live).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load moved sectors") - })?; + let sectors = + state.load_sector_infos(store, &live).context("failed to load moved sectors")?; let proven = true; let added_power = deadline .add_sectors( @@ -2788,12 +2582,7 @@ 
impl Actor { info.sector_size, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to add back moved sectors", - ) - })?; + .context("failed to add back moved sectors")?; if removed_power != added_power { return Err(actor_error!( @@ -2804,19 +2593,13 @@ impl Actor { )); } - deadlines.update_deadline(policy, store, params_deadline, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", params_deadline), - ) - })?; + deadlines + .update_deadline(policy, store, params_deadline, &deadline) + .with_context(|| format!("failed to update deadline {}", params_deadline))?; - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {}", params_deadline), - ) - })?; + state + .save_deadlines(store, deadlines) + .with_context(|| format!("failed to save deadline {}", params_deadline))?; Ok(()) })?; @@ -2943,9 +2726,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to repay penalty") - })?; + .context("failed to repay penalty")?; pledge_delta_total -= &penalty_from_vesting; let to_burn = penalty_from_vesting + penalty_from_balance; Ok((pledge_delta_total, to_burn)) @@ -2954,7 +2735,7 @@ impl Actor { notify_pledge_changed(rt, &pledge_delta_total)?; burn_funds(rt, to_burn)?; let st: State = rt.state()?; - st.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + st.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -2974,7 +2755,7 @@ impl Actor { let fault = rt .verify_consensus_fault(¶ms.header1, ¶ms.header2, ¶ms.header_extra) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "fault not verified"))? + .context_code(ExitCode::USR_ILLEGAL_STATE, "fault not verified")? 
.ok_or_else(|| actor_error!(illegal_argument, "No consensus fault found"))?; if fault.target != rt.message().receiver() { return Err(actor_error!( @@ -3031,9 +2812,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to pay fees") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to pay fees")?; let mut burn_amount = &penalty_from_vesting + &penalty_from_balance; pledge_delta -= penalty_from_vesting; @@ -3045,9 +2824,7 @@ impl Actor { info.consensus_fault_elapsed = rt.curr_epoch() + rt.policy().consensus_fault_ineligibility_duration; - st.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_SERIALIZATION, "failed to save miner info") - })?; + st.save_info(rt.store(), &info).context("failed to save miner info")?; Ok((burn_amount, reward_amount)) })?; @@ -3060,7 +2837,7 @@ impl Actor { notify_pledge_changed(rt, &pledge_delta)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -3099,10 +2876,9 @@ impl Actor { } // Unlock vested funds so we can spend them. - let newly_vested = - state.unlock_vested_funds(rt.store(), rt.curr_epoch()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to vest fund") - })?; + let newly_vested = state + .unlock_vested_funds(rt.store(), rt.curr_epoch()) + .context("Failed to vest fund")?; // available balance already accounts for fee debt so it is correct to call // this before RepayDebts. 
We would have to @@ -3146,7 +2922,7 @@ impl Actor { burn_funds(rt, fee_to_burn)?; notify_pledge_changed(rt, &newly_vested.neg())?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(WithdrawBalanceReturn { amount_withdrawn: amount_withdrawn.clone() }) } @@ -3168,9 +2944,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to unlock fee debt") - })?; + .context("failed to unlock fee debt")?; Ok((from_vesting, from_balance, state.clone())) })?; @@ -3179,7 +2953,7 @@ impl Actor { notify_pledge_changed(rt, &from_vesting.neg())?; burn_funds(rt, burn_amount)?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -3220,7 +2994,7 @@ impl Actor { } }; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } } @@ -3249,12 +3023,7 @@ where policy.addressed_partitions_max, policy.addressed_sectors_max, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to pop early terminations", - ) - })?; + .context("failed to pop early terminations")?; // Nothing to do, don't waste any time. 
// This can happen if we end up processing early terminations @@ -3265,9 +3034,8 @@ where } let info = get_miner_info(rt.store(), state)?; - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut total_initial_pledge = TokenAmount::zero(); let mut deals_to_terminate = @@ -3279,7 +3047,7 @@ where for (epoch, sector_numbers) in result.iter() { let sectors = sectors .load_sector(sector_numbers) - .map_err(|e| e.wrap("failed to load sector infos"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector infos")?; penalty += termination_penalty( info.sector_size, @@ -3318,9 +3086,7 @@ where rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to repay penalty") - })?; + .context("failed to repay penalty")?; penalty = &penalty_from_vesting + penalty_from_balance; pledge_delta -= penalty_from_vesting; @@ -3380,7 +3146,7 @@ where // from locked vesting funds before funds free this epoch. let newly_vested = state .unlock_vested_funds(rt.store(), rt.curr_epoch()) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to vest funds"))?; + .context("failed to vest funds")?; pledge_delta_total -= newly_vested; @@ -3390,12 +3156,7 @@ where let deposit_to_burn = state .cleanup_expired_pre_commits(policy, rt.store(), rt.curr_epoch()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to expire pre-committed sectors", - ) - })?; + .context("failed to expire pre-committed sectors")?; state .apply_penalty(&deposit_to_burn) @@ -3411,9 +3172,9 @@ where // That way, don't re-schedule a cron callback if one is already scheduled. 
had_early_terminations = have_pending_early_terminations(state); - let result = state.advance_deadline(policy, rt.store(), rt.curr_epoch()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to advance deadline") - })?; + let result = state + .advance_deadline(policy, rt.store(), rt.curr_epoch()) + .context("failed to advance deadline")?; // Faults detected by this missed PoSt pay no penalty, but sectors that were already faulty // and remain faulty through this deadline pay the fault fee. @@ -3442,9 +3203,7 @@ where rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to unlock penalty") - })?; + .context("failed to unlock penalty")?; penalty_total = &penalty_from_vesting + penalty_from_balance; pledge_delta_total -= penalty_from_vesting; @@ -3569,12 +3328,7 @@ where { let replace_sector = state .get_sector(store, params.replace_sector_number) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load sector {}", params.replace_sector_number), - ) - })? + .with_context(|| format!("failed to load sector {}", params.replace_sector_number))? 
.ok_or_else(|| { actor_error!(not_found, "no such sector {} to replace", params.replace_sector_number) })?; @@ -3636,12 +3390,7 @@ where params.replace_sector_partition, params.replace_sector_number, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to replace sector {}", params.replace_sector_number), - ) - })?; + .with_context(|| format!("failed to replace sector {}", params.replace_sector_number))?; Ok(()) } @@ -3685,7 +3434,8 @@ where RawBytes::serialize(ext::power::UpdateClaimedPowerParams { raw_byte_delta: delta.raw, quality_adjusted_delta: delta.qa, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ) .map_err(|e| e.wrap(format!("failed to update power with {:?}", delta_clone)))?; @@ -3711,7 +3461,8 @@ where RawBytes::serialize(ext::market::OnMinerSectorsTerminateParamsRef { epoch, deal_ids: chunk, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; } @@ -3852,10 +3603,12 @@ where ext::market::COMPUTE_DATA_COMMITMENT_METHOD, RawBytes::serialize(ext::market::ComputeDataCommitmentParamsRef { inputs: data_commitment_inputs, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )? - .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; if data_commitment_inputs.len() != ret.commds.len() { return Err(actor_error!(illegal_state, "number of data commitments computed {} does not match number of data commitment inputs {}", @@ -3895,11 +3648,12 @@ where let serialized = rt.send( *STORAGE_MARKET_ACTOR_ADDR, ext::market::VERIFY_DEALS_FOR_ACTIVATION_METHOD, - RawBytes::serialize(ext::market::VerifyDealsForActivationParamsRef { sectors })?, + RawBytes::serialize(ext::market::VerifyDealsForActivationParamsRef { sectors }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; - Ok(serialized.deserialize()?) + Ok(serialized.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} /// Requests the current epoch target block reward from the reward actor. @@ -4039,7 +3793,7 @@ where rt.send( *STORAGE_POWER_ACTOR_ADDR, ext::power::UPDATE_PLEDGE_TOTAL_METHOD, - RawBytes::serialize(BigIntSer(pledge_delta))?, + RawBytes::serialize(BigIntSer(pledge_delta)).exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; } @@ -4053,9 +3807,11 @@ fn assign_proving_period_offset( addr: Address, current_epoch: ChainEpoch, blake2b: impl FnOnce(&[u8]) -> [u8; 32], -) -> anyhow::Result { - let mut my_addr = addr.marshal_cbor()?; - my_addr.write_i64::(current_epoch)?; +) -> Result { + let mut my_addr = addr.marshal_cbor().exit_code(ExitCode::USR_ILLEGAL_STATE)?; + my_addr + .write_i64::(current_epoch) + .map_err(|err| actor_error!(serialization, "{}", err))?; let digest = blake2b(&my_addr); @@ -4101,9 +3857,10 @@ fn declaration_deadline_info( period_start: ChainEpoch, deadline_idx: u64, current_epoch: ChainEpoch, -) -> anyhow::Result { +) -> Result { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "invalid deadline {}, must be < {}", deadline_idx, policy.wpost_period_deadlines @@ -4116,9 +3873,9 @@ fn declaration_deadline_info( } /// Checks that a fault or recovery declaration at a specific deadline is outside the exclusion window for the deadline. 
-fn validate_fr_declaration_deadline(deadline: &DeadlineInfo) -> anyhow::Result<()> { +fn validate_fr_declaration_deadline(deadline: &DeadlineInfo) -> Result<(), ActorError> { if deadline.fault_cutoff_passed() { - Err(anyhow!("late fault or recovery declaration")) + Err(actor_error!(illegal_argument, "late fault or recovery declaration")) } else { Ok(()) } @@ -4128,14 +3885,16 @@ fn validate_fr_declaration_deadline(deadline: &DeadlineInfo) -> anyhow::Result<( fn validate_partition_contains_sectors( partition: &Partition, sectors: &mut UnvalidatedBitField, -) -> anyhow::Result<()> { - let sectors = sectors.validate().map_err(|e| anyhow!("failed to check sectors: {}", e))?; +) -> Result<(), ActorError> { + let sectors = sectors + .validate() + .map_err(|e| actor_error!(illegal_argument, "failed to check sectors: {}", e))?; // Check that the declared sectors are actually assigned to the partition. if partition.sectors.contains_all(sectors) { Ok(()) } else { - Err(anyhow!("not all sectors are assigned to the partition")) + Err(actor_error!(illegal_argument, "not all sectors are assigned to the partition")) } } @@ -4190,9 +3949,7 @@ fn get_miner_info(store: &BS, state: &State) -> Result( @@ -4217,9 +3974,7 @@ where info.worker = pending_worker_key.new_worker; info.pending_worker_key = None; - state - .save_info(rt.store(), info) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save miner info")) + state.save_info(rt.store(), info).context("failed to save miner info") } /// Repays all fee debt and then verifies that the miner has amount needed to cover @@ -4234,9 +3989,9 @@ where BS: Blockstore, RT: Runtime, { - let res = state.repay_debts(&rt.current_balance()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "unlocked balance can not repay fee debt") - })?; + let res = state + .repay_debts(&rt.current_balance()) + .context("unlocked balance can not repay fee debt")?; info!("RepayDebtsOrAbort was called and succeeded"); Ok(res) } 
@@ -4332,7 +4087,8 @@ where RawBytes::serialize(ext::market::ActivateDealsParams { deal_ids: pre_commit.info.deal_ids.clone(), sector_expiry: pre_commit.info.expiration, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); @@ -4433,13 +4189,11 @@ where new_sectors.push(new_sector_info); } - state.put_sectors(store, new_sectors.clone()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to put new sectors") - })?; + state.put_sectors(store, new_sectors.clone()).context("failed to put new sectors")?; - state.delete_precommitted_sectors(store, &new_sector_numbers).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to delete precommited sectors") - })?; + state + .delete_precommitted_sectors(store, &new_sector_numbers) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to delete precommited sectors")?; state .assign_sectors_to_deadlines( @@ -4450,12 +4204,7 @@ where info.window_post_partition_sectors, info.sector_size, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to assign new sectors to deadlines", - ) - })?; + .context("failed to assign new sectors to deadlines")?; let newly_vested = TokenAmount::zero(); @@ -4480,7 +4229,7 @@ where .add_initial_pledge(&total_pledge) .map_err(|e| actor_error!(illegal_state, "failed to add initial pledge: {}", e))?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok((total_pledge, newly_vested)) })?; @@ -4491,14 +4240,6 @@ where Ok(()) } -// XXX: probably better to push this one level down into state -fn balance_invariants_broken(e: Error) -> ActorError { - ActorError::unchecked( - ERR_BALANCE_INVARIANTS_BROKEN, - format!("balance invariants broken: {}", e), - ) -} - impl ActorCode for Actor { fn invoke_method( rt: &mut RT, @@ -4516,7 +4257,7 @@ impl ActorCode for Actor { } Some(Method::ControlAddresses) => { let res = 
Self::control_addresses(rt)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::ChangeWorkerAddress) => { Self::change_worker_address(rt, cbor::deserialize_params(params)?)?; @@ -4544,7 +4285,7 @@ impl ActorCode for Actor { } Some(Method::TerminateSectors) => { let ret = Self::terminate_sectors(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(ret)?) + Ok(RawBytes::serialize(ret).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::DeclareFaults) => { Self::declare_faults(rt, cbor::deserialize_params(params)?)?; @@ -4572,7 +4313,7 @@ impl ActorCode for Actor { } Some(Method::WithdrawBalance) => { let res = Self::withdraw_balance(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::ConfirmSectorProofsValid) => { Self::confirm_sector_proofs_valid(rt, cbor::deserialize_params(params)?)?; @@ -4616,7 +4357,7 @@ impl ActorCode for Actor { } Some(Method::ProveReplicaUpdates) => { let res = Self::prove_replica_updates(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} None => Err(actor_error!(unhandled_message, "Invalid method")), } diff --git a/actors/miner/src/partition_state.rs b/actors/miner/src/partition_state.rs index 5d5420982..3c2819688 100644 --- a/actors/miner/src/partition_state.rs +++ b/actors/miner/src/partition_state.rs @@ -4,10 +4,9 @@ use std::convert::TryInto; use std::ops::{self, Neg}; -use anyhow::{anyhow, Context}; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorDowncast, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -65,15 +64,17 @@ pub struct Partition { } impl Partition { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let empty_expiration_array = Array::::new_with_bit_width(store, PARTITION_EXPIRATION_AMT_BITWIDTH) - .flush()?; + .flush() + .exit_code(ExitCode::USR_SERIALIZATION)?; let empty_early_termination_array = Array::::new_with_bit_width( store, PARTITION_EARLY_TERMINATION_ARRAY_AMT_BITWIDTH, ) - .flush()?; + .flush() + .exit_code(ExitCode::USR_SERIALIZATION)?; Ok(Self { sectors: BitField::new(), @@ -116,21 +117,21 @@ impl Partition { sectors: &[SectorOnChainInfo], sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (sector_numbers, power, _) = expirations .add_active_sectors(sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + .context("failed to record new sector expirations")?; self.expirations_epochs = expirations .amt .flush() - .map_err(|e| e.downcast_wrap("failed to store sector expirations"))?; + 
.context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store sector expirations")?; if self.sectors.contains_any(§or_numbers) { - return Err(anyhow!("not all added sectors are new")); + return Err(actor_error!(illegal_argument, "not all added sectors are new")); } // Update other metadata using the calculated totals. @@ -159,19 +160,18 @@ impl Partition { fault_expiration: ChainEpoch, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result<(PowerPair, PowerPair)> { + ) -> Result<(PowerPair, PowerPair), ActorError> { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load partition queue"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; // Reschedule faults - let new_faulty_power = - queue - .reschedule_as_faults(fault_expiration, sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to add faults to partition queue"))?; + let new_faulty_power = queue + .reschedule_as_faults(fault_expiration, sectors, sector_size) + .context("failed to add faults to partition queue")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update partition metadata self.faults |= sector_numbers; @@ -187,8 +187,8 @@ impl Partition { let mut power_delta = new_faulty_power.clone().neg(); - let unproven_infos = select_sectors(sectors, &unproven) - .map_err(|e| e.downcast_wrap("failed to select unproven sectors"))?; + let unproven_infos = + select_sectors(sectors, &unproven).context("failed to select unproven sectors")?; if !unproven_infos.is_empty() { let lost_unproven_power = power_for_sectors(sector_size, &unproven_infos); self.unproven_power -= &lost_unproven_power; @@ -217,13 +217,14 @@ impl Partition { fault_expiration_epoch: ChainEpoch, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result<(BitField, PowerPair, PowerPair)> 
{ + ) -> Result<(BitField, PowerPair, PowerPair), ActorError> { validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; - let sector_numbers = sector_numbers - .validate() - .map_err(|e| anyhow!("failed to intersect sectors with recoveries: {}", e))?; + let sector_numbers = sector_numbers.validate().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to intersect sectors with recoveries", + )?; // Split declarations into declarations of new faults, and retraction of declared recoveries. let retracted_recoveries = &self.recoveries & sector_numbers; @@ -246,7 +247,7 @@ impl Partition { sector_size, quant, ) - .map_err(|e| e.downcast_wrap("failed to add faults"))? + .context("failed to add faults")? } else { Default::default() }; @@ -267,7 +268,7 @@ impl Partition { sectors: &Sectors<'_, BS>, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { // Process recoveries, assuming the proof will be successful. // This similarly updates state. 
let recovered_sectors = sectors @@ -276,15 +277,15 @@ impl Partition { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| anyhow!("failed to load partition queue: {:?}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; // Reschedule recovered let power = queue .reschedule_recovered(recovered_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to reschedule faults in partition queue"))?; + .context("failed to reschedule faults in partition queue")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update partition metadata self.faults -= &self.recoveries; @@ -313,23 +314,22 @@ impl Partition { sectors: &Sectors<'_, BS>, sector_size: SectorSize, sector_numbers: &mut UnvalidatedBitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Check that the declared sectors are actually assigned to the partition. validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; let sector_numbers = sector_numbers .validate() - .map_err(|e| anyhow!("failed to validate recoveries: {}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to validate recoveries")?; // Ignore sectors not faulty or already declared recovered let mut recoveries = sector_numbers & &self.faults; recoveries -= &self.recoveries; // Record the new recoveries for processing at Window PoSt or deadline cron. 
- let recovery_sectors = sectors - .load_sector(&recoveries) - .map_err(|e| e.wrap("failed to load recovery sectors"))?; + let recovery_sectors = + sectors.load_sector(&recoveries).context("failed to load recovery sectors")?; self.recoveries |= &recoveries; @@ -375,7 +375,7 @@ impl Partition { sector_numbers: &mut UnvalidatedBitField, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let sector_numbers = sector_numbers.validate().map_err(|e| { actor_error!(illegal_argument, "failed to validate rescheduled sectors: {}", e) })?; @@ -391,9 +391,9 @@ impl Partition { let sector_infos = sectors.load_sector(&active)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; expirations.reschedule_expirations(new_expiration, §or_infos, sector_size)?; - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // check invariants self.validate_state()?; @@ -413,25 +413,26 @@ impl Partition { new_sectors: &[SectorOnChainInfo], sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result<(PowerPair, TokenAmount)> { + ) -> Result<(PowerPair, TokenAmount), ActorError> { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (old_sector_numbers, new_sector_numbers, power_delta, pledge_delta) = expirations .replace_sectors(old_sectors, new_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to replace sector expirations"))?; + .context("failed to replace sector expirations")?; self.expirations_epochs = expirations .amt .flush() - .map_err(|e| e.downcast_wrap("failed to save 
sector expirations"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sector expirations")?; // Check the sectors being removed are active (alive, not faulty). let active = self.active_sectors(); let all_active = active.contains_all(&old_sector_numbers); if !all_active { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "refusing to replace inactive sectors in {:?} (active: {:?})", old_sector_numbers, active @@ -457,19 +458,22 @@ impl Partition { store: &BS, epoch: ChainEpoch, sectors: &BitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut early_termination_queue = - BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) - .map_err(|e| e.downcast_wrap("failed to load early termination queue"))?; + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load early termination queue", + )?; - early_termination_queue - .add_to_queue(epoch, sectors) - .map_err(|e| e.downcast_wrap("failed to add to early termination queue"))?; + early_termination_queue.add_to_queue(epoch, sectors).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to add to early termination queue", + )?; self.early_terminated = early_termination_queue .amt .flush() - .map_err(|e| e.downcast_wrap("failed to save early termination queue"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save early termination queue")?; Ok(()) } @@ -487,33 +491,33 @@ impl Partition { sector_numbers: &mut UnvalidatedBitField, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let live_sectors = self.live_sectors(); let sector_numbers = sector_numbers.validate().map_err(|e| { actor_error!(illegal_argument, "failed to validate terminating sectors: {}", e) })?; if !live_sectors.contains_all(sector_numbers) { - return Err(actor_error!(illegal_argument, "can only terminate live sectors").into()); + return Err(actor_error!(illegal_argument, "can only 
terminate live sectors")); } let sector_infos = sectors.load_sector(sector_numbers)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (mut removed, removed_recovering) = expirations .remove_sectors(policy, §or_infos, &self.faults, &self.recoveries, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove sector expirations"))?; + .context("failed to remove sector expirations")?; self.expirations_epochs = expirations .amt .flush() - .map_err(|e| e.downcast_wrap("failed to save sector expirations"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sector expirations")?; let removed_sectors = &removed.on_time_sectors | &removed.early_sectors; // Record early termination. self.record_early_termination(store, epoch, &removed_sectors) - .map_err(|e| e.downcast_wrap("failed to record early sector termination"))?; + .context("failed to record early sector termination")?; let unproven_nos = &removed_sectors & &self.unproven; @@ -546,21 +550,23 @@ impl Partition { store: &BS, until: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { // This is a sanity check to make sure we handle proofs _before_ // handling sector expirations. 
if !self.unproven.is_empty() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "Cannot pop expired sectors from a partition with unproven sectors" )); } let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; - let popped = expirations.pop_until(until).map_err(|e| { - e.downcast_wrap(format!("failed to pop expiration queue until {}", until)) - })?; - self.expirations_epochs = expirations.amt.flush()?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration queue")?; + let popped = expirations + .pop_until(until) + .with_context(|| format!("failed to pop expiration queue until {}", until))?; + + self.expirations_epochs = expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; let expired_sectors = &popped.on_time_sectors | &popped.early_sectors; @@ -569,15 +575,21 @@ impl Partition { // and all recoveries retracted. // No recoveries may be posted until the deadline is closed. if !self.recoveries.is_empty() { - return Err(anyhow!("unexpected recoveries while processing expirations")); + return Err(actor_error!( + illegal_state, + "unexpected recoveries while processing expirations" + )); } if !self.recovering_power.is_zero() { - return Err(anyhow!("unexpected recovering power while processing expirations")); + return Err(actor_error!( + illegal_state, + "unexpected recovering power while processing expirations" + )); } // Nothing expiring now should have already terminated. if self.terminated.contains_any(&expired_sectors) { - return Err(anyhow!("expiring sectors already terminated")); + return Err(actor_error!(illegal_state, "expiring sectors already terminated")); } // Mark the sectors as terminated and subtract sector power. @@ -588,7 +600,7 @@ impl Partition { // Record the epoch of any sectors expiring early, for termination fee calculation later. 
self.record_early_termination(store, until, &popped.early_sectors) - .map_err(|e| e.downcast_wrap("failed to record early terminations"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to record early terminations")?; // check invariants self.validate_state()?; @@ -604,18 +616,18 @@ impl Partition { store: &BS, fault_expiration: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result<(PowerPair, PowerPair, PowerPair)> { + ) -> Result<(PowerPair, PowerPair, PowerPair), ActorError> { // Collapse tail of queue into the last entry, and mark all power faulty. // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load partition queue"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; queue .reschedule_all_as_faults(fault_expiration) - .map_err(|e| e.downcast_wrap("failed to reschedule all as faults"))?; + .context("failed to reschedule all as faults")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Compute faulty power for penalization. New faulty power is the total power minus already faulty. let new_faulty_power = &self.live_power - &self.faulty_power; @@ -645,10 +657,11 @@ impl Partition { &mut self, store: &BS, max_sectors: u64, - ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { // Load early terminations. 
let mut early_terminated_queue = - BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION)?; + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut processed = Vec::::new(); let mut remaining: Option<(BitField, ChainEpoch)> = None; @@ -657,14 +670,15 @@ impl Partition { early_terminated_queue .amt - .for_each_while(|i, sectors| { - let epoch: ChainEpoch = i.try_into()?; + .try_for_each_while::<_, ActorError>(|i, sectors| { + let epoch: ChainEpoch = i.try_into().exit_code(ExitCode::USR_SERIALIZATION)?; let count = sectors.len(); let limit = max_sectors - result.sectors_processed; let to_process = if limit < count { - let to_process = - sectors.slice(0, limit).context("expected more sectors in bitfield")?; + let to_process = sectors.slice(0, limit).ok_or_else(|| { + actor_error!(illegal_state, "expected more sectors in bitfield") + })?; let rest = sectors - &to_process; remaining = Some((rest, epoch)); result.sectors_processed += limit; @@ -680,24 +694,29 @@ impl Partition { let keep_going = result.sectors_processed < max_sectors; Ok(keep_going) }) - .map_err(|e| e.downcast_wrap("failed to walk early terminations queue"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to walk early terminations queue")?; // Update early terminations - early_terminated_queue.amt.batch_delete(processed, true).map_err(|e| { - e.downcast_wrap("failed to remove entries from early terminations queue") - })?; + early_terminated_queue.amt.batch_delete(processed, true).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to remove entries from early terminations queue", + )?; if let Some((remaining_sectors, remaining_epoch)) = remaining.take() { - early_terminated_queue.amt.set(remaining_epoch as u64, remaining_sectors).map_err( - |e| e.downcast_wrap("failed to update remaining entry early terminations queue"), - )?; + early_terminated_queue + .amt + .set(remaining_epoch as u64, remaining_sectors) + 
.context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to update remaining entry early terminations queue", + )?; } // Save early terminations. - self.early_terminated = early_terminated_queue - .amt - .flush() - .map_err(|e| e.downcast_wrap("failed to store early terminations queue"))?; + self.early_terminated = early_terminated_queue.amt.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to store early terminations queue", + )?; // check invariants self.validate_state()?; @@ -720,7 +739,7 @@ impl Partition { quant: QuantSpec, fault_expiration: ChainEpoch, skipped: &mut UnvalidatedBitField, - ) -> anyhow::Result<(PowerPair, PowerPair, PowerPair, bool)> { + ) -> Result<(PowerPair, PowerPair, PowerPair, bool), ActorError> { let skipped = skipped.validate().map_err(|e| { actor_error!(illegal_argument, "failed to validate skipped sectors: {}", e) })?; @@ -734,8 +753,7 @@ impl Partition { return Err(actor_error!( illegal_argument, "skipped faults contains sectors outside partition" - ) - .into()); + )); } // Find all skipped faults that have been labeled recovered @@ -760,9 +778,7 @@ impl Partition { sector_size, quant, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add skipped faults") - })?; + .context("failed to add skipped faults")?; // Remove faulty recoveries self.remove_recoveries(&retracted_recoveries, &retracted_recovery_power); @@ -774,59 +790,68 @@ impl Partition { } /// Test invariants about the partition power are valid. 
- pub fn validate_power_state(&self) -> anyhow::Result<()> { + pub fn validate_power_state(&self) -> Result<(), ActorError> { if self.live_power.raw.is_negative() || self.live_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative live power")); + return Err(actor_error!(illegal_state, "Partition left with negative live power")); } if self.unproven_power.raw.is_negative() || self.unproven_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative unproven power")); + return Err(actor_error!(illegal_state, "Partition left with negative unproven power")); } if self.faulty_power.raw.is_negative() || self.faulty_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative faulty power")); + return Err(actor_error!(illegal_state, "Partition left with negative faulty power")); } if self.recovering_power.raw.is_negative() || self.recovering_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative recovering power")); + return Err(actor_error!( + illegal_state, + "Partition left with negative recovering power" + )); } if self.unproven_power.raw > self.live_power.raw { - return Err(anyhow!("Partition left with invalid unproven power")); + return Err(actor_error!(illegal_state, "Partition left with invalid unproven power")); } if self.faulty_power.raw > self.live_power.raw { - return Err(anyhow!("Partition left with invalid faulty power")); + return Err(actor_error!(illegal_state, "Partition left with invalid faulty power")); } // The first half of this conditional shouldn't matter, keeping for readability if self.recovering_power.raw > self.live_power.raw || self.recovering_power.raw > self.faulty_power.raw { - return Err(anyhow!("Partition left with invalid recovering power")); + return Err(actor_error!( + illegal_state, + "Partition left with invalid recovering power" + )); } Ok(()) } - pub fn validate_bf_state(&self) -> anyhow::Result<()> { + pub fn validate_bf_state(&self) -> Result<(), ActorError> 
{ let mut merge = &self.unproven | &self.faults; // Unproven or faulty sectors should not be in terminated if self.terminated.contains_any(&merge) { - return Err(anyhow!("Partition left with terminated sectors in multiple states")); + return Err(actor_error!( + illegal_state, + "Partition left with terminated sectors in multiple states" + )); } merge |= &self.terminated; // All merged sectors should exist in partition sectors if !self.sectors.contains_all(&merge) { - return Err(anyhow!("Partition left with invalid sector state")); + return Err(actor_error!(illegal_state, "Partition left with invalid sector state")); } // All recoveries should exist in partition faults if !self.faults.contains_all(&self.recoveries) { - return Err(anyhow!("Partition left with invalid recovery state")); + return Err(actor_error!(illegal_state, "Partition left with invalid recovery state")); } Ok(()) } - pub fn validate_state(&self) -> anyhow::Result<()> { + pub fn validate_state(&self) -> Result<(), ActorError> { self.validate_power_state()?; self.validate_bf_state()?; Ok(()) diff --git a/actors/miner/src/sector_map.rs b/actors/miner/src/sector_map.rs index 82ab755e1..9e467b865 100644 --- a/actors/miner/src/sector_map.rs +++ b/actors/miner/src/sector_map.rs @@ -3,11 +3,11 @@ use std::collections::BTreeMap; -use anyhow::anyhow; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; +use fvm_shared::error::ExitCode; use serde::{Deserialize, Serialize}; -use fil_actors_runtime::runtime::Policy; +use fil_actors_runtime::{actor_error, runtime::Policy, ActorContext, ActorContext2, ActorError}; /// Maps deadlines to partition maps. #[derive(Default)] @@ -21,34 +21,42 @@ impl DeadlineSectorMap { /// Check validates all bitfields and counts the number of partitions & sectors /// contained within the map, and returns an error if they exceed the given /// maximums. 
- pub fn check(&mut self, max_partitions: u64, max_sectors: u64) -> anyhow::Result<()> { - let (partition_count, sector_count) = - self.count().map_err(|e| anyhow!("failed to count sectors: {:?}", e))?; + pub fn check(&mut self, max_partitions: u64, max_sectors: u64) -> Result<(), ActorError> { + let (partition_count, sector_count) = self.count().context("failed to count sectors")?; if partition_count > max_partitions { - return Err(anyhow!("too many partitions {}, max {}", partition_count, max_partitions)); + return Err(actor_error!( + illegal_argument, + "too many partitions {}, max {}", + partition_count, + max_partitions + )); } if sector_count > max_sectors { - return Err(anyhow!("too many sectors {}, max {}", sector_count, max_sectors)); + return Err(actor_error!( + illegal_argument, + "too many sectors {}, max {}", + sector_count, + max_sectors + )); } Ok(()) } /// Counts the number of partitions & sectors within the map. - pub fn count(&mut self) -> anyhow::Result<(/* partitions */ u64, /* sectors */ u64)> { + pub fn count(&mut self) -> Result<(/* partitions */ u64, /* sectors */ u64), ActorError> { self.0.iter_mut().try_fold((0_u64, 0_u64), |(partitions, sectors), (deadline_idx, pm)| { - let (partition_count, sector_count) = pm - .count() - .map_err(|e| anyhow!("when counting deadline {}: {:?}", deadline_idx, e))?; + let (partition_count, sector_count) = + pm.count().with_context(|| format!("when counting deadline {}", deadline_idx))?; Ok(( - partitions - .checked_add(partition_count) - .ok_or_else(|| anyhow!("integer overflow when counting partitions"))?, - sectors - .checked_add(sector_count) - .ok_or_else(|| anyhow!("integer overflow when counting sectors"))?, + partitions.checked_add(partition_count).ok_or_else(|| { + actor_error!(illegal_state, "integer overflow when counting partitions") + })?, + sectors.checked_add(sector_count).ok_or_else(|| { + actor_error!(illegal_state, "integer overflow when counting sectors") + })?, )) }) } @@ -60,9 +68,9 @@ 
impl DeadlineSectorMap { deadline_idx: u64, partition_idx: u64, sector_numbers: UnvalidatedBitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!("invalid deadline {}", deadline_idx)); + return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } self.0.entry(deadline_idx).or_default().add(partition_idx, sector_numbers) @@ -75,12 +83,14 @@ impl DeadlineSectorMap { deadline_idx: u64, partition_idx: u64, sector_numbers: &[u64], - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.add( policy, deadline_idx, partition_idx, - BitField::try_from_bits(sector_numbers.iter().copied())?.into(), + BitField::try_from_bits(sector_numbers.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)? + .into(), ) } @@ -105,8 +115,11 @@ impl PartitionSectorMap { &mut self, partition_idx: u64, sector_numbers: Vec, - ) -> anyhow::Result<()> { - self.add(partition_idx, BitField::try_from_bits(sector_numbers)?.into()) + ) -> Result<(), ActorError> { + self.add( + partition_idx, + BitField::try_from_bits(sector_numbers).exit_code(ExitCode::USR_SERIALIZATION)?.into(), + ) } /// Records the given sector bitfield at the given partition index, merging /// it with any existing bitfields if necessary. 
@@ -114,15 +127,17 @@ impl PartitionSectorMap { &mut self, partition_idx: u64, mut sector_numbers: UnvalidatedBitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { match self.0.get_mut(&partition_idx) { Some(old_sector_numbers) => { - let old = old_sector_numbers - .validate_mut() - .map_err(|e| anyhow!("failed to validate sector bitfield: {}", e))?; - let new = sector_numbers - .validate() - .map_err(|e| anyhow!("failed to validate new sector bitfield: {}", e))?; + let old = old_sector_numbers.validate_mut().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to validate sector bitfield", + )?; + let new = sector_numbers.validate().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to validate new sector bitfield", + )?; *old |= new; } None => { @@ -133,14 +148,14 @@ impl PartitionSectorMap { } /// Counts the number of partitions & sectors within the map. - pub fn count(&mut self) -> anyhow::Result<(/* partitions */ u64, /* sectors */ u64)> { + pub fn count(&mut self) -> Result<(/* partitions */ u64, /* sectors */ u64), ActorError> { let sectors = self.0.iter_mut().try_fold(0_u64, |sectors, (partition_idx, bf)| { - let validated = bf.validate().map_err(|e| { - anyhow!("failed to parse bitmap for partition {}: {}", partition_idx, e) + let validated = bf.validate().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to parse bitmap for partition {}", partition_idx) })?; - sectors - .checked_add(validated.len() as u64) - .ok_or_else(|| anyhow!("integer overflow when counting sectors")) + sectors.checked_add(validated.len() as u64).ok_or_else(|| { + actor_error!(illegal_state, "integer overflow when counting sectors") + }) })?; Ok((self.0.len() as u64, sectors)) } diff --git a/actors/miner/src/sectors.rs b/actors/miner/src/sectors.rs index 49b0bd959..8e892e6af 100644 --- a/actors/miner/src/sectors.rs +++ b/actors/miner/src/sectors.rs @@ -3,14 +3,15 @@ use std::collections::BTreeSet; -use anyhow::anyhow; use cid::Cid; -use 
fil_actors_runtime::{actor_error, ActorDowncast, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext2, ActorError, Array}; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; -use fvm_shared::error::ExitCode; -use fvm_shared::sector::{SectorNumber, MAX_SECTOR_NUMBER}; +use fvm_shared::{ + error::ExitCode, + sector::{SectorNumber, MAX_SECTOR_NUMBER}, +}; use super::SectorOnChainInfo; @@ -19,7 +20,7 @@ pub struct Sectors<'db, BS> { } impl<'db, BS: Blockstore> Sectors<'db, BS> { - pub fn load(store: &'db BS, root: &Cid) -> Result { + pub fn load(store: &'db BS, root: &Cid) -> Result> { Ok(Self { amt: Array::load(root, store)? }) } @@ -37,11 +38,8 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { let sector_on_chain = self .amt .get(sector_number) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load sector {}", sector_number), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load sector {}", sector_number) })? .cloned() .ok_or_else(|| actor_error!(not_found; "sector not found: {}", sector_number))?; @@ -50,32 +48,44 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { Ok(sector_infos) } - pub fn get(&self, sector_number: SectorNumber) -> anyhow::Result> { + pub fn get( + &self, + sector_number: SectorNumber, + ) -> Result, ActorError> { Ok(self .amt .get(sector_number) - .map_err(|e| e.downcast_wrap(format!("failed to get sector {}", sector_number)))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get sector {}", sector_number) + })? 
.cloned()) } - pub fn store(&mut self, infos: Vec) -> anyhow::Result<()> { + pub fn store(&mut self, infos: Vec) -> Result<(), ActorError> { for info in infos { let sector_number = info.sector_number; if sector_number > MAX_SECTOR_NUMBER { - return Err(anyhow!("sector number {} out of range", info.sector_number)); + return Err(actor_error!( + illegal_argument, + "sector number {} out of range", + info.sector_number + )); } - self.amt.set(sector_number, info).map_err(|e| { - e.downcast_wrap(format!("failed to store sector {}", sector_number)) - })?; + self.amt + .set(sector_number, info) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store sector {}", sector_number) + })?; } Ok(()) } - pub fn must_get(&self, sector_number: SectorNumber) -> anyhow::Result { - self.get(sector_number)?.ok_or_else(|| anyhow!("sector {} not found", sector_number)) + pub fn must_get(&self, sector_number: SectorNumber) -> Result { + self.get(sector_number)? + .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_number)) } /// Loads info for a set of sectors to be proven. @@ -85,7 +95,7 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { &self, proven_sectors: &BitField, expected_faults: &BitField, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let non_faults = proven_sectors - expected_faults; if non_faults.is_empty() { @@ -108,7 +118,7 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { sectors: &BitField, faults: &BitField, fault_stand_in: SectorNumber, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let stand_in_info = self.must_get(fault_stand_in)?; // Expand faults into a map for quick lookups. 
@@ -131,13 +141,17 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { pub(crate) fn select_sectors( sectors: &[SectorOnChainInfo], field: &BitField, -) -> anyhow::Result> { +) -> Result, ActorError> { let mut to_include: BTreeSet<_> = field.iter().collect(); let included = sectors.iter().filter(|si| to_include.remove(&si.sector_number)).cloned().collect(); if !to_include.is_empty() { - return Err(anyhow!("failed to find {} expected sectors", to_include.len())); + return Err(actor_error!( + not_found, + "failed to find {} expected sectors", + to_include.len() + )); } Ok(included) diff --git a/actors/miner/src/state.rs b/actors/miner/src/state.rs index 1e96acc49..9a7c45c7a 100644 --- a/actors/miner/src/state.rs +++ b/actors/miner/src/state.rs @@ -4,15 +4,13 @@ use std::cmp; use std::ops::Neg; -use anyhow::anyhow; use cid::multihash::Code; use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ - actor_error, make_empty_map, make_map_with_root_and_bitwidth, u64_key, ActorDowncast, - ActorError, Array, + actor_error, make_empty_map, make_map_with_root_and_bitwidth, u64_key, ActorContext, + ActorContext2, ActorError, Array, }; -use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -27,6 +25,8 @@ use fvm_shared::sector::{RegisteredPoStProof, SectorNumber, SectorSize, MAX_SECT use fvm_shared::HAMT_BIT_WIDTH; use num_traits::{Signed, Zero}; +use crate::ERR_BALANCE_INVARIANTS_BROKEN; + use super::deadlines::new_deadline_info; use super::policy::*; use super::types::*; @@ -128,50 +128,39 @@ impl State { info_cid: Cid, period_start: ChainEpoch, deadline_idx: u64, - ) -> anyhow::Result { - let empty_precommit_map = - make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH).flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to construct empty precommit map", - ) - })?; + ) -> Result { + let empty_precommit_map = make_empty_map::<_, ()>(store, 
HAMT_BIT_WIDTH) + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty precommit map")?; + let empty_precommits_cleanup_array = Array::::new_with_bit_width(store, PRECOMMIT_EXPIRY_AMT_BITWIDTH) .flush() - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to construct empty precommits array", - ) - })?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct empty precommits array", + )?; + let empty_sectors_array = Array::::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) .flush() - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to construct sectors array", - ) - })?; - let empty_bitfield = store.put_cbor(&BitField::new(), Code::Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty bitfield") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct sectors array")?; + + let empty_bitfield = store + .put_cbor(&BitField::new(), Code::Blake2b256) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty bitfield")?; let deadline = Deadline::new(store)?; - let empty_deadline = store.put_cbor(&deadline, Code::Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + let empty_deadline = store + .put_cbor(&deadline, Code::Blake2b256) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; let empty_deadlines = store .put_cbor(&Deadlines::new(policy, empty_deadline), Code::Blake2b256) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; - let empty_vesting_funds_cid = - store.put_cbor(&VestingFunds::new(), Code::Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + let empty_vesting_funds_cid = 
store + .put_cbor(&VestingFunds::new(), Code::Blake2b256) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; Ok(Self { info: info_cid, @@ -196,11 +185,11 @@ impl State { }) } - pub fn get_info(&self, store: &BS) -> anyhow::Result { + pub fn get_info(&self, store: &BS) -> Result { match store.get_cbor(&self.info) { Ok(Some(info)) => Ok(info), - Ok(None) => Err(actor_error!(not_found, "failed to get miner info").into()), - Err(e) => Err(e.downcast_wrap("failed to get miner info")), + Ok(None) => Err(actor_error!(not_found, "failed to get miner info")), + Err(e) => Err(actor_error!(illegal_state, "failed to get miner info: {:?}", e)), } } @@ -208,8 +197,8 @@ impl State { &mut self, store: &BS, info: &MinerInfo, - ) -> anyhow::Result<()> { - let cid = store.put_cbor(&info, Code::Blake2b256)?; + ) -> Result<(), ActorError> { + let cid = store.put_cbor(&info, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; self.info = cid; Ok(()) } @@ -253,12 +242,7 @@ impl State { ) -> Result<(), ActorError> { let prior_allocation = store .get_cbor(&self.allocated_sectors) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load allocated sectors bitfield", - ) - })? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load allocated sectors bitfield")? 
.ok_or_else(|| actor_error!(illegal_state, "allocated sectors bitfield not found"))?; if policy != CollisionPolicy::AllowCollisions { @@ -274,16 +258,15 @@ impl State { } } let new_allocation = &prior_allocation | sector_numbers; - self.allocated_sectors = - store.put_cbor(&new_allocation, Code::Blake2b256).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!( - "failed to store allocated sectors bitfield after adding {:?}", - sector_numbers, - ), + self.allocated_sectors = store + .put_cbor(&new_allocation, Code::Blake2b256) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!( + "failed to store allocated sectors bitfield after adding {:?}", + sector_numbers, ) })?; + Ok(()) } @@ -292,22 +275,28 @@ impl State { &mut self, store: &BS, precommits: Vec, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut precommitted = - make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; + make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_SERIALIZATION)?; for precommit in precommits.into_iter() { let sector_no = precommit.info.sector_number; let modified = precommitted .set_if_absent(u64_key(precommit.info.sector_number), precommit) - .map_err(|e| { - e.downcast_wrap(format!("failed to store precommitment for {:?}", sector_no,)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store precommitment for {:?}", sector_no,) })?; + if !modified { - return Err(anyhow!("sector {} already pre-commited", sector_no)); + return Err(actor_error!( + illegal_argument, + "sector {} already pre-commited", + sector_no + )); } } - self.pre_committed_sectors = precommitted.flush()?; + self.pre_committed_sectors = precommitted.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -315,7 +304,7 @@ impl State { &self, store: &BS, sector_num: SectorNumber, - ) -> Result, HamtError> { + ) -> Result, HamtError> { let 
precommitted = make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; Ok(precommitted.get(&u64_key(sector_num))?.cloned()) @@ -326,18 +315,21 @@ impl State { &self, store: &BS, sector_numbers: &[SectorNumber], - ) -> anyhow::Result> { + ) -> Result, ActorError> { let precommitted = make_map_with_root_and_bitwidth::<_, SectorPreCommitOnChainInfo>( &self.pre_committed_sectors, store, HAMT_BIT_WIDTH, - )?; + ) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut result = Vec::with_capacity(sector_numbers.len()); for §or_number in sector_numbers { - let info = match precommitted.get(&u64_key(sector_number)).map_err(|e| { - e.downcast_wrap(format!("failed to load precommitment for {}", sector_number)) - })? { + let info = match precommitted + .get(&u64_key(sector_number)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load precommitment for {}", sector_number) + })? { Some(info) => info.clone(), None => continue, }; @@ -352,7 +344,7 @@ impl State { &mut self, store: &BS, sector_nums: &[SectorNumber], - ) -> Result<(), HamtError> { + ) -> Result<(), HamtError> { let mut precommitted = make_map_with_root_and_bitwidth::<_, SectorPreCommitOnChainInfo>( &self.pre_committed_sectors, store, @@ -371,8 +363,8 @@ impl State { &self, store: &BS, sector_num: SectorNumber, - ) -> anyhow::Result { - let sectors = Sectors::load(store, &self.sectors)?; + ) -> Result { + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(sectors.get(sector_num)?.is_some()) } @@ -380,14 +372,16 @@ impl State { &mut self, store: &BS, new_sectors: Vec, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut sectors = Sectors::load(store, &self.sectors) - .map_err(|e| e.downcast_wrap("failed to load sectors"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; sectors.store(new_sectors)?; - self.sectors = - sectors.amt.flush().map_err(|e| e.downcast_wrap("failed to 
persist sectors"))?; + self.sectors = sectors + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist sectors")?; Ok(()) } @@ -396,8 +390,8 @@ impl State { &self, store: &BS, sector_num: SectorNumber, - ) -> anyhow::Result> { - let sectors = Sectors::load(store, &self.sectors)?; + ) -> Result, ActorError> { + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; sectors.get(sector_num) } @@ -405,26 +399,27 @@ impl State { &mut self, store: &BS, sector_nos: &BitField, - ) -> Result<(), AmtError> { - let mut sectors = Sectors::load(store, &self.sectors)?; + ) -> Result<(), ActorError> { + let mut sectors = + Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; for sector_num in sector_nos.iter() { sectors .amt .delete(sector_num) - .map_err(|e| e.downcast_wrap("could not delete sector number"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "could not delete sector number")?; } - self.sectors = sectors.amt.flush()?; + self.sectors = sectors.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } - pub fn for_each_sector(&self, store: &BS, mut f: F) -> anyhow::Result<()> + pub fn for_each_sector(&self, store: &BS, mut f: F) -> Result<(), ActorError> where - F: FnMut(&SectorOnChainInfo) -> anyhow::Result<()>, + F: FnMut(&SectorOnChainInfo) -> Result<(), ActorError>, { - let sectors = Sectors::load(store, &self.sectors)?; - sectors.amt.for_each(|_, v| f(v))?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; + sectors.amt.try_for_each(|_, v| f(v)).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -434,7 +429,7 @@ impl State { policy: &Policy, store: &BS, sector_number: SectorNumber, - ) -> anyhow::Result<(u64, u64)> { + ) -> Result<(u64, u64), ActorError> { let deadlines = self.load_deadlines(store)?; deadlines.find_sector(policy, store, sector_number) } @@ -455,9 +450,9 @@ impl State { current_epoch: ChainEpoch, sector_size: 
SectorSize, mut deadline_sectors: DeadlineSectorMap, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut deadlines = self.load_deadlines(store)?; - let sectors = Sectors::load(store, &self.sectors)?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; let mut all_replaced = Vec::new(); for (deadline_idx, partition_sectors) in deadline_sectors.iter() { @@ -498,7 +493,7 @@ impl State { mut sectors: Vec, partition_size: u64, sector_size: SectorSize, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut deadlines = self.load_deadlines(store)?; // Sort sectors by number to get better runs in partition bitfields. @@ -565,7 +560,7 @@ impl State { store: &BS, max_partitions: u64, max_sectors: u64, - ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { // Anything to do? This lets us avoid loading the deadlines if there's nothing to do. if self.early_terminations.is_empty() { return Ok((Default::default(), false)); @@ -590,11 +585,8 @@ impl State { max_partitions - result.partitions_processed, max_sectors - result.sectors_processed, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to pop early terminations for deadline {}", - deadline_idx - )) + .with_context(|| { + format!("failed to pop early terminations for deadline {}", deadline_idx) })?; result += deadline_result; @@ -635,7 +627,7 @@ impl State { partition_idx: u64, sector_number: SectorNumber, require_proven: bool, - ) -> anyhow::Result { + ) -> Result { let dls = self.load_deadlines(store)?; let dl = dls.load_deadline(policy, store, deadline_idx)?; let partition = dl.load_partition(store, partition_idx)?; @@ -646,8 +638,7 @@ impl State { not_found; "sector {} not a member of partition {}, deadline {}", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } let faulty = partition.faults.get(sector_number); @@ -676,7 +667,7 @@ impl State { deadline_idx: 
u64, partition_idx: u64, sector_number: SectorNumber, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let deadlines = self.load_deadlines(store)?; let deadline = deadlines.load_deadline(policy, store, deadline_idx)?; let partition = deadline.load_partition(store, partition_idx)?; @@ -686,8 +677,7 @@ impl State { not_found; "sector {} not a member of partition {}, deadline {}", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } if partition.faults.get(sector_number) { @@ -695,8 +685,7 @@ impl State { forbidden; "sector {} not a member of partition {}, deadline {}", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } if partition.terminated.get(sector_number) { @@ -704,8 +693,7 @@ impl State { not_found; "sector {} not of partition {}, deadline {} is terminated", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } Ok(()) @@ -716,16 +704,16 @@ impl State { &self, store: &BS, sectors: &BitField, - ) -> anyhow::Result> { - Ok(Sectors::load(store, &self.sectors)?.load_sector(sectors)?) + ) -> Result, ActorError> { + Sectors::load(store, &self.sectors) + .exit_code(ExitCode::USR_SERIALIZATION)? + .load_sector(sectors) } pub fn load_deadlines(&self, store: &BS) -> Result { store .get_cbor::(&self.deadlines) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines") - })? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines")? .ok_or_else( || actor_error!(illegal_state; "failed to load deadlines {}", self.deadlines), ) @@ -735,21 +723,25 @@ impl State { &mut self, store: &BS, deadlines: Deadlines, - ) -> anyhow::Result<()> { - self.deadlines = store.put_cbor(&deadlines, Code::Blake2b256)?; + ) -> Result<(), ActorError> { + self.deadlines = + store.put_cbor(&deadlines, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } /// Loads the vesting funds table from the store. 
- pub fn load_vesting_funds(&self, store: &BS) -> anyhow::Result { - Ok(store + pub fn load_vesting_funds( + &self, + store: &BS, + ) -> Result { + store .get_cbor(&self.vesting_funds) - .map_err(|e| { - e.downcast_wrap(format!("failed to load vesting funds {}", self.vesting_funds)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load vesting funds {}", self.vesting_funds) })? .ok_or_else( || actor_error!(not_found; "failed to load vesting funds {:?}", self.vesting_funds), - )?) + ) } /// Saves the vesting table to the store. @@ -757,8 +749,9 @@ impl State { &mut self, store: &BS, funds: &VestingFunds, - ) -> anyhow::Result<()> { - self.vesting_funds = store.put_cbor(funds, Code::Blake2b256)?; + ) -> Result<(), ActorError> { + self.vesting_funds = + store.put_cbor(funds, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -773,10 +766,11 @@ impl State { // Funds and vesting // - pub fn add_pre_commit_deposit(&mut self, amount: &TokenAmount) -> anyhow::Result<()> { + pub fn add_pre_commit_deposit(&mut self, amount: &TokenAmount) -> Result<(), ActorError> { let new_total = &self.pre_commit_deposits + amount; if new_total.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "negative pre-commit deposit {} after adding {} to prior {}", new_total, amount, @@ -787,10 +781,11 @@ impl State { Ok(()) } - pub fn add_initial_pledge(&mut self, amount: &TokenAmount) -> anyhow::Result<()> { + pub fn add_initial_pledge(&mut self, amount: &TokenAmount) -> Result<(), ActorError> { let new_total = &self.initial_pledge + amount; if new_total.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "negative initial pledge requirement {} after adding {} to prior {}", new_total, amount, @@ -801,9 +796,9 @@ impl State { Ok(()) } - pub fn apply_penalty(&mut self, penalty: &TokenAmount) -> anyhow::Result<()> { + pub fn apply_penalty(&mut self, penalty: &TokenAmount) -> Result<(), 
ActorError> { if penalty.is_negative() { - Err(anyhow!("applying negative penalty {} not allowed", penalty)) + Err(actor_error!(illegal_argument, "applying negative penalty {} not allowed", penalty)) } else { self.fee_debt += penalty; Ok(()) @@ -817,9 +812,9 @@ impl State { current_epoch: ChainEpoch, vesting_sum: &TokenAmount, spec: &VestSpec, - ) -> anyhow::Result { + ) -> Result { if vesting_sum.is_negative() { - return Err(anyhow!("negative vesting sum {}", vesting_sum)); + return Err(actor_error!(illegal_argument, "negative vesting sum {}", vesting_sum)); } let mut vesting_funds = self.load_vesting_funds(store)?; @@ -828,7 +823,8 @@ impl State { let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch); self.locked_funds -= &amount_unlocked; if self.locked_funds.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "negative locked funds {} after unlocking {}", self.locked_funds, amount_unlocked @@ -858,7 +854,7 @@ impl State { TokenAmount, // from vesting TokenAmount, // from balance ), - anyhow::Error, + ActorError, > { let unlocked_balance = self.get_unlocked_balance(curr_balance)?; @@ -867,7 +863,10 @@ impl State { // * It may be possible the go implementation catches a potential panic here if from_vesting > self.fee_debt { - return Err(anyhow!("should never unlock more than the debt we need to repay")); + return Err(actor_error!( + illegal_state, + "should never unlock more than the debt we need to repay" + )); } self.fee_debt -= &from_vesting; @@ -881,7 +880,7 @@ impl State { /// burnt and an error if there are not sufficient funds to cover repayment. /// Miner state repays from unlocked funds and fails if unlocked funds are insufficient to cover fee debt. /// FeeDebt will be zero after a successful call. 
- pub fn repay_debts(&mut self, curr_balance: &TokenAmount) -> anyhow::Result { + pub fn repay_debts(&mut self, curr_balance: &TokenAmount) -> Result { let unlocked_balance = self.get_unlocked_balance(curr_balance)?; if unlocked_balance < self.fee_debt { return Err(actor_error!( @@ -889,8 +888,7 @@ impl State { "unlocked balance can not repay fee debt ({} < {})", unlocked_balance, self.fee_debt - ) - .into()); + )); } Ok(std::mem::take(&mut self.fee_debt)) @@ -903,7 +901,7 @@ impl State { store: &BS, current_epoch: ChainEpoch, target: &TokenAmount, - ) -> anyhow::Result { + ) -> Result { if target.is_zero() || self.locked_funds.is_zero() { return Ok(TokenAmount::zero()); } @@ -912,7 +910,8 @@ impl State { let amount_unlocked = vesting_funds.unlock_unvested_funds(current_epoch, target); self.locked_funds -= &amount_unlocked; if self.locked_funds.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "negative locked funds {} after unlocking {}", self.locked_funds, amount_unlocked @@ -929,7 +928,7 @@ impl State { &mut self, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { if self.locked_funds.is_zero() { return Ok(TokenAmount::zero()); } @@ -938,9 +937,10 @@ impl State { let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch); self.locked_funds -= &amount_unlocked; if self.locked_funds.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "vesting cause locked funds to become negative: {}", - self.locked_funds, + self.locked_funds )); } @@ -953,7 +953,7 @@ impl State { &self, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { let vesting_funds = self.load_vesting_funds(store)?; Ok(vesting_funds .funds @@ -963,11 +963,18 @@ impl State { } /// Unclaimed funds that are not locked -- includes funds used to cover initial pledge requirement. 
- pub fn get_unlocked_balance(&self, actor_balance: &TokenAmount) -> anyhow::Result { + pub fn get_unlocked_balance( + &self, + actor_balance: &TokenAmount, + ) -> Result { let unlocked_balance = actor_balance - &self.locked_funds - &self.pre_commit_deposits - &self.initial_pledge; if unlocked_balance.is_negative() { - return Err(anyhow!("negative unlocked balance {}", unlocked_balance)); + return Err(actor_error!( + illegal_state, + "negative unlocked balance {}", + unlocked_balance + )); } Ok(unlocked_balance) } @@ -977,28 +984,39 @@ impl State { pub fn get_available_balance( &self, actor_balance: &TokenAmount, - ) -> anyhow::Result { + ) -> Result { // (actor_balance - &self.locked_funds) - &self.pre_commit_deposit Ok(self.get_unlocked_balance(actor_balance)? - &self.fee_debt) } - pub fn check_balance_invariants(&self, balance: &TokenAmount) -> anyhow::Result<()> { + pub fn check_balance_invariants(&self, balance: &TokenAmount) -> Result<(), ActorError> { + // XXX: probably better to push this one level down into state + let fail = |msg| { + ActorError::unchecked( + ERR_BALANCE_INVARIANTS_BROKEN, + format!("balance invariants broken: {}", msg), + ) + }; + if self.pre_commit_deposits.is_negative() { - return Err(anyhow!("pre-commit deposit is negative: {}", self.pre_commit_deposits)); + return Err(fail(format!( + "pre-commit deposit is negative: {}", + self.pre_commit_deposits + ))); } if self.locked_funds.is_negative() { - return Err(anyhow!("locked funds is negative: {}", self.locked_funds)); + return Err(fail(format!("locked funds is negative: {}", self.locked_funds))); } if self.initial_pledge.is_negative() { - return Err(anyhow!("initial pledge is negative: {}", self.initial_pledge)); + return Err(fail(format!("initial pledge is negative: {}", self.initial_pledge))); } if self.fee_debt.is_negative() { - return Err(anyhow!("fee debt is negative: {}", self.fee_debt)); + return Err(fail(format!("fee debt is negative: {}", self.fee_debt))); } let min_balance = 
&self.pre_commit_deposits + &self.locked_funds + &self.initial_pledge; if balance < &min_balance { - return Err(anyhow!("fee debt is negative: {}", self.fee_debt)); + return Err(fail(format!("fee debt is negative: {}", self.fee_debt))); } Ok(()) @@ -1014,15 +1032,21 @@ impl State { policy: &Policy, store: &BS, cleanup_events: Vec<(ChainEpoch, u64)>, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Load BitField Queue for sector expiry let quant = self.quant_spec_every_deadline(policy); let mut queue = super::BitFieldQueue::new(store, &self.pre_committed_sectors_cleanup, quant) - .map_err(|e| e.downcast_wrap("failed to load pre-commit clean up queue"))?; - - queue.add_many_to_queue_values(cleanup_events.into_iter())?; - self.pre_committed_sectors_cleanup = queue.amt.flush()?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load pre-commit clean up queue", + )?; + + queue + .add_many_to_queue_values(cleanup_events.into_iter()) + .exit_code(ExitCode::USR_SERIALIZATION)?; + self.pre_committed_sectors_cleanup = + queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -1031,7 +1055,7 @@ impl State { policy: &Policy, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { let mut deposit_to_burn = TokenAmount::zero(); // cleanup expired pre-committed sectors @@ -1039,12 +1063,15 @@ impl State { store, &self.pre_committed_sectors_cleanup, self.quant_spec_every_deadline(policy), - )?; + ) + .exit_code(ExitCode::USR_SERIALIZATION)?; - let (sectors, modified) = cleanup_queue.pop_until(current_epoch)?; + let (sectors, modified) = + cleanup_queue.pop_until(current_epoch).exit_code(ExitCode::USR_SERIALIZATION)?; if modified { - self.pre_committed_sectors_cleanup = cleanup_queue.amt.flush()?; + self.pre_committed_sectors_cleanup = + cleanup_queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; } let mut precommits_to_delete = Vec::new(); @@ -1052,7 +1079,10 @@ impl State { for i in sectors.iter() { let 
sector_number = i as SectorNumber; - let sector = match self.get_precommitted_sector(store, sector_number)? { + let sector = match self + .get_precommitted_sector(store, sector_number) + .exit_code(ExitCode::USR_SERIALIZATION)? + { Some(sector) => sector, // already committed/deleted None => continue, @@ -1067,12 +1097,14 @@ impl State { // Actually delete it. if !precommits_to_delete.is_empty() { - self.delete_precommitted_sectors(store, &precommits_to_delete)?; + self.delete_precommitted_sectors(store, &precommits_to_delete) + .exit_code(ExitCode::USR_SERIALIZATION)?; } self.pre_commit_deposits -= &deposit_to_burn; if self.pre_commit_deposits.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "pre-commit clean up caused negative deposits: {}", self.pre_commit_deposits )); @@ -1086,7 +1118,7 @@ impl State { policy: &Policy, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { let mut pledge_delta = TokenAmount::zero(); let dl_info = self.deadline_info(policy, current_epoch); @@ -1169,20 +1201,19 @@ impl State { &self, store: &BS, sector_nos: &BitField, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut precommits = Vec::new(); let precommitted = - make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; + make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_SERIALIZATION)?; for sector_no in sector_nos.iter() { if sector_no as u64 > MAX_SECTOR_NUMBER { - return Err( - actor_error!(illegal_argument; "sector number greater than maximum").into() - ); + return Err(actor_error!(illegal_argument; "sector number greater than maximum")); } - let info: &SectorPreCommitOnChainInfo = - precommitted - .get(&u64_key(sector_no as u64))? 
- .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_no))?; + let info: &SectorPreCommitOnChainInfo = precommitted + .get(&u64_key(sector_no as u64)) + .exit_code(ExitCode::USR_SERIALIZATION)? + .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_no))?; precommits.push(info.clone()); } Ok(precommits) diff --git a/actors/miner/tests/util.rs b/actors/miner/tests/util.rs index a1a006e39..3629dc7cc 100644 --- a/actors/miner/tests/util.rs +++ b/actors/miner/tests/util.rs @@ -1187,7 +1187,7 @@ impl ActorHarness { let live = part.live_sectors(); let to_prove = &live & §or_nos; if to_prove.is_empty() { - return Ok(()); + return; } let mut to_skip = &live - &to_prove; @@ -1204,7 +1204,6 @@ impl ActorHarness { if skipped_proven.get(i) { skipped_proven_sector_infos.push(sector.clone()); } - Ok(()) }) .unwrap(); let new_faulty_power = @@ -1218,7 +1217,6 @@ impl ActorHarness { if new_proven.get(i) { new_proven_infos.push(sector.clone()); } - Ok(()) }) .unwrap(); let new_proven_power = self.power_pair_for_sectors(&new_proven_infos); @@ -1230,8 +1228,6 @@ impl ActorHarness { index: part_idx, skipped: UnvalidatedBitField::Validated(to_skip), }); - - Ok(()) }) .unwrap(); @@ -1605,7 +1601,6 @@ where let arr = Array::::load(c, &rt.store).unwrap(); arr.for_each(|_, v: &T| { result.push(v.clone()); - Ok(()) }) .unwrap(); result diff --git a/actors/multisig/Cargo.toml b/actors/multisig/Cargo.toml index 853961436..b66e00e91 100644 --- a/actors/multisig/Cargo.toml +++ b/actors/multisig/Cargo.toml @@ -16,14 +16,13 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } fvm_shared = { version = "0.6.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } indexmap = { version = "1.8.0", features = ["serde-1"] } 
integer-encoding = { version = "3.0.3", default-features = false } serde = { version = "1.0.136", features = ["derive"] } -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/multisig/src/lib.rs b/actors/multisig/src/lib.rs index 5d15cc982..6ab70338d 100644 --- a/actors/multisig/src/lib.rs +++ b/actors/multisig/src/lib.rs @@ -6,8 +6,8 @@ use std::collections::BTreeSet; use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Primitives, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_id_addr, ActorDowncast, - ActorError, Map, INIT_ACTOR_ADDR, + actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_id_addr, ActorContext, + ActorContext2, ActorError, Map, INIT_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -73,12 +73,8 @@ impl Actor { let mut resolved_signers = Vec::with_capacity(params.signers.len()); let mut dedup_signers = BTreeSet::new(); for signer in ¶ms.signers { - let resolved = resolve_to_id_addr(rt, signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", signer), - ) - })?; + let resolved = resolve_to_id_addr(rt, signer) + .with_context(|| format!("failed to resolve addr {} to ID addr", signer))?; if !dedup_signers.insert(resolved.id().expect("address should be resolved")) { return Err( actor_error!(illegal_argument; "duplicate signer not allowed: {}", signer), @@ -101,10 +97,9 @@ impl Actor { return Err(actor_error!(illegal_argument; "negative unlock duration disallowed")); } - let empty_root = - make_empty_map::<_, ()>(rt.store(), HAMT_BIT_WIDTH).flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty map") - })?; + let empty_root = make_empty_map::<_, ()>(rt.store(), HAMT_BIT_WIDTH) + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to 
create empty map")?; let mut st: State = State { signers: resolved_signers, @@ -150,12 +145,8 @@ impl Actor { return Err(actor_error!(forbidden, "{} is not a signer", proposer)); } - let mut ptx = make_map_with_root(&st.pending_txs, rt.store()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let t_id = st.next_tx_id; st.next_tx_id.0 += 1; @@ -168,19 +159,15 @@ impl Actor { approved: Vec::new(), }; - ptx.set(t_id.key(), txn.clone()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to put transaction for propose", - ) - })?; + ptx.set(t_id.key(), txn.clone()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to put transaction for propose", + )?; - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok((t_id, txn)) })?; @@ -205,12 +192,8 @@ impl Actor { return Err(actor_error!(forbidden; "{} is not a signer", approver)); } - let ptx = make_map_with_root(&st.pending_txs, rt.store()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + let ptx = make_map_with_root(&st.pending_txs, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let txn = get_transaction(rt, &ptx, params.id, params.proposal_hash)?; @@ -245,20 +228,12 @@ impl Actor { } let mut ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, rt.store()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to 
load pending transactions")?; let (_, tx) = ptx .delete(¶ms.id.key()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to pop transaction {:?} for cancel", params.id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to pop transaction {:?} for cancel", params.id,) })? .ok_or_else(|| { actor_error!(not_found, "no such transaction {:?} to cancel", params.id) @@ -269,23 +244,19 @@ impl Actor { return Err(actor_error!(forbidden; "Cannot cancel another signers transaction")); } - let calculated_hash = compute_proposal_hash(&tx, rt).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to compute proposal hash for (tx: {:?})", params.id), - ) - })?; + let calculated_hash = compute_proposal_hash(&tx, rt) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to compute proposal hash for (tx: {:?})", params.id) + })?; if !params.proposal_hash.is_empty() && params.proposal_hash != calculated_hash { return Err(actor_error!(illegal_state, "hash does not match proposal params")); } - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok(()) }) @@ -299,12 +270,8 @@ impl Actor { { let receiver = rt.message().receiver(); rt.validate_immediate_caller_is(std::iter::once(&receiver))?; - let resolved_new_signer = resolve_to_id_addr(rt, ¶ms.signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.signer), - ) - })?; + let resolved_new_signer = resolve_to_id_addr(rt, ¶ms.signer) + .with_context(|| format!("failed to resolve address {}", params.signer))?; rt.transaction(|st: &mut State, _| { if st.signers.len() >= SIGNERS_MAX { @@ -336,12 +303,8 @@ impl Actor { { let receiver = 
rt.message().receiver(); rt.validate_immediate_caller_is(std::iter::once(&receiver))?; - let resolved_old_signer = resolve_to_id_addr(rt, ¶ms.signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.signer), - ) - })?; + let resolved_old_signer = resolve_to_id_addr(rt, ¶ms.signer) + .with_context(|| format!("failed to resolve address {}", params.signer))?; rt.transaction(|st: &mut State, rt| { if !st.is_signer(&resolved_old_signer) { @@ -374,12 +337,9 @@ impl Actor { } // Remove approvals from removed signer - st.purge_approvals(rt.store(), &resolved_old_signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to purge approvals of removed signer", - ) - })?; + st.purge_approvals(rt.store(), &resolved_old_signer) + .context("failed to purge approvals of removed signer")?; + st.signers.retain(|s| s != &resolved_old_signer); Ok(()) @@ -396,18 +356,10 @@ impl Actor { { let receiver = rt.message().receiver(); rt.validate_immediate_caller_is(std::iter::once(&receiver))?; - let from_resolved = resolve_to_id_addr(rt, ¶ms.from).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.from), - ) - })?; - let to_resolved = resolve_to_id_addr(rt, ¶ms.to).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.to), - ) - })?; + let from_resolved = resolve_to_id_addr(rt, ¶ms.from) + .with_context(|| format!("failed to resolve address {}", params.from))?; + let to_resolved = resolve_to_id_addr(rt, ¶ms.to) + .with_context(|| format!("failed to resolve address {}", params.to))?; rt.transaction(|st: &mut State, rt| { if !st.is_signer(&from_resolved) { @@ -424,12 +376,9 @@ impl Actor { // Add new signer st.signers.push(to_resolved); - st.purge_approvals(rt.store(), &from_resolved).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to purge 
approvals of removed signer", - ) - })?; + st.purge_approvals(rt.store(), &from_resolved) + .context("failed to purge approvals of removed signer")?; + Ok(()) })?; @@ -510,29 +459,21 @@ impl Actor { } let st = rt.transaction(|st: &mut State, rt| { - let mut ptx = make_map_with_root(&st.pending_txs, rt.store()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; // update approved on the transaction txn.approved.push(rt.message().caller()); - ptx.set(tx_id.key(), txn.clone()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to put transaction {} for approval", tx_id.0), - ) - })?; + ptx.set(tx_id.key(), txn.clone()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to put transaction {} for approval", tx_id.0,) + })?; - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; // Go implementation holds reference to state after transaction so this must be cloned // to match to handle possible exit code inconsistency @@ -573,26 +514,17 @@ where rt.transaction(|st: &mut State, rt| { let mut ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, rt.store()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; + + ptx.delete(&txn_id.key()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete transaction for cleanup", + )?; - ptx.delete(&txn_id.key()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - 
"failed to delete transaction for cleanup", - ) - })?; - - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok(()) })?; } @@ -612,21 +544,14 @@ where { let txn = ptx .get(&txn_id.key()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load transaction {:?} for approval", txn_id), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load transaction {:?} for approval", txn_id,) })? .ok_or_else(|| actor_error!(not_found, "no such transaction {:?} for approval", txn_id))?; if !proposal_hash.is_empty() { - let calculated_hash = compute_proposal_hash(txn, rt).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to compute proposal hash for (tx: {:?})", txn_id), - ) - })?; + let calculated_hash = compute_proposal_hash(txn, rt) + .with_context(|| format!("failed to compute proposal hash for (tx: {:?})", txn_id))?; if proposal_hash != calculated_hash { return Err(actor_error!( @@ -641,7 +566,10 @@ where /// Computes a digest of a proposed transaction. This digest is used to confirm identity /// of the transaction associated with an ID, which might change under chain re-orgs. -pub fn compute_proposal_hash(txn: &Transaction, sys: &dyn Primitives) -> anyhow::Result<[u8; 32]> { +pub fn compute_proposal_hash( + txn: &Transaction, + sys: &dyn Primitives, +) -> Result<[u8; 32], ActorError> { let proposal_hash = ProposalHashData { requester: txn.approved.get(0), to: &txn.to, @@ -670,11 +598,11 @@ impl ActorCode for Actor { } Some(Method::Propose) => { let res = Self::propose(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} Some(Method::Approve) => { let res = Self::approve(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::Cancel) => { Self::cancel(rt, cbor::deserialize_params(params)?)?; diff --git a/actors/multisig/src/state.rs b/actors/multisig/src/state.rs index 4e9360aa8..5dbaa5854 100644 --- a/actors/multisig/src/state.rs +++ b/actors/multisig/src/state.rs @@ -1,8 +1,10 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::anyhow; use cid::Cid; +use fil_actors_runtime::actor_error; +use fil_actors_runtime::ActorContext2; +use fil_actors_runtime::ActorError; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; @@ -10,6 +12,7 @@ use fvm_shared::address::Address; use fvm_shared::bigint::{bigint_ser, Integer}; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use indexmap::IndexMap; use num_traits::Zero; @@ -75,8 +78,9 @@ impl State { &mut self, store: &BS, addr: &Address, - ) -> anyhow::Result<()> { - let mut txns = make_map_with_root(&self.pending_txs, store)?; + ) -> Result<(), ActorError> { + let mut txns = + make_map_with_root(&self.pending_txs, store).exit_code(ExitCode::USR_ILLEGAL_STATE)?; // Identify transactions that need updating let mut txn_ids_to_purge = IndexMap::new(); @@ -86,21 +90,21 @@ impl State { txn_ids_to_purge.insert(tx_id.0.clone(), txn.clone()); } } - Ok(()) - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; // Update or remove those transactions. 
for (tx_id, mut txn) in txn_ids_to_purge { txn.approved.retain(|approver| approver != addr); if !txn.approved.is_empty() { - txns.set(tx_id.into(), txn)?; + txns.set(tx_id.into(), txn).exit_code(ExitCode::USR_ILLEGAL_STATE)?; } else { - txns.delete(&tx_id)?; + txns.delete(&tx_id).exit_code(ExitCode::USR_ILLEGAL_STATE)?; } } - self.pending_txs = txns.flush()?; + self.pending_txs = txns.flush().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(()) } @@ -110,12 +114,17 @@ impl State { balance: TokenAmount, amount_to_spend: &TokenAmount, curr_epoch: ChainEpoch, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if amount_to_spend < &0.into() { - return Err(anyhow!("amount to spend {} less than zero", amount_to_spend)); + return Err(actor_error!( + insufficient_funds, + "amount to spend {} less than zero", + amount_to_spend + )); } if &balance < amount_to_spend { - return Err(anyhow!( + return Err(actor_error!( + insufficient_funds, "current balance {} less than amount to spend {}", balance, amount_to_spend @@ -131,7 +140,8 @@ impl State { let remaining_balance = balance - amount_to_spend; let amount_locked = self.amount_locked(curr_epoch - self.start_epoch); if remaining_balance < amount_locked { - return Err(anyhow!( + return Err(actor_error!( + insufficient_funds, "actor balance {} if spent {} would be less than required locked amount {}", remaining_balance, amount_to_spend, diff --git a/actors/multisig/tests/util.rs b/actors/multisig/tests/util.rs index a68ad47fe..dc1a1daf7 100644 --- a/actors/multisig/tests/util.rs +++ b/actors/multisig/tests/util.rs @@ -191,8 +191,7 @@ impl ActorHarness { let ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, &rt.store).unwrap(); let mut actual_txns = Vec::new(); ptx.for_each(|k, txn: &Transaction| { - actual_txns.push((TxnID(parse_uint_key(k)? 
as i64), txn.clone())); - Ok(()) + actual_txns.push((TxnID(parse_uint_key(k).unwrap() as i64), txn.clone())); }) .unwrap(); expect_txns.sort_by_key(|(TxnID(id), _txn)| (*id)); diff --git a/actors/paych/Cargo.toml b/actors/paych/Cargo.toml index 7648403b9..62fdad7d6 100644 --- a/actors/paych/Cargo.toml +++ b/actors/paych/Cargo.toml @@ -20,11 +20,11 @@ num-traits = "0.2.14" num-derive = "0.3.3" serde = { version = "1.0.136", features = ["derive"] } cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" [dev-dependencies] +anyhow = "1.0.56" fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["test_utils", "sector-default"] } fvm_ipld_amt = { version = "0.4.0", features = ["go-interop"] } derive_builder = "0.10.2" diff --git a/actors/paych/src/lib.rs b/actors/paych/src/lib.rs index 2dc7c6db8..1d931d5f1 100644 --- a/actors/paych/src/lib.rs +++ b/actors/paych/src/lib.rs @@ -2,7 +2,9 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, cbor, resolve_to_id_addr, ActorDowncast, ActorError, Array}; +use fil_actors_runtime::{ + actor_error, cbor, resolve_to_id_addr, ActorContext, ActorContext2, ActorError, Array, +}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; @@ -58,9 +60,7 @@ impl Actor { let empty_arr_cid = Array::<(), _>::new_with_bit_width(rt.store(), LANE_STATES_AMT_BITWIDTH) .flush() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to create empty AMT") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create empty AMT")?; rt.create(&State::new(from, to, empty_arr_cid))?; Ok(()) @@ -72,12 +72,8 @@ impl Actor { BS: Blockstore, RT: Runtime, { - let resolved = resolve_to_id_addr(rt, raw).map_err(|e| { - e.downcast_default( - 
ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", raw), - ) - })?; + let resolved = resolve_to_id_addr(rt, raw) + .with_context(|| format!("failed to resolve address {}", raw))?; let code_cid = rt .get_actor_code_cid(&resolved) @@ -135,9 +131,8 @@ impl Actor { })?; // Validate signature - rt.verify_signature(sig, &signer, &sv_bz).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "voucher signature invalid") - })?; + rt.verify_signature(sig, &signer, &sv_bz) + .context_code(ExitCode::USR_ILLEGAL_STATE, "voucher signature invalid")?; let pch_addr = rt.message().receiver(); let svpch_id_addr = rt.resolve_address(&sv.channel_addr).ok_or_else(|| { @@ -177,16 +172,15 @@ impl Actor { rt.send( extra.actor, extra.method, - RawBytes::serialize(&extra.data)?, + RawBytes::serialize(&extra.data).exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::from(0u8), ) .map_err(|e| e.wrap("spend voucher verification failed"))?; } rt.transaction(|st: &mut State, rt| { - let mut l_states = Array::load(&st.lane_states, rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load lane states") - })?; + let mut l_states = Array::load(&st.lane_states, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load lane states")?; // Find the voucher lane, create and insert it in sorted order if necessary. let lane_id = sv.lane; @@ -226,12 +220,11 @@ impl Actor { redeemed_from_others += &other_ls.redeemed; other_ls.nonce = merge.nonce; - l_states.set(merge.lane, other_ls).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store lane {}", merge.lane), - ) - })?; + l_states + .set(merge.lane, other_ls) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store lane {}", merge.lane,) + })?; } // 2. 
To prevent double counting, remove already redeemed amounts (from @@ -268,16 +261,15 @@ impl Actor { } } - l_states.set(lane_id, lane_state).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store lane {}", lane_id), - ) - })?; + l_states + .set(lane_id, lane_state) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store lane {}", lane_id,) + })?; - st.lane_states = l_states.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save lanes") - })?; + st.lane_states = l_states + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save lanes")?; Ok(()) }) } @@ -338,9 +330,8 @@ where return Err(actor_error!(illegal_argument; "maximum lane ID is 2^63-1")); } - ls.get(id).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, format!("failed to load lane {}", id)) - }) + ls.get(id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || format!("failed to load lane {}", id)) } impl ActorCode for Actor { diff --git a/actors/power/Cargo.toml b/actors/power/Cargo.toml index ea1371695..1764fd542 100644 --- a/actors/power/Cargo.toml +++ b/actors/power/Cargo.toml @@ -16,7 +16,7 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } fvm_shared = { version = "0.6.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" log = "0.4.14" @@ -25,7 +25,6 @@ cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] integer-encoding = { version = "3.0.3", default-features = false } lazy_static = "1.4.0" serde = { version = "1.0.136", features = ["derive"] } -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/power/src/lib.rs b/actors/power/src/lib.rs index f34769db8..df16bb138 100644 --- a/actors/power/src/lib.rs +++ b/actors/power/src/lib.rs 
@@ -4,12 +4,11 @@ use std::collections::BTreeSet; use std::convert::TryInto; -use anyhow::anyhow; use ext::init; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, ActorDowncast, ActorError, Multimap, - CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, make_map_with_root_and_bitwidth, ActorContext, ActorContext2, ActorError, + Multimap, CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -74,9 +73,7 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let st = State::new(rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create power actor state") - })?; + let st = State::new(rt.store()).context("Failed to create power actor state")?; rt.create(&st)?; Ok(()) } @@ -99,7 +96,8 @@ impl Actor { peer_id: params.peer, multi_addresses: params.multiaddrs, control_addresses: Default::default(), - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let miner_actor_code_cid = rt.get_code_cid_for_type(Type::Miner); let ext::init::ExecReturn { id_address, robust_address } = rt @@ -109,17 +107,18 @@ impl Actor { RawBytes::serialize(init::ExecParams { code_cid: miner_actor_code_cid, constructor_params, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, value, )? 
- .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let window_post_proof_type = params.window_post_proof_type; rt.transaction(|st: &mut State, rt| { let mut claims = - make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims"), - )?; + make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; set_claim( &mut claims, &id_address, @@ -129,12 +128,8 @@ impl Actor { raw_byte_power: Default::default(), }, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to put power in claimed table while creating miner", - ) - })?; + .context("failed to put power in claimed table while creating miner")?; + st.miner_count += 1; st.update_stats_for_new_miner(rt.policy(), window_post_proof_type).map_err(|e| { @@ -146,9 +141,9 @@ impl Actor { ) })?; - st.claims = claims.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims") - })?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) })?; Ok(CreateMinerReturn { id_address, robust_address }) @@ -169,9 +164,8 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut claims = - make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims"), - )?; + make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; st.add_to_claim( rt.policy(), @@ -180,19 +174,16 @@ impl Actor { ¶ms.raw_byte_delta, ¶ms.quality_adjusted_delta, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "failed to update power raw {}, qa {}", - params.raw_byte_delta, params.quality_adjusted_delta, - ), + 
.with_context(|| { + format!( + "failed to update power raw {}, qa {}", + params.raw_byte_delta, params.quality_adjusted_delta, ) })?; - st.claims = claims.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims") - })?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) }) } @@ -225,17 +216,14 @@ impl Actor { CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events")?; - st.append_cron_event(&mut events, params.event_epoch, miner_event).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to enroll cron event") - })?; + st.append_cron_event(&mut events, params.event_epoch, miner_event) + .context("failed to enroll cron event")?; - st.cron_event_queue = events.root().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush cron events") - })?; + st.cron_event_queue = events + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush cron events")?; Ok(()) })?; Ok(()) @@ -256,7 +244,8 @@ impl Actor { TokenAmount::zero(), ) .map_err(|e| e.wrap("failed to check epoch baseline power"))? - .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; if let Err(e) = Self::process_batch_proof_verifies(rt, &rewret) { error!("unexpected error processing batch proof verifies: {}. 
Skipping all verification for epoch {}", e, rt.curr_epoch()); @@ -278,7 +267,7 @@ impl Actor { rt.send( *REWARD_ACTOR_ADDR, ext::reward::UPDATE_NETWORK_KPI, - this_epoch_raw_byte_power?, + this_epoch_raw_byte_power.exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::from(0_u32), ) .map_err(|e| e.wrap("failed to update network KPI with reward actor"))?; @@ -326,23 +315,17 @@ impl Actor { HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load proof batching set", - ) - })? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load proof batching set")? } else { debug!("ProofValidationBatch created"); Multimap::new(rt.store(), HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH) }; let miner_addr = rt.message().caller(); - let arr = mmap.get::(&miner_addr.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get seal verify infos at addr {}", miner_addr), - ) - })?; + let arr = mmap + .get::(&miner_addr.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get seal verify infos at addr {}", miner_addr) + })?; if let Some(arr) = arr { if arr.count() >= MAX_MINER_PROVE_COMMITS_PER_EPOCH { return Err(ActorError::unchecked( @@ -355,13 +338,12 @@ impl Actor { } } - mmap.add(miner_addr.to_bytes().into(), seal_info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to insert proof into set") - })?; + mmap.add(miner_addr.to_bytes().into(), seal_info) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to insert proof into set")?; - let mmrc = mmap.root().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush proofs batch map") - })?; + let mmrc = mmap + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush proofs batch map")?; rt.charge_gas("OnSubmitVerifySeal", GAS_ON_SUBMIT_VERIFY_SEAL); st.proof_validation_batch = Some(mmrc); @@ -434,17 
+416,17 @@ impl Actor { } }; - if let Err(e) = mmap.for_all::<_, SealVerifyInfo>(|k, arr| { + if let Err(e) = mmap.try_for_all::<_, SealVerifyInfo, _>(|k, arr| { let addr = match Address::from_bytes(&k.0) { Ok(addr) => addr, Err(e) => { - return Err(anyhow!("failed to parse address key: {}", e)); + return Err(format!("failed to parse address key: {}", e)); } }; let contains_claim = match claims.contains_key(&addr.to_bytes()) { Ok(contains_claim) => contains_claim, - Err(e) => return Err(anyhow!("failed to look up clain: {}", e)), + Err(e) => return Err(format!("failed to look up clain: {}", e)), }; if !contains_claim { @@ -452,17 +434,18 @@ impl Actor { return Ok(()); } - let num_proofs: usize = arr.count().try_into()?; + let num_proofs: usize = arr + .count() + .try_into() + .map_err(|_| "can not convert u64 to usize".to_string())?; infos.reserve(num_proofs); arr.for_each(|_, svi| { infos.push(svi.clone()); - Ok(()) }) .map_err(|e| { - anyhow!( + format!( "failed to iterate over proof verify array for miner {}: {}", - addr, - e + addr, e ) })?; @@ -545,35 +528,23 @@ impl Actor { CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events")?; let claims = make_map_with_root_and_bitwidth::<_, Claim>(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; for epoch in st.first_cron_epoch..=rt_epoch { - let epoch_events = load_cron_events(&events, epoch).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load cron events at {}", epoch), - ) - })?; + let epoch_events = load_cron_events(&events, epoch) + .with_context(|| format!("failed to load cron events at {}", epoch))?; if epoch_events.is_empty() { continue; } 
for evt in epoch_events.into_iter() { - let miner_has_claim = - claims.contains_key(&evt.miner_addr.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to look up claim", - ) - })?; + let miner_has_claim = claims + .contains_key(&evt.miner_addr.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim")?; if !miner_has_claim { debug!("skipping cron event for unknown miner: {}", evt.miner_addr); continue; @@ -581,18 +552,17 @@ impl Actor { cron_events.push(evt); } - events.remove_all(&epoch_key(epoch)).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to clear cron events at {}", epoch), - ) - })?; + events + .remove_all(&epoch_key(epoch)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to clear cron events at {}", epoch) + })?; } st.first_cron_epoch = rt_epoch + 1; - st.cron_event_queue = events.root().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush events") - })?; + st.cron_event_queue = events + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush events")?; Ok(()) })?; @@ -603,7 +573,8 @@ impl Actor { event_payload: event.callback_payload.bytes().to_owned(), reward_smoothed: rewret.this_epoch_reward_smoothed.clone(), quality_adj_power_smoothed: st.this_epoch_qa_power_smoothed.clone(), - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let res = rt.send( event.miner_addr, ext::miner::ON_DEFERRED_CRON_EVENT_METHOD, @@ -624,9 +595,7 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; // Remove power and leave miner frozen for miner_addr in failed_miner_crons { @@ -641,9 +610,9 @@ impl Actor { st.miner_count -= 1 } - st.claims = 
claims.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims") - })?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) })?; } @@ -668,7 +637,7 @@ impl ActorCode for Actor { } Some(Method::CreateMiner) => { let res = Self::create_miner(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::UpdateClaimedPower) => { Self::update_claimed_power(rt, cbor::deserialize_params(params)?)?; @@ -693,7 +662,7 @@ impl ActorCode for Actor { } Some(Method::CurrentTotalPower) => { let res = Self::current_total_power(rt)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/power/src/state.rs b/actors/power/src/state.rs index bfac87564..a22bbbd61 100644 --- a/actors/power/src/state.rs +++ b/actors/power/src/state.rs @@ -3,12 +3,11 @@ use std::ops::Neg; -use anyhow::{anyhow, Context}; use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ - actor_error, make_empty_map, make_map_with_root, make_map_with_root_and_bitwidth, - ActorDowncast, ActorError, Map, Multimap, + actor_error, make_empty_map, make_map_with_root, make_map_with_root_and_bitwidth, ActorContext, + ActorContext2, ActorError, Map, Multimap, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -75,16 +74,15 @@ pub struct State { } impl State { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .map_err(|e| anyhow!("Failed to create empty map: {}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty map")?; let empty_mmap = Multimap::new(store, CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH) .root() - 
.map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to get empty multimap cid") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to get empty multimap cid")?; + Ok(State { cron_event_queue: empty_mmap, claims: empty_map, @@ -106,11 +104,14 @@ impl State { policy: &Policy, s: &BS, miner: &Address, - ) -> anyhow::Result { - let claims = make_map_with_root_and_bitwidth(&self.claims, s, HAMT_BIT_WIDTH)?; + ) -> Result { + let claims = make_map_with_root_and_bitwidth(&self.claims, s, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; - let claim = - get_claim(&claims, miner)?.ok_or_else(|| anyhow!("no claim for actor: {}", miner))?; + let claim = get_claim(&claims, miner)? + .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no claim for actor: {}", miner) + })?; let miner_nominal_power = &claim.raw_byte_power; let miner_min_power = consensus_miner_min_power(policy, claim.window_post_proof_type) @@ -132,8 +133,8 @@ impl State { &self, s: &BS, miner: &Address, - ) -> anyhow::Result> { - let claims = make_map_with_root(&self.claims, s)?; + ) -> Result, ActorError> { + let claims = make_map_with_root(&self.claims, s).exit_code(ExitCode::USR_ILLEGAL_STATE)?; get_claim(&claims, miner).map(|s| s.cloned()) } @@ -144,7 +145,7 @@ impl State { miner: &Address, power: &StoragePower, qa_power: &StoragePower, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let old_claim = get_claim(claims, miner)? 
.ok_or_else(|| actor_error!(not_found, "no claim for actor {}", miner))?; @@ -185,25 +186,25 @@ impl State { } if new_claim.raw_byte_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claimed raw byte power: {}", new_claim.raw_byte_power - ))); + )); } if new_claim.quality_adj_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claimed quality adjusted power: {}", new_claim.quality_adj_power - ))); + )); } if self.miner_above_min_power_count < 0 { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative amount of miners lather than min: {}", self.miner_above_min_power_count - ))); + )); } set_claim(claims, miner, new_claim) @@ -218,14 +219,16 @@ impl State { events: &mut Multimap, epoch: ChainEpoch, event: CronEvent, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if epoch < self.first_cron_epoch { self.first_cron_epoch = epoch; } - events.add(epoch_key(epoch), event).map_err(|e| { - e.downcast_wrap(format!("failed to store cron event at epoch {}", epoch)) - })?; + events + .add(epoch_key(epoch), event) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store cron event at epoch {}", epoch) + })?; Ok(()) } @@ -253,7 +256,7 @@ impl State { &mut self, policy: &Policy, window_post_proof: RegisteredPoStProof, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let min_power = consensus_miner_min_power(policy, window_post_proof)?; if !min_power.is_positive() { @@ -271,13 +274,13 @@ impl State { where BS: Blockstore, { - let claims = make_map_with_root::<_, Claim>(&self.claims, store).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + let claims = make_map_with_root::<_, Claim>(&self.claims, store) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; - if !claims.contains_key(&miner_addr.to_bytes()).map_err(|e| { - 
e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim") - })? { + if !claims + .contains_key(&miner_addr.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim")? + { return Err(actor_error!( forbidden, "unknown miner {} forbidden to interact with power actor", @@ -291,12 +294,10 @@ impl State { &self, store: &BS, miner: &Address, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let claims = make_map_with_root_and_bitwidth::<_, Claim>(&self.claims, store, HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; let claim = get_claim(&claims, miner)?; Ok(claim.cloned()) @@ -307,23 +308,26 @@ impl State { policy: &Policy, claims: &mut Map, miner: &Address, - ) -> anyhow::Result<()> { - let (rbp, qap) = - match get_claim(claims, miner).map_err(|e| e.downcast_wrap("failed to get claim"))? { - None => { - return Ok(()); - } - Some(claim) => (claim.raw_byte_power.clone(), claim.quality_adj_power.clone()), - }; + ) -> Result<(), ActorError> { + let (rbp, qap) = match get_claim(claims, miner).context("failed to get claim")? { + None => { + return Ok(()); + } + Some(claim) => (claim.raw_byte_power.clone(), claim.quality_adj_power.clone()), + }; // Subtract from stats to remove power self.add_to_claim(policy, claims, miner, &rbp.neg(), &qap.neg()) - .map_err(|e| e.downcast_wrap("failed to subtract miner power before deleting claim"))?; + .context("failed to subtract miner power before deleting claim")?; claims .delete(&miner.to_bytes()) - .map_err(|e| e.downcast_wrap(format!("failed to delete claim for address {}", miner)))? - .ok_or_else(|| anyhow!("failed to delete claim for address: doesn't exist"))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete claim for address {}", miner) + })? 
+ .ok_or_else(|| { + actor_error!(illegal_state, "failed to delete claim for address: doesn't exist") + })?; Ok(()) } } @@ -331,13 +335,13 @@ impl State { pub(super) fn load_cron_events( mmap: &Multimap, epoch: ChainEpoch, -) -> anyhow::Result> { +) -> Result, ActorError> { let mut events = Vec::new(); mmap.for_each(&epoch_key(epoch), |_, v: &CronEvent| { events.push(v.clone()); - Ok(()) - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(events) } @@ -346,35 +350,38 @@ pub(super) fn load_cron_events( fn get_claim<'m, BS: Blockstore>( claims: &'m Map, a: &Address, -) -> anyhow::Result> { - claims - .get(&a.to_bytes()) - .map_err(|e| e.downcast_wrap(format!("failed to get claim for address {}", a))) +) -> Result, ActorError> { + claims.get(&a.to_bytes()).with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get claim for address {}", a) + }) } pub fn set_claim( claims: &mut Map, a: &Address, claim: Claim, -) -> anyhow::Result<()> { +) -> Result<(), ActorError> { if claim.raw_byte_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claim raw power {}", claim.raw_byte_power - ))); + )); } if claim.quality_adj_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claim quality-adjusted power {}", claim.quality_adj_power - ))); + )); } claims .set(a.to_bytes().into(), claim) - .map_err(|e| e.downcast_wrap(format!("failed to set claim for address {}", a)))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set claim for address {}", a) + })?; + Ok(()) } @@ -409,7 +416,7 @@ impl Cbor for CronEvent {} pub fn consensus_miner_min_power( policy: &Policy, p: RegisteredPoStProof, -) -> anyhow::Result { +) -> Result { use RegisteredPoStProof::*; match p { StackedDRGWinning2KiBV1 @@ -422,7 +429,7 @@ pub fn consensus_miner_min_power( | StackedDRGWindow512MiBV1 | StackedDRGWindow32GiBV1 | StackedDRGWindow64GiBV1 => 
Ok(policy.minimum_consensus_power.clone()), - Invalid(i) => Err(anyhow::anyhow!("unsupported proof type: {}", i)), + Invalid(i) => Err(actor_error!(illegal_argument, "unsupported proof type: {}", i)), } } diff --git a/actors/power/tests/harness/mod.rs b/actors/power/tests/harness/mod.rs index 7cdf07170..eedbeb64e 100644 --- a/actors/power/tests/harness/mod.rs +++ b/actors/power/tests/harness/mod.rs @@ -245,7 +245,6 @@ impl Harness { events_map .for_each::<_, CronEvent>(&epoch_key(epoch), |_, v| { events.push(v.to_owned()); - Ok(()) }) .unwrap(); @@ -347,7 +346,7 @@ impl Harness { } /// Collects all keys from a map into a vector. -fn collect_keys(m: Map) -> Result, Error> +fn collect_keys(m: Map) -> Result, Error> where BS: Blockstore, V: DeserializeOwned + Serialize, @@ -355,7 +354,6 @@ where let mut ret_keys = Vec::new(); m.for_each(|k, _| { ret_keys.push(k.clone()); - Ok(()) })?; Ok(ret_keys) diff --git a/actors/reward/src/lib.rs b/actors/reward/src/lib.rs index a46dcc242..ac5397ecc 100644 --- a/actors/reward/src/lib.rs +++ b/actors/reward/src/lib.rs @@ -3,14 +3,15 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorError, BURNT_FUNDS_ACTOR_ADDR, EXPECTED_LEADERS_PER_EPOCH, - STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + EXPECTED_LEADERS_PER_EPOCH, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::bigint::{Integer, Sign}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::StoragePower; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR, METHOD_SEND}; use log::{error, warn}; @@ -159,7 +160,7 @@ impl Actor { let res = rt.send( miner_addr, ext::miner::APPLY_REWARDS_METHOD, - RawBytes::serialize(&reward_params)?, + 
RawBytes::serialize(&reward_params).exit_code(ExitCode::USR_ILLEGAL_STATE)?, total_reward.clone(), ); if let Err(e) = res { @@ -251,7 +252,7 @@ impl ActorCode for Actor { } Some(Method::ThisEpochReward) => { let res = Self::this_epoch_reward(rt)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::UpdateNetworkKPI) => { let param: Option = cbor::deserialize_params(params)?; diff --git a/actors/runtime/Cargo.toml b/actors/runtime/Cargo.toml index 1813dbd1b..dc7e45e85 100644 --- a/actors/runtime/Cargo.toml +++ b/actors/runtime/Cargo.toml @@ -8,9 +8,10 @@ edition = "2021" repository = "https://github.com/filecoin-project/builtin-actors" [dependencies] -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" fvm_ipld_amt = { version = "0.4.0", features = ["go-interop"] } fvm_shared = { version = "0.6.0", default-features = false } +fvm_ipld_bitfield = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" serde = { version = "1.0.136", features = ["derive"] } diff --git a/actors/runtime/src/actor_error.rs b/actors/runtime/src/actor_error.rs index ba80e4ffc..e2d66a3d1 100644 --- a/actors/runtime/src/actor_error.rs +++ b/actors/runtime/src/actor_error.rs @@ -1,3 +1,5 @@ +use std::fmt::Display; + use fvm_shared::error::ExitCode; use thiserror::Error; @@ -64,13 +66,6 @@ impl ActorError { } } -/// Converts a raw encoding error into an ErrSerialization. -impl From for ActorError { - fn from(e: fvm_ipld_encoding::Error) -> Self { - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - /// Converts an actor deletion error into an actor error with the appropriate exit code. This /// facilitates propagation. #[cfg(feature = "fil-actor")] @@ -108,3 +103,94 @@ macro_rules! 
actor_error { $crate::actor_error!($code; $msg $(, $ex)*) }; } + +pub trait ActorContext { + fn context(self, context: C) -> Result + where + C: Display + Send + Sync + 'static; + fn with_context(self, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C; +} + +pub trait ActorContext2: Sized { + fn exit_code(self, code: ExitCode) -> Result; + + fn context_code(self, code: ExitCode, context: C) -> Result + where + C: Display + Send + Sync + 'static, + { + self.with_context_code(code, || context) + } + + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C; +} + +// hack to allow anyhow::Error + std::error::Error, can be dropped once Runtime is fixed +impl ActorContext2 for Result { + fn exit_code(self, code: ExitCode) -> Result { + self.map_err(|err| ActorError { exit_code: code, msg: err.to_string() }) + } + + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.map_err(|err| ActorError { exit_code: code, msg: format!("{}: {}", f(), err) }) + } +} + +impl ActorContext2 for Option { + fn exit_code(self, code: ExitCode) -> Result { + self.ok_or_else(|| ActorError { exit_code: code, msg: "None".to_string() }) + } + + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.ok_or_else(|| ActorError { exit_code: code, msg: format!("{}", f()) }) + } +} + +// TODO: remove once the runtime doesn't use anyhow::Result anymore +impl From for ActorError { + fn from(e: anyhow::Error) -> Self { + match e.downcast::() { + Ok(actor_err) => actor_err, + Err(other) => ActorError::unchecked( + ExitCode::USR_ILLEGAL_ARGUMENT, + format!("runtime error: {}", other), + ), + } + } +} + +impl ActorContext for Result { + fn context(self, context: C) -> Result + where + C: Display + Send + Sync + 'static, + { + self.map_err(|mut err| { 
+ err.msg = format!("{}: {}", context, err.msg); + err + }) + } + + fn with_context(self, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.map_err(|mut err| { + err.msg = format!("{}: {}", f(), err.msg); + err + }) + } +} diff --git a/actors/runtime/src/builtin/shared.rs b/actors/runtime/src/builtin/shared.rs index 2ba233b78..3bcd450dc 100644 --- a/actors/runtime/src/builtin/shared.rs +++ b/actors/runtime/src/builtin/shared.rs @@ -6,13 +6,15 @@ use fvm_shared::address::Address; use fvm_shared::METHOD_SEND; use crate::runtime::Runtime; +use crate::{actor_error, ActorError}; pub const HAMT_BIT_WIDTH: u32 = 5; /// ResolveToIDAddr resolves the given address to it's ID address form. /// If an ID address for the given address dosen't exist yet, it tries to create one by sending /// a zero balance to the given address. -pub fn resolve_to_id_addr(rt: &mut RT, address: &Address) -> anyhow::Result
+// TODO: return RuntimeError +pub fn resolve_to_id_addr(rt: &mut RT, address: &Address) -> Result where BS: Blockstore, RT: Runtime, @@ -23,13 +25,15 @@ where } // send 0 balance to the account so an ID address for it is created and then try to resolve - rt.send(*address, METHOD_SEND, Default::default(), Default::default()) - .map_err(|e| e.wrap(&format!("failed to send zero balance to address {}", address)))?; + rt.send(*address, METHOD_SEND, Default::default(), Default::default())?; - rt.resolve_address(address).ok_or_else(|| { - anyhow::anyhow!( + let addr = rt.resolve_address(address).ok_or_else(|| { + actor_error!( + illegal_state, "failed to resolve address {} to ID address even after sending zero balance", - address, + address ) - }) + })?; + + Ok(addr) } diff --git a/actors/runtime/src/lib.rs b/actors/runtime/src/lib.rs index ce322f463..8f1e60d43 100644 --- a/actors/runtime/src/lib.rs +++ b/actors/runtime/src/lib.rs @@ -67,7 +67,7 @@ where pub fn make_map_with_root<'bs, BS, V>( root: &Cid, store: &'bs BS, -) -> Result, HamtError> +) -> Result, HamtError> where BS: Blockstore, V: DeserializeOwned + Serialize, @@ -81,7 +81,7 @@ pub fn make_map_with_root_and_bitwidth<'bs, BS, V>( root: &Cid, store: &'bs BS, bitwidth: u32, -) -> Result, HamtError> +) -> Result, HamtError> where BS: Blockstore, V: DeserializeOwned + Serialize, diff --git a/actors/runtime/src/runtime/actor_blockstore.rs b/actors/runtime/src/runtime/actor_blockstore.rs index 497efd0bd..e711a8798 100644 --- a/actors/runtime/src/runtime/actor_blockstore.rs +++ b/actors/runtime/src/runtime/actor_blockstore.rs @@ -1,12 +1,11 @@ use std::convert::TryFrom; -use anyhow::Result; use cid::multihash::Code; use cid::Cid; use fvm_ipld_blockstore::Block; use fvm_sdk as fvm; -use crate::actor_error; +use crate::{actor_error, ActorError}; /// A blockstore suitable for use within actors. /// @@ -16,25 +15,27 @@ pub struct ActorBlockstore; /// Implements a blockstore delegating to IPLD syscalls. 
impl fvm_ipld_blockstore::Blockstore for ActorBlockstore { - fn get(&self, cid: &Cid) -> Result>> { + type Error = ActorError; + + fn get(&self, cid: &Cid) -> Result>, Self::Error> { // If this fails, the _CID_ is invalid. I.e., we have a bug. - fvm::ipld::get(cid).map(Some).map_err(|c| { - actor_error!(illegal_state; "get failed with {:?} on CID '{}'", c, cid).into() - }) + fvm::ipld::get(cid) + .map(Some) + .map_err(|c| actor_error!(illegal_state; "get failed with {:?} on CID '{}'", c, cid)) } - fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> { + fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<(), Self::Error> { let code = Code::try_from(k.hash().code()) .map_err(|e| actor_error!(serialization, e.to_string()))?; let k2 = self.put(code, &Block::new(k.codec(), block))?; if k != &k2 { - Err(actor_error!(serialization; "put block with cid {} but has cid {}", k, k2).into()) + Err(actor_error!(serialization; "put block with cid {} but has cid {}", k, k2)) } else { Ok(()) } } - fn put(&self, code: Code, block: &Block) -> Result + fn put(&self, code: Code, block: &Block) -> Result where D: AsRef<[u8]>, { diff --git a/actors/runtime/src/util/chaos/mod.rs b/actors/runtime/src/util/chaos/mod.rs index 9e563e802..8bf61624b 100644 --- a/actors/runtime/src/util/chaos/mod.rs +++ b/actors/runtime/src/util/chaos/mod.rs @@ -14,7 +14,7 @@ pub use state::*; pub use types::*; use crate::runtime::{ActorCode, Runtime}; -use crate::{actor_error, cbor, ActorError}; +use crate::{actor_error, cbor, ActorContext2, ActorError}; mod state; mod types; @@ -219,12 +219,12 @@ impl ActorCode for Actor { } Some(Method::ResolveAddress) => { let res = Self::resolve_address(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::Send) => { let res: SendReturn = Self::send(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) 
+ Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::DeleteActor) => { @@ -244,7 +244,7 @@ impl ActorCode for Actor { Some(Method::InspectRuntime) => { let inspect = Self::inspect_runtime(rt)?; - Ok(RawBytes::serialize(inspect)?) + Ok(RawBytes::serialize(inspect).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), diff --git a/actors/runtime/src/util/downcast.rs b/actors/runtime/src/util/downcast.rs deleted file mode 100644 index 2b8b1d621..000000000 --- a/actors/runtime/src/util/downcast.rs +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use anyhow::anyhow; -use fvm_ipld_amt::Error as AmtError; -use fvm_ipld_encoding::Error as EncodingError; -use fvm_ipld_hamt::Error as HamtError; -use fvm_shared::error::ExitCode; - -use crate::ActorError; - -/// Trait to allow multiple error types to be able to be downcasted into an `ActorError`. -pub trait ActorDowncast { - /// Downcast a dynamic std Error into an `ActorError`. If the error cannot be downcasted - /// into an ActorError automatically, use the provided `ExitCode` to generate a new error. - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError; - - /// Wrap the error with a message, without overwriting an exit code. 
- fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error; -} - -impl ActorDowncast for anyhow::Error { - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { - match downcast_util(self) { - Ok(actor_error) => actor_error.wrap(msg), - Err(other) => { - ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) - } - } - } - fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { - match downcast_util(self) { - Ok(actor_error) => anyhow!(actor_error.wrap(msg)), - Err(other) => anyhow!("{}: {}", msg.as_ref(), other), - } - } -} - -impl ActorDowncast for AmtError { - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { - match self { - AmtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), - other => { - ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) - } - } - } - fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { - match self { - AmtError::Dynamic(e) => e.downcast_wrap(msg), - other => anyhow!("{}: {}", msg.as_ref(), other), - } - } -} - -impl ActorDowncast for HamtError { - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { - match self { - HamtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), - other => { - ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) - } - } - } - fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { - match self { - HamtError::Dynamic(e) => e.downcast_wrap(msg), - other => anyhow!("{}: {}", msg.as_ref(), other), - } - } -} - -/// Attempts to downcast a `Box` into an actor error. -/// Returns `Ok` with the actor error if it can be downcasted automatically -/// and returns `Err` with the original error if it cannot. 
-fn downcast_util(error: anyhow::Error) -> anyhow::Result { - // Check if error is ActorError, return as such - let error = match error.downcast::() { - Ok(actor_err) => return Ok(actor_err), - Err(other) => other, - }; - - // Check if error is Encoding error, if so return `ErrSerialization` - let error = match error.downcast::() { - Ok(enc_error) => { - return Ok(ActorError::unchecked(ExitCode::USR_SERIALIZATION, enc_error.to_string())) - } - Err(other) => other, - }; - - // Dynamic errors can come from Array and Hamt through blockstore usages, check them. - let error = match error.downcast::() { - Ok(amt_err) => match amt_err { - AmtError::Dynamic(de) => match downcast_util(de) { - Ok(a) => return Ok(a), - Err(other) => other, - }, - other => anyhow!(other), - }, - Err(other) => other, - }; - let error = match error.downcast::() { - Ok(amt_err) => match amt_err { - HamtError::Dynamic(de) => match downcast_util(de) { - Ok(a) => return Ok(a), - Err(other) => other, - }, - other => anyhow!(other), - }, - Err(other) => other, - }; - - // Could not be downcasted automatically to actor error, return initial dynamic error. 
- Err(error) -} diff --git a/actors/runtime/src/util/mod.rs b/actors/runtime/src/util/mod.rs index a3610581c..d7d282804 100644 --- a/actors/runtime/src/util/mod.rs +++ b/actors/runtime/src/util/mod.rs @@ -1,14 +1,12 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -pub use self::downcast::*; -pub use self::multimap::*; +pub use self::multimap::{EitherError as MultiMapEitherError, Error as MultiMapError, Multimap}; pub use self::set::Set; pub use self::set_multimap::SetMultimap; pub mod cbor; pub mod chaos; -mod downcast; mod multimap; mod set; mod set_multimap; diff --git a/actors/runtime/src/util/multimap.rs b/actors/runtime/src/util/multimap.rs index ff9b2856c..62cce8897 100644 --- a/actors/runtime/src/util/multimap.rs +++ b/actors/runtime/src/util/multimap.rs @@ -3,12 +3,19 @@ use cid::Cid; use fvm_ipld_blockstore::Blockstore; -use fvm_ipld_hamt::Error; use serde::de::DeserializeOwned; use serde::Serialize; use crate::{make_empty_map, make_map_with_root_and_bitwidth, Array, BytesKey, Map}; +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("amt: {0}")] + Amt(#[from] fvm_ipld_amt::Error), + #[error("hamt: {0}")] + Hamt(#[from] fvm_ipld_hamt::Error), +} + /// Multimap stores multiple values per key in a Hamt of Amts. /// The order of insertion of values for each key is retained. pub struct Multimap<'a, BS>(Map<'a, BS, Cid>, u32); @@ -29,18 +36,19 @@ where cid: &Cid, outer_bitwidth: u32, inner_bitwidth: u32, - ) -> Result { + ) -> Result> { Ok(Self(make_map_with_root_and_bitwidth(cid, bs, outer_bitwidth)?, inner_bitwidth)) } /// Retrieve root from the multimap. #[inline] - pub fn root(&mut self) -> Result { - self.0.flush() + pub fn root(&mut self) -> Result> { + let cid = self.0.flush()?; + Ok(cid) } /// Adds a value for a key. 
- pub fn add(&mut self, key: BytesKey, value: V) -> Result<(), Error> + pub fn add(&mut self, key: BytesKey, value: V) -> Result<(), Error> where V: Serialize + DeserializeOwned, { @@ -50,10 +58,10 @@ where .unwrap_or_else(|| Array::new_with_bit_width(self.0.store(), self.1)); // Set value at next index - arr.set(arr.count(), value).map_err(|e| anyhow::anyhow!(e))?; + arr.set(arr.count(), value)?; // flush to get new array root to put in hamt - let new_root = arr.flush().map_err(|e| anyhow::anyhow!(e))?; + let new_root = arr.flush()?; // Set hamt node to array root self.0.set(key, new_root)?; @@ -62,51 +70,100 @@ where /// Gets the Array of value type `V` using the multimap store. #[inline] - pub fn get(&self, key: &[u8]) -> Result>, Error> + pub fn get(&self, key: &[u8]) -> Result>, Error> where V: DeserializeOwned + Serialize, { match self.0.get(key)? { - Some(cid) => { - Ok(Some(Array::load(cid, *self.0.store()).map_err(|e| anyhow::anyhow!(e))?)) - } + Some(cid) => Ok(Some(Array::load(cid, *self.0.store())?)), None => Ok(None), } } /// Removes all values for a key. #[inline] - pub fn remove_all(&mut self, key: &[u8]) -> Result<(), Error> { + pub fn remove_all(&mut self, key: &[u8]) -> Result<(), Error> { // Remove entry from table - self.0.delete(key)?.ok_or("failed to delete from multimap")?; + self.0.delete(key)?; + + Ok(()) + } + + /// Iterates through all values in the array at a given key. + pub fn try_for_each(&self, key: &[u8], f: F) -> Result<(), EitherError> + where + V: Serialize + DeserializeOwned, + F: FnMut(u64, &V) -> Result<(), U>, + { + if let Some(amt) = self.get::(key)? { + amt.try_for_each(f).map_err(|err| match err { + fvm_ipld_amt::EitherError::User(e) => EitherError::User(e), + fvm_ipld_amt::EitherError::Amt(e) => EitherError::MultiMap(e.into()), + })?; + } Ok(()) } /// Iterates through all values in the array at a given key. 
- pub fn for_each(&self, key: &[u8], f: F) -> Result<(), Error> + pub fn for_each(&self, key: &[u8], f: F) -> Result<(), Error> where V: Serialize + DeserializeOwned, - F: FnMut(u64, &V) -> anyhow::Result<()>, + F: FnMut(u64, &V), { if let Some(amt) = self.get::(key)? { - amt.for_each(f).map_err(|e| anyhow::anyhow!(e))?; + amt.for_each(f)?; } Ok(()) } /// Iterates through all arrays in the multimap - pub fn for_all(&self, mut f: F) -> Result<(), Error> + pub fn try_for_all(&self, mut f: F) -> Result<(), EitherError> where V: Serialize + DeserializeOwned, - F: FnMut(&BytesKey, &Array) -> anyhow::Result<()>, + F: FnMut(&BytesKey, &Array) -> Result<(), U>, { - self.0.for_each::<_>(|key, arr_root| { - let arr = Array::load(arr_root, *self.0.store())?; - f(key, &arr) + self.0 + .try_for_each::<_, EitherError>(|key, arr_root| { + let arr = Array::load(arr_root, *self.0.store()) + .map_err(|e| EitherError::MultiMap(e.into()))?; + f(key, &arr).map_err(EitherError::User)?; + Ok(()) + }) + .map_err(|err| match err { + fvm_ipld_hamt::EitherError::User(e) => e, + fvm_ipld_hamt::EitherError::Hamt(e) => EitherError::MultiMap(e.into()), + })?; + + Ok(()) + } + + /// Iterates through all arrays in the multimap + pub fn for_all(&self, mut f: F) -> Result<(), Error> + where + V: Serialize + DeserializeOwned, + F: FnMut(&BytesKey, &Array), + { + self.try_for_all(|key, root| { + f(key, root); + Ok(()) + }) + .map_err(|err| match err { + EitherError::User(()) => unreachable!(), + EitherError::MultiMap(e) => e, })?; Ok(()) } } + +/// This error wraps around around two different errors, either the native `Error` from `multimap`, or +/// a custom user error, returned from executing a user defined function. 
+#[derive(Debug, thiserror::Error)] +pub enum EitherError { + #[error("user: {0}")] + User(U), + #[error("multimap: {0}")] + MultiMap(#[from] Error), +} diff --git a/actors/runtime/src/util/set.rs b/actors/runtime/src/util/set.rs index 67f0faca3..277301c5e 100644 --- a/actors/runtime/src/util/set.rs +++ b/actors/runtime/src/util/set.rs @@ -33,19 +33,19 @@ where } /// Initializes a Set from a root Cid. - pub fn from_root(bs: &'a BS, cid: &Cid) -> Result { + pub fn from_root(bs: &'a BS, cid: &Cid) -> Result> { Ok(Self(make_map_with_root(cid, bs)?)) } /// Retrieve root from the Set. #[inline] - pub fn root(&mut self) -> Result { + pub fn root(&mut self) -> Result> { self.0.flush() } /// Adds key to the set. #[inline] - pub fn put(&mut self, key: BytesKey) -> Result<(), Error> { + pub fn put(&mut self, key: BytesKey) -> Result<(), Error> { // Set hamt node to array root self.0.set(key, ())?; Ok(()) @@ -53,13 +53,13 @@ where /// Checks if key exists in the set. #[inline] - pub fn has(&self, key: &[u8]) -> Result { + pub fn has(&self, key: &[u8]) -> Result> { self.0.contains_key(key) } /// Deletes key from set. #[inline] - pub fn delete(&mut self, key: &[u8]) -> Result, Error> { + pub fn delete(&mut self, key: &[u8]) -> Result, Error> { match self.0.delete(key)? { Some(_) => Ok(Some(())), None => Ok(None), @@ -67,21 +67,32 @@ where } /// Iterates through all keys in the set. - pub fn for_each(&self, mut f: F) -> Result<(), Error> + pub fn try_for_each(&self, mut f: F) -> Result<(), Error> where - F: FnMut(&BytesKey) -> anyhow::Result<()>, + F: FnMut(&BytesKey) -> Result<(), Error>, + { + // Calls the for each function on the hamt with ignoring the value + self.0.try_for_each(|s, _: &()| f(s)).map_err(|err| match err { + fvm_ipld_hamt::EitherError::User(e) => e, + fvm_ipld_hamt::EitherError::Hamt(e) => e, + }) + } + + /// Iterates through all keys in the set. 
+ pub fn for_each(&self, mut f: F) -> Result<(), Error> + where + F: FnMut(&BytesKey), { // Calls the for each function on the hamt with ignoring the value self.0.for_each(|s, _: &()| f(s)) } /// Collects all keys from the set into a vector. - pub fn collect_keys(&self) -> Result, Error> { + pub fn collect_keys(&self) -> Result, Error> { let mut ret_keys = Vec::new(); self.for_each(|k| { ret_keys.push(k.clone()); - Ok(()) })?; Ok(ret_keys) diff --git a/actors/runtime/src/util/set_multimap.rs b/actors/runtime/src/util/set_multimap.rs index e5f0a402a..c1fdf6516 100644 --- a/actors/runtime/src/util/set_multimap.rs +++ b/actors/runtime/src/util/set_multimap.rs @@ -26,18 +26,18 @@ where } /// Initializes a SetMultimap from a root Cid. - pub fn from_root(bs: &'a BS, cid: &Cid) -> Result { + pub fn from_root(bs: &'a BS, cid: &Cid) -> Result> { Ok(Self(make_map_with_root(cid, bs)?)) } /// Retrieve root from the SetMultimap. #[inline] - pub fn root(&mut self) -> Result { + pub fn root(&mut self) -> Result> { self.0.flush() } /// Puts the DealID in the hash set of the key. - pub fn put(&mut self, key: ChainEpoch, value: DealID) -> Result<(), Error> { + pub fn put(&mut self, key: ChainEpoch, value: DealID) -> Result<(), Error> { // Get construct amt from retrieved cid or create new let mut set = self.get(key)?.unwrap_or_else(|| Set::new(self.0.store())); @@ -52,7 +52,7 @@ where } /// Puts slice of DealIDs in the hash set of the key. - pub fn put_many(&mut self, key: ChainEpoch, values: &[DealID]) -> Result<(), Error> { + pub fn put_many(&mut self, key: ChainEpoch, values: &[DealID]) -> Result<(), Error> { // Get construct amt from retrieved cid or create new let mut set = self.get(key)?.unwrap_or_else(|| Set::new(self.0.store())); @@ -70,7 +70,7 @@ where /// Gets the set at the given index of the `SetMultimap` #[inline] - pub fn get(&self, key: ChainEpoch) -> Result>, Error> { + pub fn get(&self, key: ChainEpoch) -> Result>, Error> { match self.0.get(&u64_key(key as u64))? 
{ Some(cid) => Ok(Some(Set::from_root(*self.0.store(), cid)?)), None => Ok(None), @@ -79,7 +79,7 @@ where /// Removes a DealID from a key hash set. #[inline] - pub fn remove(&mut self, key: ChainEpoch, v: DealID) -> Result<(), Error> { + pub fn remove(&mut self, key: ChainEpoch, v: DealID) -> Result<(), Error> { // Get construct amt from retrieved cid and return if no set exists let mut set = match self.get(key)? { Some(s) => s, @@ -96,7 +96,7 @@ where /// Removes set at index. #[inline] - pub fn remove_all(&mut self, key: ChainEpoch) -> Result<(), Error> { + pub fn remove_all(&mut self, key: ChainEpoch) -> Result<(), Error> { // Remove entry from table self.0.delete(&u64_key(key as u64))?; @@ -104,9 +104,29 @@ where } /// Iterates through keys and converts them to a DealID to call a function on each. - pub fn for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> + pub fn try_for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> where - F: FnMut(DealID) -> Result<(), Error>, + F: FnMut(DealID) -> Result<(), Error>, + { + // Get construct amt from retrieved cid and return if no set exists + let set = match self.get(key)? { + Some(s) => s, + None => return Ok(()), + }; + + set.try_for_each(|k| { + let v = parse_uint_key(k).expect("TODO"); + + // Run function on all parsed keys + f(v)?; + Ok(()) + }) + } + + /// Iterates through keys and converts them to a DealID to call a function on each. + pub fn for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> + where + F: FnMut(DealID), { // Get construct amt from retrieved cid and return if no set exists let set = match self.get(key)? { @@ -115,11 +135,10 @@ where }; set.for_each(|k| { - let v = parse_uint_key(k) - .map_err(|e| anyhow::anyhow!("Could not parse key: {:?}, ({})", &k.0, e))?; + let v = parse_uint_key(k).expect("TODO"); // Run function on all parsed keys - Ok(f(v)?) 
+ f(v); }) } } diff --git a/actors/runtime/tests/multimap_test.rs b/actors/runtime/tests/multimap_test.rs index 3f2ef2952..3d131c254 100644 --- a/actors/runtime/tests/multimap_test.rs +++ b/actors/runtime/tests/multimap_test.rs @@ -39,7 +39,6 @@ fn for_each() { let mut vals: Vec<(u64, u64)> = Vec::new(); mm.for_each(&addr.to_bytes(), |i, v| { vals.push((i, *v)); - Ok(()) }) .unwrap(); diff --git a/actors/runtime/tests/set_multimap_test.rs b/actors/runtime/tests/set_multimap_test.rs index 47278e74b..534982467 100644 --- a/actors/runtime/tests/set_multimap_test.rs +++ b/actors/runtime/tests/set_multimap_test.rs @@ -41,7 +41,6 @@ fn for_each() { let mut vals: Vec = Vec::new(); smm.for_each(epoch, |i| { vals.push(i); - Ok(()) }) .unwrap(); diff --git a/actors/system/src/lib.rs b/actors/system/src/lib.rs index d8b8c2c5b..d01c8b896 100644 --- a/actors/system/src/lib.rs +++ b/actors/system/src/lib.rs @@ -1,7 +1,8 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::anyhow; + use cid::{multihash, Cid}; +use fil_actors_runtime::ActorContext2; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::CborStore; @@ -12,7 +13,7 @@ use num_derive::FromPrimitive; use num_traits::FromPrimitive; use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, ActorDowncast, ActorError, SYSTEM_ACTOR_ADDR}; +use fil_actors_runtime::{actor_error, ActorError, SYSTEM_ACTOR_ADDR}; #[cfg(feature = "fil-actor")] fil_actors_runtime::wasm_trampoline!(Actor); @@ -35,10 +36,11 @@ pub struct State { impl Cbor for State {} impl State { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let c = store .put_cbor(&Vec::<(String, Cid)>::new(), multihash::Code::Blake2b256) - .map_err(|e| anyhow!("failed to put system state to store: {}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to put system state to store")?; + Ok(Self { builtin_actors: c }) 
} @@ -65,10 +67,10 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let state = State::new(rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct state") - })?; + let state = State::new(rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct state")?; rt.create(&state)?; + Ok(()) } } diff --git a/actors/verifreg/Cargo.toml b/actors/verifreg/Cargo.toml index fc0919208..8680859f6 100644 --- a/actors/verifreg/Cargo.toml +++ b/actors/verifreg/Cargo.toml @@ -21,8 +21,7 @@ num-traits = "0.2.14" num-derive = "0.3.3" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } lazy_static = "1.4.0" -anyhow = "1.0.56" -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index 4ef46e036..168b25941 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -3,8 +3,8 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_id_addr, ActorDowncast, - ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_id_addr, ActorContext, + ActorContext2, ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -55,9 +55,8 @@ impl Actor { .resolve_address(&root_key) .ok_or_else(|| actor_error!(illegal_argument, "root should be an ID address"))?; - let st = State::new(rt.store(), id_addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create verifreg state") - })?; + let st = State::new(rt.store(), id_addr) + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create verifreg state")?; rt.create(&st)?; Ok(()) @@ -77,12 +76,8 @@ impl Actor { )); 
} - let verifier = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let verifier = resolve_to_id_addr(rt, ¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", params.address))?; let st: State = rt.state()?; rt.validate_immediate_caller_is(std::iter::once(&st.root_key))?; @@ -94,27 +89,20 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; + let verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verified_clients, rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; - let found = verified_clients.contains_key(&verifier.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get client state for {}", verifier), - ) - })?; + let found = verified_clients + .contains_key(&verifier.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get client state for {}", verifier) + })?; if found { return Err(actor_error!( illegal_argument, @@ -123,12 +111,13 @@ impl Actor { )); } - verifiers.set(verifier.to_bytes().into(), BigIntDe(params.allowance.clone())).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add verifier"), - )?; - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; + verifiers + .set(verifier.to_bytes().into(), BigIntDe(params.allowance.clone())) + 
.context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add verifier")?; + + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; Ok(()) })?; @@ -141,12 +130,8 @@ impl Actor { BS: Blockstore, RT: Runtime, { - let verifier = resolve_to_id_addr(rt, &verifier_addr).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", verifier_addr), - ) - })?; + let verifier = resolve_to_id_addr(rt, &verifier_addr) + .with_context(|| format!("failed to resolve addr {} to ID addr", verifier_addr))?; let state: State = rt.state()?; rt.validate_immediate_caller_is(std::iter::once(&state.root_key))?; @@ -157,21 +142,19 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; verifiers .delete(&verifier.to_bytes()) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to remove verifier") - })? - .ok_or_else(|| { - actor_error!(illegal_argument, "failed to remove verifier: not found") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to remove verifier")? 
+ .context_code( + ExitCode::USR_ILLEGAL_ARGUMENT, + "failed to remove verifier: not found", + )?; + + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; Ok(()) })?; @@ -198,12 +181,8 @@ impl Actor { )); } - let client = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let client = resolve_to_id_addr(rt, ¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", params.address))?; let st: State = rt.state()?; if client == st.root_key { @@ -213,37 +192,28 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; + let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // Validate caller is one of the verifiers. let verifier = rt.message().caller(); let BigIntDe(verifier_cap) = verifiers .get(&verifier.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get Verifier {}", verifier), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get Verifier {}", verifier) })? 
- .ok_or_else(|| actor_error!(not_found, format!("no such Verifier {}", verifier)))?; + .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no such Verifier {}", verifier) + })?; // Validate client to be added isn't a verifier - let found = verifiers.contains_key(&client.to_bytes()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier") - })?; + let found = verifiers + .contains_key(&client.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; + if found { return Err(actor_error!( illegal_argument, @@ -263,19 +233,18 @@ impl Actor { } let new_verifier_cap = verifier_cap - ¶ms.allowance; - verifiers.set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to update new verifier cap for {}", verifier), - ) - })?; + verifiers + .set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to update new verifier cap for {}", verifier) + })?; + + let client_cap = verified_clients + .get(&client.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to get verified client {}", client) + })?; - let client_cap = verified_clients.get(&client.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to get verified client {}", client), - ) - })?; // if verified client exists, add allowance to existing cap // otherwise, create new client with allownace let client_cap = if let Some(BigIntDe(client_cap)) = client_cap { @@ -284,24 +253,18 @@ impl Actor { params.allowance }; - verified_clients.set(client.to_bytes().into(), BigIntDe(client_cap.clone())).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "Failed to add verified client {} with cap {}", - client, client_cap, - ), - ) - }, - )?; + verified_clients + .set(client.to_bytes().into(), 
BigIntDe(client_cap.clone())) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to add verified client {} with cap {}", client, client_cap,) + })?; - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; Ok(()) })?; @@ -319,12 +282,8 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*STORAGE_MARKET_ACTOR_ADDR))?; - let client = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let client = resolve_to_id_addr(rt, ¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", params.address))?; if params.deal_size < rt.policy().minimum_verified_deal_size { return Err(actor_error!( @@ -337,22 +296,16 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", client) })? 
- .ok_or_else(|| actor_error!(not_found, "no such verified client {}", client))?; + .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no such verified client {}", client) + })?; if vc_cap.is_negative() { return Err(actor_error!( illegal_state, @@ -378,33 +331,24 @@ impl Actor { // Will be restored later if the deal did not get activated with a ProvenSector. verified_clients .delete(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to delete verified client {}", client), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to delete verified client {}", client) })? - .ok_or_else(|| { - actor_error!( - illegal_state, - "Failed to delete verified client {}: not found", - client - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to delete verified client {}: not found", client) })?; } else { - verified_clients.set(client.to_bytes().into(), BigIntDe(new_vc_cap)).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to update verified client {}", client), - ) - }, - )?; + verified_clients + .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to update verified client {}", client) + })?; } - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; + Ok(()) })?; @@ -427,12 +371,8 @@ impl Actor { )); } - let client = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let client = resolve_to_id_addr(rt, ¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", 
params.address))?; let st: State = rt.state()?; if client == st.root_key { @@ -442,25 +382,20 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; + let verifiers = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verifiers, rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers")?; // validate we are NOT attempting to do this for a verifier - let found = verifiers.contains_key(&client.to_bytes()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier") - })?; + let found = verifiers + .contains_key(&client.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; + if found { return Err(actor_error!( illegal_argument, @@ -472,27 +407,24 @@ impl Actor { // Get existing cap let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", &client) })? 
.cloned() .unwrap_or_default(); // Update to new cap let new_vc_cap = vc_cap + &params.deal_size; - verified_clients.set(client.to_bytes().into(), BigIntDe(new_vc_cap)).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to put verified client {}", client), - ) - })?; + verified_clients + .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to put verified client {}", client) + })?; + + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; Ok(()) })?; @@ -508,35 +440,27 @@ impl Actor { BS: Blockstore, RT: Runtime, { - let client = resolve_to_id_addr(rt, &params.verified_client_to_remove).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, + let client = + resolve_to_id_addr(rt, &params.verified_client_to_remove).with_context(|| { format!( "failed to resolve client addr {} to ID addr", params.verified_client_to_remove - ), - ) - })?; + ) + })?; let verifier_1 = - resolve_to_id_addr(rt, &params.verifier_request_1.verifier).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!( - "failed to resolve verifier addr {} to ID addr", - params.verifier_request_1.verifier - ), + resolve_to_id_addr(rt, &params.verifier_request_1.verifier).with_context(|| { + format!( + "failed to resolve verifier addr {} to ID addr", + params.verifier_request_1.verifier ) })?; let verifier_2 = - resolve_to_id_addr(rt, &params.verifier_request_2.verifier).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!( - "failed to resolve verifier addr {} to ID addr", - params.verifier_request_2.verifier - ), + resolve_to_id_addr(rt, &params.verifier_request_2.verifier).with_context(|| { + format!( + "failed to resolve verifier
addr {} to ID addr", + params.verifier_request_2.verifier ) })?; @@ -557,9 +481,7 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // check that `client` is currently a verified client if !is_verifier(rt, st, client)? { @@ -569,11 +491,8 @@ impl Actor { // get existing cap allocated to client let BigIntDe(previous_data_cap) = verified_clients .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", &client) })? .cloned() .unwrap_or_default(); @@ -594,12 +513,10 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load datacap removal proposal ids", - ) - })?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load datacap removal proposal ids", + )?; let verifier_1_id = use_proposal_id(&mut proposal_ids, verifier_1, client)?; let verifier_2_id = use_proposal_id(&mut proposal_ids, verifier_2, client)?; @@ -622,22 +539,18 @@ let new_data_cap = &previous_data_cap - &params.data_cap_amount_to_remove; if new_data_cap <= Zero::zero() { // no DataCap remaining, delete verified client - verified_clients.delete(&client.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete verified client {}", &client), - ) - })?; + verified_clients + .delete(&client.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete verified client {}", &client) + })?; removed_data_cap_amount = previous_data_cap; } else { // update DataCap amount after removal verified_clients .set(BytesKey::from(client.to_bytes()), BigIntDe(new_data_cap))
- .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update datacap for verified client {}", &client), - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update datacap for verified client {}", &client) })?; removed_data_cap_amount = params.data_cap_amount_to_remove.clone(); } @@ -676,14 +589,12 @@ where rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // check that the `address` is currently a verified client let found = verified_clients .contains_key(&address.to_bytes()) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; Ok(found) } diff --git a/actors/verifreg/src/state.rs b/actors/verifreg/src/state.rs index 02fa90507..0ba5c38fc 100644 --- a/actors/verifreg/src/state.rs +++ b/actors/verifreg/src/state.rs @@ -18,10 +18,11 @@ pub struct State { } impl State { - pub fn new<BS: Blockstore>(store: &BS, root_key: Address) -> anyhow::Result<State> { - let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) - .flush() - .map_err(|e| anyhow::anyhow!("Failed to create empty map: {}", e))?; + pub fn new<BS: Blockstore>( + store: &BS, + root_key: Address, + ) -> Result<State, HamtError<BS::Error>> { + let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH).flush()?; Ok(State { root_key, diff --git a/test_vm/Cargo.toml b/test_vm/Cargo.toml index 59767fe42..2b06317a0 100644 --- a/test_vm/Cargo.toml +++ b/test_vm/Cargo.toml @@ -26,7 +26,7 @@ lazy_static = "1.4.0" fvm_shared = { version = "0.6.0", default-features = false } fvm_ipld_encoding = { version = "0.1.0", default-features = false } fvm_ipld_blockstore = { version = "0.1.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" log = "0.4.14" diff --git
a/test_vm/src/lib.rs b/test_vm/src/lib.rs index 21cb70c83..ceb8ea653 100644 --- a/test_vm/src/lib.rs +++ b/test_vm/src/lib.rs @@ -953,8 +953,8 @@ impl Error for TestVMError { } } -impl From<fvm_ipld_hamt::Error> for TestVMError { - fn from(h_err: fvm_ipld_hamt::Error) -> Self { +impl<E: std::error::Error> From<fvm_ipld_hamt::Error<E>> for TestVMError { + fn from(h_err: fvm_ipld_hamt::Error<E>) -> Self { vm_err(h_err.to_string().as_str()) } }