From 0777263a3accc6241f35cf922b5ac733bf1c91b7 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 12 Apr 2022 11:54:16 +0200 Subject: [PATCH 01/10] part 1, setup --- Cargo.toml | 16 +- actors/power/Cargo.toml | 1 - actors/power/src/lib.rs | 107 ++++------- actors/power/src/state.rs | 96 +++++----- actors/power/tests/harness/mod.rs | 11 +- actors/runtime/src/actor_error.rs | 75 ++++++++ actors/runtime/src/builtin/shared.rs | 18 +- actors/runtime/src/lib.rs | 4 +- .../runtime/src/runtime/actor_blockstore.rs | 11 +- actors/runtime/src/util/downcast.rs | 47 +---- actors/runtime/src/util/mod.rs | 2 +- actors/runtime/src/util/multimap.rs | 70 ++++--- actors/runtime/src/util/set.rs | 21 ++- actors/runtime/src/util/set_multimap.rs | 24 +-- actors/runtime/tests/multimap_test.rs | 2 +- actors/system/src/lib.rs | 9 +- actors/verifreg/Cargo.toml | 1 - actors/verifreg/src/lib.rs | 175 +++++++----------- actors/verifreg/src/state.rs | 9 +- 19 files changed, 350 insertions(+), 349 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 237a58179..dfdd8b193 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,14 +70,14 @@ members = [ ## Uncomment entries below when working locally on ref-fvm and this repo simultaneously. ## Assumes the ref-fvm checkout is in a sibling directory with the same name. 
## (Valid once FVM modules are published to crates.io) -# [patch.crates-io] -# fvm_shared = { path = "../ref-fvm/shared" } -# fvm_sdk = { path = "../ref-fvm/sdk" } -# fvm_ipld_hamt = { path = "../ref-fvm/ipld/hamt" } -# fvm_ipld_amt = { path = "../ref-fvm/ipld/amt" } -# fvm_ipld_bitfield = { path = "../ref-fvm/ipld/bitfield"} -# fvm_ipld_encoding = { path = "../ref-fvm/ipld/encoding"} -# fvm_ipld_blockstore = { path = "../ref-fvm/ipld/blockstore"} +[patch.crates-io] +fvm_shared = { path = "../ref-fvm/shared" } +fvm_sdk = { path = "../ref-fvm/sdk" } +fvm_ipld_hamt = { path = "../ref-fvm/ipld/hamt" } +fvm_ipld_amt = { path = "../ref-fvm/ipld/amt" } +fvm_ipld_bitfield = { path = "../ref-fvm/ipld/bitfield"} +fvm_ipld_encoding = { path = "../ref-fvm/ipld/encoding"} +fvm_ipld_blockstore = { path = "../ref-fvm/ipld/blockstore"} [profile.wasm] inherits = "release" diff --git a/actors/power/Cargo.toml b/actors/power/Cargo.toml index ea1371695..3270ee307 100644 --- a/actors/power/Cargo.toml +++ b/actors/power/Cargo.toml @@ -25,7 +25,6 @@ cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] integer-encoding = { version = "3.0.3", default-features = false } lazy_static = "1.4.0" serde = { version = "1.0.136", features = ["derive"] } -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/power/src/lib.rs b/actors/power/src/lib.rs index f34769db8..1eff6ccb9 100644 --- a/actors/power/src/lib.rs +++ b/actors/power/src/lib.rs @@ -4,12 +4,11 @@ use std::collections::BTreeSet; use std::convert::TryInto; -use anyhow::anyhow; use ext::init; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, ActorDowncast, ActorError, Multimap, - CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, make_map_with_root_and_bitwidth, ActorContext, ActorDowncast, ActorError, + Multimap, CRON_ACTOR_ADDR, 
INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -74,9 +73,7 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let st = State::new(rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create power actor state") - })?; + let st = State::new(rt.store()).context("Failed to create power actor state")?; rt.create(&st)?; Ok(()) } @@ -129,12 +126,8 @@ impl Actor { raw_byte_power: Default::default(), }, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to put power in claimed table while creating miner", - ) - })?; + .context("failed to put power in claimed table while creating miner")?; + st.miner_count += 1; st.update_stats_for_new_miner(rt.policy(), window_post_proof_type).map_err(|e| { @@ -180,13 +173,10 @@ impl Actor { ¶ms.raw_byte_delta, ¶ms.quality_adjusted_delta, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "failed to update power raw {}, qa {}", - params.raw_byte_delta, params.quality_adjusted_delta, - ), + .with_context(|| { + format!( + "failed to update power raw {}, qa {}", + params.raw_byte_delta, params.quality_adjusted_delta, ) })?; @@ -225,17 +215,12 @@ impl Actor { CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events") - })?; + .context("failed to load cron events")?; - st.append_cron_event(&mut events, params.event_epoch, miner_event).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to enroll cron event") - })?; + st.append_cron_event(&mut events, params.event_epoch, miner_event) + .context("failed to enroll cron event")?; - st.cron_event_queue = events.root().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush cron events") - })?; + st.cron_event_queue = events.root().context("failed 
to flush cron events")?; Ok(()) })?; Ok(()) @@ -326,22 +311,14 @@ impl Actor { HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load proof batching set", - ) - })? + .context("failed to load proof batching set")? } else { debug!("ProofValidationBatch created"); Multimap::new(rt.store(), HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH) }; let miner_addr = rt.message().caller(); - let arr = mmap.get::(&miner_addr.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get seal verify infos at addr {}", miner_addr), - ) + let arr = mmap.get::(&miner_addr.to_bytes()).with_context(|| { + format!("failed to get seal verify infos at addr {}", miner_addr) })?; if let Some(arr) = arr { if arr.count() >= MAX_MINER_PROVE_COMMITS_PER_EPOCH { @@ -355,13 +332,10 @@ impl Actor { } } - mmap.add(miner_addr.to_bytes().into(), seal_info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to insert proof into set") - })?; + mmap.add(miner_addr.to_bytes().into(), seal_info) + .context("failed to insert proof into set")?; - let mmrc = mmap.root().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush proofs batch map") - })?; + let mmrc = mmap.root().context("failed to flush proofs batch map")?; rt.charge_gas("OnSubmitVerifySeal", GAS_ON_SUBMIT_VERIFY_SEAL); st.proof_validation_batch = Some(mmrc); @@ -434,17 +408,17 @@ impl Actor { } }; - if let Err(e) = mmap.for_all::<_, SealVerifyInfo>(|k, arr| { + if let Err(e) = mmap.for_all::<_, SealVerifyInfo, _>(|k, arr| { let addr = match Address::from_bytes(&k.0) { Ok(addr) => addr, Err(e) => { - return Err(anyhow!("failed to parse address key: {}", e)); + return Err(format!("failed to parse address key: {}", e)); } }; let contains_claim = match claims.contains_key(&addr.to_bytes()) { Ok(contains_claim) => contains_claim, - Err(e) => return Err(anyhow!("failed to 
look up clain: {}", e)), +            Err(e) => return Err(format!("failed to look up claim: {}", e)), };  if !contains_claim { @@ -452,17 +426,19 @@ return Ok(()); } -            let num_proofs: usize = arr.count().try_into()?; +            let num_proofs: usize = arr +                .count() +                .try_into() +                .map_err(|_| "can not convert u64 to usize".to_string())?; infos.reserve(num_proofs); -            arr.for_each(|_, svi| { +            arr.for_each::<_, ActorError>(|_, svi| { infos.push(svi.clone()); Ok(()) }) .map_err(|e| { -                anyhow!( +                format!( "failed to iterate over proof verify array for miner {}: {}", -                    addr, -                    e +                    addr, e ) })?; @@ -545,9 +521,7 @@ impl Actor { CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) -            .map_err(|e| { -                e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events") -            })?; +            .context("failed to load cron events")?; let claims = make_map_with_root_and_bitwidth::<_, Claim>(&st.claims, rt.store(), HAMT_BIT_WIDTH) .map_err(|e| { e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") })?; for epoch in st.first_cron_epoch..=rt_epoch { -                let epoch_events = load_cron_events(&events, epoch).map_err(|e| { -                    e.downcast_default( -                        ExitCode::USR_ILLEGAL_STATE, -                        format!("failed to load cron events at {}", epoch), -                    ) -                })?; +                let epoch_events = load_cron_events(&events, epoch) +                    .with_context(|| format!("failed to load cron events at {}", epoch))?; if epoch_events.is_empty() { continue; @@ -581,18 +551,13 @@ cron_events.push(evt); } -                events.remove_all(&epoch_key(epoch)).map_err(|e| { -                    e.downcast_default( -                        ExitCode::USR_ILLEGAL_STATE, -                        format!("failed to clear cron events at {}", epoch), -                    ) -                })?; +                events +                    .remove_all(&epoch_key(epoch)) +                    .with_context(|| format!("failed to clear cron events at {}", epoch))?; } st.first_cron_epoch = rt_epoch + 1; -            st.cron_event_queue = events.root().map_err(|e| { -                e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush events") -            })?; +            st.cron_event_queue = 
events.root().context("failed to flush events")?; Ok(()) })?; diff --git a/actors/power/src/state.rs b/actors/power/src/state.rs index bfac87564..be3067045 100644 --- a/actors/power/src/state.rs +++ b/actors/power/src/state.rs @@ -3,11 +3,10 @@ use std::ops::Neg; -use anyhow::{anyhow, Context}; use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ - actor_error, make_empty_map, make_map_with_root, make_map_with_root_and_bitwidth, + actor_error, make_empty_map, make_map_with_root, make_map_with_root_and_bitwidth, ActorContext, ActorDowncast, ActorError, Map, Multimap, }; use fvm_ipld_blockstore::Blockstore; @@ -75,16 +74,15 @@ pub struct State { } impl State { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .map_err(|e| anyhow!("Failed to create empty map: {}", e))?; + .context("Failed to create empty map")?; let empty_mmap = Multimap::new(store, CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH) .root() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to get empty multimap cid") - })?; + .context("Failed to get empty multimap cid")?; + Ok(State { cron_event_queue: empty_mmap, claims: empty_map, @@ -106,11 +104,11 @@ impl State { policy: &Policy, s: &BS, miner: &Address, - ) -> anyhow::Result { + ) -> Result { let claims = make_map_with_root_and_bitwidth(&self.claims, s, HAMT_BIT_WIDTH)?; - let claim = - get_claim(&claims, miner)?.ok_or_else(|| anyhow!("no claim for actor: {}", miner))?; + let claim = get_claim(&claims, miner)? 
+ .ok_or_else(|| actor_error!(not_found, "no claim for actor: {}", miner))?; let miner_nominal_power = &claim.raw_byte_power; let miner_min_power = consensus_miner_min_power(policy, claim.window_post_proof_type) @@ -132,7 +130,7 @@ impl State { &self, s: &BS, miner: &Address, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let claims = make_map_with_root(&self.claims, s)?; get_claim(&claims, miner).map(|s| s.cloned()) } @@ -144,7 +142,7 @@ impl State { miner: &Address, power: &StoragePower, qa_power: &StoragePower, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let old_claim = get_claim(claims, miner)? .ok_or_else(|| actor_error!(not_found, "no claim for actor {}", miner))?; @@ -185,25 +183,25 @@ impl State { } if new_claim.raw_byte_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claimed raw byte power: {}", new_claim.raw_byte_power - ))); + )); } if new_claim.quality_adj_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claimed quality adjusted power: {}", new_claim.quality_adj_power - ))); + )); } if self.miner_above_min_power_count < 0 { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative amount of miners lather than min: {}", self.miner_above_min_power_count - ))); + )); } set_claim(claims, miner, new_claim) @@ -218,14 +216,14 @@ impl State { events: &mut Multimap, epoch: ChainEpoch, event: CronEvent, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if epoch < self.first_cron_epoch { self.first_cron_epoch = epoch; } - events.add(epoch_key(epoch), event).map_err(|e| { - e.downcast_wrap(format!("failed to store cron event at epoch {}", epoch)) - })?; + events + .add(epoch_key(epoch), event) + .with_context(|| format!("failed to store cron event at epoch {}", epoch))?; Ok(()) } @@ -253,7 +251,7 @@ impl State { &mut self, policy: &Policy, window_post_proof: RegisteredPoStProof, - ) 
-> anyhow::Result<()> { + ) -> Result<(), ActorError> { let min_power = consensus_miner_min_power(policy, window_post_proof)?; if !min_power.is_positive() { @@ -291,7 +289,7 @@ impl State { &self, store: &BS, miner: &Address, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let claims = make_map_with_root_and_bitwidth::<_, Claim>(&self.claims, store, HAMT_BIT_WIDTH) .map_err(|e| { @@ -307,23 +305,24 @@ impl State { policy: &Policy, claims: &mut Map, miner: &Address, - ) -> anyhow::Result<()> { - let (rbp, qap) = - match get_claim(claims, miner).map_err(|e| e.downcast_wrap("failed to get claim"))? { - None => { - return Ok(()); - } - Some(claim) => (claim.raw_byte_power.clone(), claim.quality_adj_power.clone()), - }; + ) -> Result<(), ActorError> { + let (rbp, qap) = match get_claim(claims, miner).context("failed to get claim")? { + None => { + return Ok(()); + } + Some(claim) => (claim.raw_byte_power.clone(), claim.quality_adj_power.clone()), + }; // Subtract from stats to remove power self.add_to_claim(policy, claims, miner, &rbp.neg(), &qap.neg()) - .map_err(|e| e.downcast_wrap("failed to subtract miner power before deleting claim"))?; + .context("failed to subtract miner power before deleting claim")?; claims .delete(&miner.to_bytes()) - .map_err(|e| e.downcast_wrap(format!("failed to delete claim for address {}", miner)))? - .ok_or_else(|| anyhow!("failed to delete claim for address: doesn't exist"))?; + .with_context(|| format!("failed to delete claim for address {}", miner))? 
+ .ok_or_else(|| { + actor_error!(illegal_state, "failed to delete claim for address: doesn't exist") + })?; Ok(()) } } @@ -331,10 +330,10 @@ impl State { pub(super) fn load_cron_events( mmap: &Multimap, epoch: ChainEpoch, -) -> anyhow::Result> { +) -> Result, ActorError> { let mut events = Vec::new(); - mmap.for_each(&epoch_key(epoch), |_, v: &CronEvent| { + mmap.for_each::<_, _, ActorError>(&epoch_key(epoch), |_, v: &CronEvent| { events.push(v.clone()); Ok(()) })?; @@ -346,35 +345,34 @@ pub(super) fn load_cron_events( fn get_claim<'m, BS: Blockstore>( claims: &'m Map, a: &Address, -) -> anyhow::Result> { - claims - .get(&a.to_bytes()) - .map_err(|e| e.downcast_wrap(format!("failed to get claim for address {}", a))) +) -> Result, ActorError> { + claims.get(&a.to_bytes()).with_context(|| format!("failed to get claim for address {}", a)) } pub fn set_claim( claims: &mut Map, a: &Address, claim: Claim, -) -> anyhow::Result<()> { +) -> Result<(), ActorError> { if claim.raw_byte_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claim raw power {}", claim.raw_byte_power - ))); + )); } if claim.quality_adj_power.is_negative() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_state, "negative claim quality-adjusted power {}", claim.quality_adj_power - ))); + )); } claims .set(a.to_bytes().into(), claim) - .map_err(|e| e.downcast_wrap(format!("failed to set claim for address {}", a)))?; + .with_context(|| format!("failed to set claim for address {}", a))?; + Ok(()) } @@ -409,7 +407,7 @@ impl Cbor for CronEvent {} pub fn consensus_miner_min_power( policy: &Policy, p: RegisteredPoStProof, -) -> anyhow::Result { +) -> Result { use RegisteredPoStProof::*; match p { StackedDRGWinning2KiBV1 @@ -422,7 +420,7 @@ pub fn consensus_miner_min_power( | StackedDRGWindow512MiBV1 | StackedDRGWindow32GiBV1 | StackedDRGWindow64GiBV1 => Ok(policy.minimum_consensus_power.clone()), - Invalid(i) => 
Err(anyhow::anyhow!("unsupported proof type: {}", i)), + Invalid(i) => Err(actor_error!(illegal_argument, "unsupported proof type: {}", i)), } } diff --git a/actors/power/tests/harness/mod.rs b/actors/power/tests/harness/mod.rs index 7cdf07170..9cbc310f3 100644 --- a/actors/power/tests/harness/mod.rs +++ b/actors/power/tests/harness/mod.rs @@ -8,6 +8,7 @@ use fil_actors_runtime::Multimap; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::{BytesDe, RawBytes}; use fvm_ipld_hamt::BytesKey; +use fvm_ipld_hamt::EitherError; use fvm_ipld_hamt::Error; use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser::BigIntDe; @@ -347,15 +348,19 @@ impl Harness { } /// Collects all keys from a map into a vector. -fn collect_keys(m: Map) -> Result, Error> +fn collect_keys(m: Map) -> Result, Error> where BS: Blockstore, V: DeserializeOwned + Serialize, { let mut ret_keys = Vec::new(); - m.for_each(|k, _| { + m.for_each::<_, ()>(|k, _| { ret_keys.push(k.clone()); Ok(()) + }) + .map_err(|err| match err { + EitherError::User(()) => unreachable!(), + EitherError::Hamt(e) => e, })?; Ok(ret_keys) @@ -364,5 +369,5 @@ where pub fn verify_empty_map(rt: &MockRuntime, key: Cid) { let map = make_map_with_root_and_bitwidth::<_, BigIntDe>(&key, &rt.store, HAMT_BIT_WIDTH).unwrap(); - map.for_each(|_key, _val| panic!("expected no keys")).unwrap(); + map.for_each::<_, ()>(|_key, _val| panic!("expected no keys")).unwrap(); } diff --git a/actors/runtime/src/actor_error.rs b/actors/runtime/src/actor_error.rs index ba80e4ffc..9c976c61b 100644 --- a/actors/runtime/src/actor_error.rs +++ b/actors/runtime/src/actor_error.rs @@ -1,3 +1,5 @@ +use std::fmt::Display; + use fvm_shared::error::ExitCode; use thiserror::Error; @@ -71,6 +73,44 @@ impl From for ActorError { } } +impl From> for ActorError { + fn from(e: fvm_ipld_amt::Error) -> Self { + Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } + } +} + +impl From> for ActorError { + fn from(e: fvm_ipld_hamt::Error) -> 
Self { + Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } + } +} + +impl From> for ActorError { + fn from(e: fvm_ipld_encoding::CborStoreError) -> Self { + Self { exit_code: ExitCode::USR_ILLEGAL_STATE, msg: e.to_string() } + } +} + +impl From> for ActorError { + fn from(e: crate::util::MultiMapError) -> Self { + match e { + crate::util::MultiMapError::Amt(e) => e.into(), + crate::util::MultiMapError::Hamt(e) => e.into(), + } + } +} + +impl, E: std::error::Error> From> + for ActorError +{ + fn from(e: crate::util::MultiMapEitherError) -> Self { + match e { + crate::util::MultiMapEitherError::User(e) => e.into(), + crate::util::MultiMapEitherError::MultiMap(e) => e.into(), + } + } +} + /// Converts an actor deletion error into an actor error with the appropriate exit code. This /// facilitates propagation. #[cfg(feature = "fil-actor")] @@ -108,3 +148,38 @@ macro_rules! actor_error { $crate::actor_error!($code; $msg $(, $ex)*) }; } + +pub trait ActorContext { + fn context(self, context: C) -> Result + where + C: Display + Send + Sync + 'static; + fn with_context(self, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C; +} + +impl> ActorContext for Result { + fn context(self, context: C) -> Result + where + C: Display + Send + Sync + 'static, + { + self.map_err(|err| { + let mut err: ActorError = err.into(); + err.msg = format!("{}: {}", context, err.msg); + err + }) + } + + fn with_context(self, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.map_err(|err| { + let mut err: ActorError = err.into(); + err.msg = format!("{}: {}", f(), err.msg); + err + }) + } +} diff --git a/actors/runtime/src/builtin/shared.rs b/actors/runtime/src/builtin/shared.rs index 2ba233b78..3bcd450dc 100644 --- a/actors/runtime/src/builtin/shared.rs +++ b/actors/runtime/src/builtin/shared.rs @@ -6,13 +6,15 @@ use fvm_shared::address::Address; use fvm_shared::METHOD_SEND; use crate::runtime::Runtime; 
+use crate::{actor_error, ActorError}; pub const HAMT_BIT_WIDTH: u32 = 5; /// ResolveToIDAddr resolves the given address to it's ID address form. /// If an ID address for the given address dosen't exist yet, it tries to create one by sending /// a zero balance to the given address. -pub fn resolve_to_id_addr(rt: &mut RT, address: &Address) -> anyhow::Result
+// TODO: return RuntimeError +pub fn resolve_to_id_addr(rt: &mut RT, address: &Address) -> Result where BS: Blockstore, RT: Runtime, @@ -23,13 +25,15 @@ where } // send 0 balance to the account so an ID address for it is created and then try to resolve - rt.send(*address, METHOD_SEND, Default::default(), Default::default()) - .map_err(|e| e.wrap(&format!("failed to send zero balance to address {}", address)))?; + rt.send(*address, METHOD_SEND, Default::default(), Default::default())?; - rt.resolve_address(address).ok_or_else(|| { - anyhow::anyhow!( + let addr = rt.resolve_address(address).ok_or_else(|| { + actor_error!( + illegal_state, "failed to resolve address {} to ID address even after sending zero balance", - address, + address ) - }) + })?; + + Ok(addr) } diff --git a/actors/runtime/src/lib.rs b/actors/runtime/src/lib.rs index ce322f463..8f1e60d43 100644 --- a/actors/runtime/src/lib.rs +++ b/actors/runtime/src/lib.rs @@ -67,7 +67,7 @@ where pub fn make_map_with_root<'bs, BS, V>( root: &Cid, store: &'bs BS, -) -> Result, HamtError> +) -> Result, HamtError> where BS: Blockstore, V: DeserializeOwned + Serialize, @@ -81,7 +81,7 @@ pub fn make_map_with_root_and_bitwidth<'bs, BS, V>( root: &Cid, store: &'bs BS, bitwidth: u32, -) -> Result, HamtError> +) -> Result, HamtError> where BS: Blockstore, V: DeserializeOwned + Serialize, diff --git a/actors/runtime/src/runtime/actor_blockstore.rs b/actors/runtime/src/runtime/actor_blockstore.rs index 497efd0bd..3f3a0e754 100644 --- a/actors/runtime/src/runtime/actor_blockstore.rs +++ b/actors/runtime/src/runtime/actor_blockstore.rs @@ -1,12 +1,11 @@ use std::convert::TryFrom; -use anyhow::Result; use cid::multihash::Code; use cid::Cid; use fvm_ipld_blockstore::Block; use fvm_sdk as fvm; -use crate::actor_error; +use crate::{actor_error, ActorError}; /// A blockstore suitable for use within actors. /// @@ -16,14 +15,16 @@ pub struct ActorBlockstore; /// Implements a blockstore delegating to IPLD syscalls. 
impl fvm_ipld_blockstore::Blockstore for ActorBlockstore { - fn get(&self, cid: &Cid) -> Result>> { + type Error = ActorError; + + fn get(&self, cid: &Cid) -> Result>, Self::Error> { // If this fails, the _CID_ is invalid. I.e., we have a bug. fvm::ipld::get(cid).map(Some).map_err(|c| { actor_error!(illegal_state; "get failed with {:?} on CID '{}'", c, cid).into() }) } - fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<()> { + fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<(), Self::Error> { let code = Code::try_from(k.hash().code()) .map_err(|e| actor_error!(serialization, e.to_string()))?; let k2 = self.put(code, &Block::new(k.codec(), block))?; @@ -34,7 +35,7 @@ impl fvm_ipld_blockstore::Blockstore for ActorBlockstore { } } - fn put(&self, code: Code, block: &Block) -> Result + fn put(&self, code: Code, block: &Block) -> Result where D: AsRef<[u8]>, { diff --git a/actors/runtime/src/util/downcast.rs b/actors/runtime/src/util/downcast.rs index 2b8b1d621..cf299fe2f 100644 --- a/actors/runtime/src/util/downcast.rs +++ b/actors/runtime/src/util/downcast.rs @@ -3,7 +3,6 @@ use anyhow::anyhow; use fvm_ipld_amt::Error as AmtError; -use fvm_ipld_encoding::Error as EncodingError; use fvm_ipld_hamt::Error as HamtError; use fvm_shared::error::ExitCode; @@ -36,10 +35,11 @@ impl ActorDowncast for anyhow::Error { } } -impl ActorDowncast for AmtError { +impl ActorDowncast for AmtError { fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { match self { - AmtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), + // AmtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), + // todo: proper downcast other => { ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) } @@ -47,16 +47,18 @@ impl ActorDowncast for AmtError { } fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { match self { - AmtError::Dynamic(e) => e.downcast_wrap(msg), + // AmtError::Dynamic(e) => e.downcast_wrap(msg), 
+ // todo: proper downcast other => anyhow!("{}: {}", msg.as_ref(), other), } } } -impl ActorDowncast for HamtError { +impl ActorDowncast for HamtError { fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { match self { - HamtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), + // HamtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), + // todo: proper downcast other => { ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) } @@ -64,7 +66,8 @@ impl ActorDowncast for HamtError { } fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { match self { - HamtError::Dynamic(e) => e.downcast_wrap(msg), + // HamtError::Dynamic(e) => e.downcast_wrap(msg), + // todo: proper downcast other => anyhow!("{}: {}", msg.as_ref(), other), } } @@ -80,36 +83,6 @@ fn downcast_util(error: anyhow::Error) -> anyhow::Result { Err(other) => other, }; - // Check if error is Encoding error, if so return `ErrSerialization` - let error = match error.downcast::() { - Ok(enc_error) => { - return Ok(ActorError::unchecked(ExitCode::USR_SERIALIZATION, enc_error.to_string())) - } - Err(other) => other, - }; - - // Dynamic errors can come from Array and Hamt through blockstore usages, check them. - let error = match error.downcast::() { - Ok(amt_err) => match amt_err { - AmtError::Dynamic(de) => match downcast_util(de) { - Ok(a) => return Ok(a), - Err(other) => other, - }, - other => anyhow!(other), - }, - Err(other) => other, - }; - let error = match error.downcast::() { - Ok(amt_err) => match amt_err { - HamtError::Dynamic(de) => match downcast_util(de) { - Ok(a) => return Ok(a), - Err(other) => other, - }, - other => anyhow!(other), - }, - Err(other) => other, - }; - // Could not be downcasted automatically to actor error, return initial dynamic error. 
Err(error) } diff --git a/actors/runtime/src/util/mod.rs b/actors/runtime/src/util/mod.rs index a3610581c..f47015884 100644 --- a/actors/runtime/src/util/mod.rs +++ b/actors/runtime/src/util/mod.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT pub use self::downcast::*; -pub use self::multimap::*; +pub use self::multimap::{EitherError as MultiMapEitherError, Error as MultiMapError, Multimap}; pub use self::set::Set; pub use self::set_multimap::SetMultimap; diff --git a/actors/runtime/src/util/multimap.rs b/actors/runtime/src/util/multimap.rs index ff9b2856c..e5ccd9c0a 100644 --- a/actors/runtime/src/util/multimap.rs +++ b/actors/runtime/src/util/multimap.rs @@ -3,12 +3,19 @@ use cid::Cid; use fvm_ipld_blockstore::Blockstore; -use fvm_ipld_hamt::Error; use serde::de::DeserializeOwned; use serde::Serialize; use crate::{make_empty_map, make_map_with_root_and_bitwidth, Array, BytesKey, Map}; +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("amt: {0}")] + Amt(#[from] fvm_ipld_amt::Error), + #[error("hamt: {0}")] + Hamt(#[from] fvm_ipld_hamt::Error), +} + /// Multimap stores multiple values per key in a Hamt of Amts. /// The order of insertion of values for each key is retained. pub struct Multimap<'a, BS>(Map<'a, BS, Cid>, u32); @@ -29,18 +36,19 @@ where cid: &Cid, outer_bitwidth: u32, inner_bitwidth: u32, - ) -> Result { + ) -> Result> { Ok(Self(make_map_with_root_and_bitwidth(cid, bs, outer_bitwidth)?, inner_bitwidth)) } /// Retrieve root from the multimap. #[inline] - pub fn root(&mut self) -> Result { - self.0.flush() + pub fn root(&mut self) -> Result> { + let cid = self.0.flush()?; + Ok(cid) } /// Adds a value for a key. 
- pub fn add(&mut self, key: BytesKey, value: V) -> Result<(), Error> + pub fn add(&mut self, key: BytesKey, value: V) -> Result<(), Error> where V: Serialize + DeserializeOwned, { @@ -50,10 +58,10 @@ where .unwrap_or_else(|| Array::new_with_bit_width(self.0.store(), self.1)); // Set value at next index - arr.set(arr.count(), value).map_err(|e| anyhow::anyhow!(e))?; + arr.set(arr.count(), value)?; // flush to get new array root to put in hamt - let new_root = arr.flush().map_err(|e| anyhow::anyhow!(e))?; + let new_root = arr.flush()?; // Set hamt node to array root self.0.set(key, new_root)?; @@ -62,51 +70,69 @@ where /// Gets the Array of value type `V` using the multimap store. #[inline] - pub fn get(&self, key: &[u8]) -> Result>, Error> + pub fn get(&self, key: &[u8]) -> Result>, Error> where V: DeserializeOwned + Serialize, { match self.0.get(key)? { - Some(cid) => { - Ok(Some(Array::load(cid, *self.0.store()).map_err(|e| anyhow::anyhow!(e))?)) - } + Some(cid) => Ok(Some(Array::load(cid, *self.0.store())?)), None => Ok(None), } } /// Removes all values for a key. #[inline] - pub fn remove_all(&mut self, key: &[u8]) -> Result<(), Error> { + pub fn remove_all(&mut self, key: &[u8]) -> Result<(), Error> { // Remove entry from table - self.0.delete(key)?.ok_or("failed to delete from multimap")?; + self.0.delete(key)?; Ok(()) } /// Iterates through all values in the array at a given key. - pub fn for_each(&self, key: &[u8], f: F) -> Result<(), Error> + pub fn for_each(&self, key: &[u8], f: F) -> Result<(), EitherError> where V: Serialize + DeserializeOwned, - F: FnMut(u64, &V) -> anyhow::Result<()>, + F: FnMut(u64, &V) -> Result<(), U>, { if let Some(amt) = self.get::(key)? 
{ - amt.for_each(f).map_err(|e| anyhow::anyhow!(e))?; + amt.for_each(f).map_err(|err| match err { + fvm_ipld_amt::EitherError::User(e) => EitherError::User(e), + fvm_ipld_amt::EitherError::Amt(e) => EitherError::MultiMap(e.into()), + })?; } Ok(()) } /// Iterates through all arrays in the multimap - pub fn for_all(&self, mut f: F) -> Result<(), Error> + pub fn for_all(&self, mut f: F) -> Result<(), EitherError> where V: Serialize + DeserializeOwned, - F: FnMut(&BytesKey, &Array) -> anyhow::Result<()>, + F: FnMut(&BytesKey, &Array) -> Result<(), U>, { - self.0.for_each::<_>(|key, arr_root| { - let arr = Array::load(arr_root, *self.0.store())?; - f(key, &arr) - })?; + self.0 + .for_each::<_, EitherError>(|key, arr_root| { + let arr = Array::load(arr_root, *self.0.store()) + .map_err(|e| EitherError::MultiMap(e.into()))?; + f(key, &arr).map_err(EitherError::User)?; + Ok(()) + }) + .map_err(|err| match err { + fvm_ipld_hamt::EitherError::User(e) => e, + fvm_ipld_hamt::EitherError::Hamt(e) => EitherError::MultiMap(e.into()), + })?; Ok(()) } } + +/// This error wraps around around two different errors, either the native `Error` from `multimap`, or +/// a custom user error, returned from executing a user defined function. +#[derive(Debug, thiserror::Error)] +pub enum EitherError { + #[error("user: {0}")] + User(U), + #[error("multimap: {0}")] + MultiMap(#[from] Error), +} diff --git a/actors/runtime/src/util/set.rs b/actors/runtime/src/util/set.rs index 67f0faca3..eb6a849b4 100644 --- a/actors/runtime/src/util/set.rs +++ b/actors/runtime/src/util/set.rs @@ -33,19 +33,19 @@ where } /// Initializes a Set from a root Cid. - pub fn from_root(bs: &'a BS, cid: &Cid) -> Result { + pub fn from_root(bs: &'a BS, cid: &Cid) -> Result> { Ok(Self(make_map_with_root(cid, bs)?)) } /// Retrieve root from the Set. #[inline] - pub fn root(&mut self) -> Result { + pub fn root(&mut self) -> Result> { self.0.flush() } /// Adds key to the set. 
#[inline] - pub fn put(&mut self, key: BytesKey) -> Result<(), Error> { + pub fn put(&mut self, key: BytesKey) -> Result<(), Error> { // Set hamt node to array root self.0.set(key, ())?; Ok(()) @@ -53,13 +53,13 @@ where /// Checks if key exists in the set. #[inline] - pub fn has(&self, key: &[u8]) -> Result { + pub fn has(&self, key: &[u8]) -> Result> { self.0.contains_key(key) } /// Deletes key from set. #[inline] - pub fn delete(&mut self, key: &[u8]) -> Result, Error> { + pub fn delete(&mut self, key: &[u8]) -> Result, Error> { match self.0.delete(key)? { Some(_) => Ok(Some(())), None => Ok(None), @@ -67,16 +67,19 @@ where } /// Iterates through all keys in the set. - pub fn for_each(&self, mut f: F) -> Result<(), Error> + pub fn for_each(&self, mut f: F) -> Result<(), Error> where - F: FnMut(&BytesKey) -> anyhow::Result<()>, + F: FnMut(&BytesKey) -> Result<(), Error>, { // Calls the for each function on the hamt with ignoring the value - self.0.for_each(|s, _: &()| f(s)) + self.0.for_each(|s, _: &()| f(s)).map_err(|err| match err { + fvm_ipld_hamt::EitherError::User(e) => e, + fvm_ipld_hamt::EitherError::Hamt(e) => e.into(), + }) } /// Collects all keys from the set into a vector. - pub fn collect_keys(&self) -> Result, Error> { + pub fn collect_keys(&self) -> Result, Error> { let mut ret_keys = Vec::new(); self.for_each(|k| { diff --git a/actors/runtime/src/util/set_multimap.rs b/actors/runtime/src/util/set_multimap.rs index e5f0a402a..969bf489e 100644 --- a/actors/runtime/src/util/set_multimap.rs +++ b/actors/runtime/src/util/set_multimap.rs @@ -26,18 +26,18 @@ where } /// Initializes a SetMultimap from a root Cid. - pub fn from_root(bs: &'a BS, cid: &Cid) -> Result { + pub fn from_root(bs: &'a BS, cid: &Cid) -> Result> { Ok(Self(make_map_with_root(cid, bs)?)) } /// Retrieve root from the SetMultimap. #[inline] - pub fn root(&mut self) -> Result { + pub fn root(&mut self) -> Result> { self.0.flush() } /// Puts the DealID in the hash set of the key. 
- pub fn put(&mut self, key: ChainEpoch, value: DealID) -> Result<(), Error> { + pub fn put(&mut self, key: ChainEpoch, value: DealID) -> Result<(), Error> { // Get construct amt from retrieved cid or create new let mut set = self.get(key)?.unwrap_or_else(|| Set::new(self.0.store())); @@ -52,7 +52,7 @@ where } /// Puts slice of DealIDs in the hash set of the key. - pub fn put_many(&mut self, key: ChainEpoch, values: &[DealID]) -> Result<(), Error> { + pub fn put_many(&mut self, key: ChainEpoch, values: &[DealID]) -> Result<(), Error> { // Get construct amt from retrieved cid or create new let mut set = self.get(key)?.unwrap_or_else(|| Set::new(self.0.store())); @@ -70,7 +70,7 @@ where /// Gets the set at the given index of the `SetMultimap` #[inline] - pub fn get(&self, key: ChainEpoch) -> Result>, Error> { + pub fn get(&self, key: ChainEpoch) -> Result>, Error> { match self.0.get(&u64_key(key as u64))? { Some(cid) => Ok(Some(Set::from_root(*self.0.store(), cid)?)), None => Ok(None), @@ -79,7 +79,7 @@ where /// Removes a DealID from a key hash set. #[inline] - pub fn remove(&mut self, key: ChainEpoch, v: DealID) -> Result<(), Error> { + pub fn remove(&mut self, key: ChainEpoch, v: DealID) -> Result<(), Error> { // Get construct amt from retrieved cid and return if no set exists let mut set = match self.get(key)? { Some(s) => s, @@ -96,7 +96,7 @@ where /// Removes set at index. #[inline] - pub fn remove_all(&mut self, key: ChainEpoch) -> Result<(), Error> { + pub fn remove_all(&mut self, key: ChainEpoch) -> Result<(), Error> { // Remove entry from table self.0.delete(&u64_key(key as u64))?; @@ -104,9 +104,9 @@ where } /// Iterates through keys and converts them to a DealID to call a function on each. 
- pub fn for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> + pub fn for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> where - F: FnMut(DealID) -> Result<(), Error>, + F: FnMut(DealID) -> Result<(), Error>, { // Get construct amt from retrieved cid and return if no set exists let set = match self.get(key)? { @@ -115,11 +115,11 @@ where }; set.for_each(|k| { - let v = parse_uint_key(k) - .map_err(|e| anyhow::anyhow!("Could not parse key: {:?}, ({})", &k.0, e))?; + let v = parse_uint_key(k).expect("TODO"); // Run function on all parsed keys - Ok(f(v)?) + f(v)?; + Ok(()) }) } } diff --git a/actors/runtime/tests/multimap_test.rs b/actors/runtime/tests/multimap_test.rs index 3f2ef2952..d0599ffbd 100644 --- a/actors/runtime/tests/multimap_test.rs +++ b/actors/runtime/tests/multimap_test.rs @@ -37,7 +37,7 @@ fn for_each() { mm.add("Some other string".into(), 7).unwrap(); let mut vals: Vec<(u64, u64)> = Vec::new(); - mm.for_each(&addr.to_bytes(), |i, v| { + mm.for_each::<_, _, ()>(&addr.to_bytes(), |i, v| { vals.push((i, *v)); Ok(()) }) diff --git a/actors/system/src/lib.rs b/actors/system/src/lib.rs index d8b8c2c5b..93b9befd3 100644 --- a/actors/system/src/lib.rs +++ b/actors/system/src/lib.rs @@ -2,17 +2,17 @@ // SPDX-License-Identifier: Apache-2.0, MIT use anyhow::anyhow; use cid::{multihash, Cid}; +use fil_actors_runtime::ActorContext; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::CborStore; use fvm_ipld_encoding::{Cbor, RawBytes}; -use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, ActorDowncast, ActorError, SYSTEM_ACTOR_ADDR}; +use fil_actors_runtime::{actor_error, ActorError, SYSTEM_ACTOR_ADDR}; #[cfg(feature = "fil-actor")] fil_actors_runtime::wasm_trampoline!(Actor); @@ -65,10 +65,9 @@ impl Actor { { 
rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let state = State::new(rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct state") - })?; + let state = State::new(rt.store()).context("failed to construct state")?; rt.create(&state)?; + Ok(()) } } diff --git a/actors/verifreg/Cargo.toml b/actors/verifreg/Cargo.toml index fc0919208..62e9dbd08 100644 --- a/actors/verifreg/Cargo.toml +++ b/actors/verifreg/Cargo.toml @@ -21,7 +21,6 @@ num-traits = "0.2.14" num-derive = "0.3.3" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } lazy_static = "1.4.0" -anyhow = "1.0.56" fvm_ipld_hamt = "0.4.0" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index 4ef46e036..bc3fee9c7 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -3,8 +3,8 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_id_addr, ActorDowncast, - ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_id_addr, ActorContext, + ActorDowncast, ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -77,12 +77,8 @@ impl Actor { )); } - let verifier = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let verifier = resolve_to_id_addr(rt, ¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", params.address))?; let st: State = rt.state()?; rt.validate_immediate_caller_is(std::iter::once(&st.root_key))?; @@ -141,12 +137,8 @@ impl Actor { BS: Blockstore, RT: Runtime, { - let verifier = resolve_to_id_addr(rt, 
&verifier_addr).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", verifier_addr), - ) - })?; + let verifier = resolve_to_id_addr(rt, &verifier_addr) + .with_context(|| format!("failed to resolve addr {} to ID addr", verifier_addr))?; let state: State = rt.state()?; rt.validate_immediate_caller_is(std::iter::once(&state.root_key))?; @@ -157,21 +149,19 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .map_err(Into::into) + .context("failed to load verified clients")?; verifiers .delete(&verifier.to_bytes()) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to remove verifier") - })? + .map_err(Into::into) + .context("failed to remove verifier")? .ok_or_else(|| { actor_error!(illegal_argument, "failed to remove verifier: not found") })?; - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; + st.verifiers = + verifiers.flush().map_err(Into::into).context("failed to flush verifiers")?; + Ok(()) })?; @@ -198,12 +188,8 @@ impl Actor { )); } - let client = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let client = resolve_to_id_addr(rt, ¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", params.address))?; let st: State = rt.state()?; if client == st.root_key { @@ -213,37 +199,28 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .map_err(Into::into) + .context("failed to load verified clients")?; + let mut 
verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .map_err(Into::into) + .context("failed to load verified clients")?; // Validate caller is one of the verifiers. let verifier = rt.message().caller(); let BigIntDe(verifier_cap) = verifiers .get(&verifier.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get Verifier {}", verifier), - ) - })? + .map_err(Into::into) + .with_context(|| format!("failed to get Verifier {}", verifier))? .ok_or_else(|| actor_error!(not_found, format!("no such Verifier {}", verifier)))?; // Validate client to be added isn't a verifier - let found = verifiers.contains_key(&client.to_bytes()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier") - })?; + let found = verifiers + .contains_key(&client.to_bytes()) + .map_err(Into::into) + .context("failed to get verifier")?; + if found { return Err(actor_error!( illegal_argument, @@ -263,19 +240,16 @@ impl Actor { } let new_verifier_cap = verifier_cap - ¶ms.allowance; - verifiers.set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to update new verifier cap for {}", verifier), - ) - })?; + verifiers + .set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)) + .map_err(Into::into) + .with_context(|| format!("Failed to update new verifier cap for {}", verifier))?; + + let client_cap = verified_clients + .get(&client.to_bytes()) + .map_err(Into::into) + .with_context(|| format!("Failed to get verified client {}", client))?; - let client_cap = verified_clients.get(&client.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to get verified client {}", client), - ) - })?; // if verified client exists, add 
allowance to existing cap // otherwise, create new client with allownace let client_cap = if let Some(BigIntDe(client_cap)) = client_cap { @@ -284,24 +258,19 @@ impl Actor { params.allowance }; - verified_clients.set(client.to_bytes().into(), BigIntDe(client_cap.clone())).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "Failed to add verified client {} with cap {}", - client, client_cap, - ), - ) - }, - )?; + verified_clients + .set(client.to_bytes().into(), BigIntDe(client_cap.clone())) + .map_err(Into::into) + .with_context(|| { + format!("Failed to add verified client {} with cap {}", client, client_cap,) + })?; - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; + st.verifiers = + verifiers.flush().map_err(Into::into).context("failed to flush verifiers")?; + st.verified_clients = verified_clients + .flush() + .map_err(Into::into) + .context("failed to flush verified clients")?; Ok(()) })?; @@ -319,12 +288,8 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*STORAGE_MARKET_ACTOR_ADDR))?; - let client = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let client = resolve_to_id_addr(rt, ¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", params.address))?; if params.deal_size < rt.policy().minimum_verified_deal_size { return Err(actor_error!( @@ -427,12 +392,8 @@ impl Actor { )); } - let client = resolve_to_id_addr(rt, ¶ms.address).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", params.address), - ) - })?; + let client = resolve_to_id_addr(rt, 
¶ms.address) + .with_context(|| format!("failed to resolve addr {} to ID addr", params.address))?; let st: State = rt.state()?; if client == st.root_key { @@ -508,35 +469,27 @@ impl Actor { BS: Blockstore, RT: Runtime, { - let client = resolve_to_id_addr(rt, ¶ms.verified_client_to_remove).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, + let client = + resolve_to_id_addr(rt, ¶ms.verified_client_to_remove).with_context(|| { format!( "failed to resolve client addr {} to ID addr", params.verified_client_to_remove - ), - ) - })?; + ) + })?; let verifier_1 = - resolve_to_id_addr(rt, ¶ms.verifier_request_1.verifier).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!( - "failed to resolve verifier addr {} to ID addr", - params.verifier_request_1.verifier - ), + resolve_to_id_addr(rt, ¶ms.verifier_request_1.verifier).with_context(|| { + format!( + "failed to resolve verifier addr {} to ID addr", + params.verifier_request_1.verifier ) })?; let verifier_2 = - resolve_to_id_addr(rt, ¶ms.verifier_request_2.verifier).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!( - "failed to resolve verifier addr {} to ID addr", - params.verifier_request_2.verifier - ), + resolve_to_id_addr(rt, ¶ms.verifier_request_2.verifier).with_context(|| { + format!( + "failed to resolve verifier addr {} to ID addr", + params.verifier_request_2.verifier ) })?; diff --git a/actors/verifreg/src/state.rs b/actors/verifreg/src/state.rs index 02fa90507..0ba5c38fc 100644 --- a/actors/verifreg/src/state.rs +++ b/actors/verifreg/src/state.rs @@ -18,10 +18,11 @@ pub struct State { } impl State { - pub fn new(store: &BS, root_key: Address) -> anyhow::Result { - let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) - .flush() - .map_err(|e| anyhow::anyhow!("Failed to create empty map: {}", e))?; + pub fn new( + store: &BS, + root_key: Address, + ) -> Result> { + let empty_map = make_empty_map::<_, ()>(store, 
HAMT_BIT_WIDTH).flush()?; Ok(State { root_key, From 5fb899471be8c26531cf6c08e7f97091973cc688 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 12 Apr 2022 13:04:20 +0200 Subject: [PATCH 02/10] paych and multisig actor --- Cargo.lock | 1 - actors/multisig/Cargo.toml | 1 - actors/multisig/src/lib.rs | 82 +++++++---------------- actors/multisig/src/state.rs | 20 ++++-- actors/multisig/tests/util.rs | 3 +- actors/paych/Cargo.toml | 2 +- actors/paych/src/lib.rs | 12 ++-- actors/power/src/lib.rs | 5 +- actors/power/src/state.rs | 3 +- actors/power/tests/harness/mod.rs | 10 +-- actors/runtime/src/util/multimap.rs | 39 +++++++++-- actors/runtime/src/util/set.rs | 14 +++- actors/runtime/src/util/set_multimap.rs | 23 ++++++- actors/runtime/tests/multimap_test.rs | 3 +- actors/runtime/tests/set_multimap_test.rs | 1 - actors/verifreg/src/lib.rs | 26 ++----- 16 files changed, 125 insertions(+), 120 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2b5d446e..a06d4d5c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -664,7 +664,6 @@ dependencies = [ name = "fil_actor_multisig" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", diff --git a/actors/multisig/Cargo.toml b/actors/multisig/Cargo.toml index 853961436..52be22713 100644 --- a/actors/multisig/Cargo.toml +++ b/actors/multisig/Cargo.toml @@ -23,7 +23,6 @@ cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] indexmap = { version = "1.8.0", features = ["serde-1"] } integer-encoding = { version = "3.0.3", default-features = false } serde = { version = "1.0.136", features = ["derive"] } -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/multisig/src/lib.rs b/actors/multisig/src/lib.rs index 5d15cc982..3322c0c5a 100644 --- a/actors/multisig/src/lib.rs +++ b/actors/multisig/src/lib.rs @@ -6,8 +6,8 @@ use std::collections::BTreeSet; use fil_actors_runtime::cbor::serialize_vec; use 
fil_actors_runtime::runtime::{ActorCode, Primitives, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_id_addr, ActorDowncast, - ActorError, Map, INIT_ACTOR_ADDR, + actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_id_addr, ActorContext, + ActorDowncast, ActorError, Map, INIT_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -73,12 +73,8 @@ impl Actor { let mut resolved_signers = Vec::with_capacity(params.signers.len()); let mut dedup_signers = BTreeSet::new(); for signer in ¶ms.signers { - let resolved = resolve_to_id_addr(rt, signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve addr {} to ID addr", signer), - ) - })?; + let resolved = resolve_to_id_addr(rt, signer) + .with_context(|| format!("failed to resolve addr {} to ID addr", signer))?; if !dedup_signers.insert(resolved.id().expect("address should be resolved")) { return Err( actor_error!(illegal_argument; "duplicate signer not allowed: {}", signer), @@ -269,11 +265,8 @@ impl Actor { return Err(actor_error!(forbidden; "Cannot cancel another signers transaction")); } - let calculated_hash = compute_proposal_hash(&tx, rt).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to compute proposal hash for (tx: {:?})", params.id), - ) + let calculated_hash = compute_proposal_hash(&tx, rt).with_context(|| { + format!("failed to compute proposal hash for (tx: {:?})", params.id) })?; if !params.proposal_hash.is_empty() && params.proposal_hash != calculated_hash { @@ -299,12 +292,8 @@ impl Actor { { let receiver = rt.message().receiver(); rt.validate_immediate_caller_is(std::iter::once(&receiver))?; - let resolved_new_signer = resolve_to_id_addr(rt, ¶ms.signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.signer), - ) - })?; + let resolved_new_signer = 
resolve_to_id_addr(rt, ¶ms.signer) + .with_context(|| format!("failed to resolve address {}", params.signer))?; rt.transaction(|st: &mut State, _| { if st.signers.len() >= SIGNERS_MAX { @@ -336,12 +325,8 @@ impl Actor { { let receiver = rt.message().receiver(); rt.validate_immediate_caller_is(std::iter::once(&receiver))?; - let resolved_old_signer = resolve_to_id_addr(rt, ¶ms.signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.signer), - ) - })?; + let resolved_old_signer = resolve_to_id_addr(rt, ¶ms.signer) + .with_context(|| format!("failed to resolve address {}", params.signer))?; rt.transaction(|st: &mut State, rt| { if !st.is_signer(&resolved_old_signer) { @@ -374,12 +359,9 @@ impl Actor { } // Remove approvals from removed signer - st.purge_approvals(rt.store(), &resolved_old_signer).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to purge approvals of removed signer", - ) - })?; + st.purge_approvals(rt.store(), &resolved_old_signer) + .context("failed to purge approvals of removed signer")?; + st.signers.retain(|s| s != &resolved_old_signer); Ok(()) @@ -396,18 +378,10 @@ impl Actor { { let receiver = rt.message().receiver(); rt.validate_immediate_caller_is(std::iter::once(&receiver))?; - let from_resolved = resolve_to_id_addr(rt, ¶ms.from).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.from), - ) - })?; - let to_resolved = resolve_to_id_addr(rt, ¶ms.to).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", params.to), - ) - })?; + let from_resolved = resolve_to_id_addr(rt, ¶ms.from) + .with_context(|| format!("failed to resolve address {}", params.from))?; + let to_resolved = resolve_to_id_addr(rt, ¶ms.to) + .with_context(|| format!("failed to resolve address {}", params.to))?; rt.transaction(|st: &mut State, rt| { if 
!st.is_signer(&from_resolved) { @@ -424,12 +398,9 @@ impl Actor { // Add new signer st.signers.push(to_resolved); - st.purge_approvals(rt.store(), &from_resolved).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to purge approvals of removed signer", - ) - })?; + st.purge_approvals(rt.store(), &from_resolved) + .context("failed to purge approvals of removed signer")?; + Ok(()) })?; @@ -621,12 +592,8 @@ where .ok_or_else(|| actor_error!(not_found, "no such transaction {:?} for approval", txn_id))?; if !proposal_hash.is_empty() { - let calculated_hash = compute_proposal_hash(txn, rt).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to compute proposal hash for (tx: {:?})", txn_id), - ) - })?; + let calculated_hash = compute_proposal_hash(txn, rt) + .with_context(|| format!("failed to compute proposal hash for (tx: {:?})", txn_id))?; if proposal_hash != calculated_hash { return Err(actor_error!( @@ -641,7 +608,10 @@ where /// Computes a digest of a proposed transaction. This digest is used to confirm identity /// of the transaction associated with an ID, which might change under chain re-orgs. 
-pub fn compute_proposal_hash(txn: &Transaction, sys: &dyn Primitives) -> anyhow::Result<[u8; 32]> { +pub fn compute_proposal_hash( + txn: &Transaction, + sys: &dyn Primitives, +) -> Result<[u8; 32], ActorError> { let proposal_hash = ProposalHashData { requester: txn.approved.get(0), to: &txn.to, diff --git a/actors/multisig/src/state.rs b/actors/multisig/src/state.rs index 4e9360aa8..738bab2f4 100644 --- a/actors/multisig/src/state.rs +++ b/actors/multisig/src/state.rs @@ -1,8 +1,9 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::anyhow; use cid::Cid; +use fil_actors_runtime::actor_error; +use fil_actors_runtime::ActorError; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; @@ -75,7 +76,7 @@ impl State { &mut self, store: &BS, addr: &Address, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut txns = make_map_with_root(&self.pending_txs, store)?; // Identify transactions that need updating @@ -86,7 +87,6 @@ impl State { txn_ids_to_purge.insert(tx_id.0.clone(), txn.clone()); } } - Ok(()) })?; // Update or remove those transactions. 
@@ -110,12 +110,17 @@ impl State { balance: TokenAmount, amount_to_spend: &TokenAmount, curr_epoch: ChainEpoch, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if amount_to_spend < &0.into() { - return Err(anyhow!("amount to spend {} less than zero", amount_to_spend)); + return Err(actor_error!( + insufficient_funds, + "amount to spend {} less than zero", + amount_to_spend + )); } if &balance < amount_to_spend { - return Err(anyhow!( + return Err(actor_error!( + insufficient_funds, "current balance {} less than amount to spend {}", balance, amount_to_spend @@ -131,7 +136,8 @@ impl State { let remaining_balance = balance - amount_to_spend; let amount_locked = self.amount_locked(curr_epoch - self.start_epoch); if remaining_balance < amount_locked { - return Err(anyhow!( + return Err(actor_error!( + insufficient_funds, "actor balance {} if spent {} would be less than required locked amount {}", remaining_balance, amount_to_spend, diff --git a/actors/multisig/tests/util.rs b/actors/multisig/tests/util.rs index a68ad47fe..dc1a1daf7 100644 --- a/actors/multisig/tests/util.rs +++ b/actors/multisig/tests/util.rs @@ -191,8 +191,7 @@ impl ActorHarness { let ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, &rt.store).unwrap(); let mut actual_txns = Vec::new(); ptx.for_each(|k, txn: &Transaction| { - actual_txns.push((TxnID(parse_uint_key(k)? 
as i64), txn.clone())); - Ok(()) + actual_txns.push((TxnID(parse_uint_key(k).unwrap() as i64), txn.clone())); }) .unwrap(); expect_txns.sort_by_key(|(TxnID(id), _txn)| (*id)); diff --git a/actors/paych/Cargo.toml b/actors/paych/Cargo.toml index 7648403b9..62fdad7d6 100644 --- a/actors/paych/Cargo.toml +++ b/actors/paych/Cargo.toml @@ -20,11 +20,11 @@ num-traits = "0.2.14" num-derive = "0.3.3" serde = { version = "1.0.136", features = ["derive"] } cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" [dev-dependencies] +anyhow = "1.0.56" fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["test_utils", "sector-default"] } fvm_ipld_amt = { version = "0.4.0", features = ["go-interop"] } derive_builder = "0.10.2" diff --git a/actors/paych/src/lib.rs b/actors/paych/src/lib.rs index 2dc7c6db8..343c83482 100644 --- a/actors/paych/src/lib.rs +++ b/actors/paych/src/lib.rs @@ -2,7 +2,9 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, cbor, resolve_to_id_addr, ActorDowncast, ActorError, Array}; +use fil_actors_runtime::{ + actor_error, cbor, resolve_to_id_addr, ActorContext, ActorDowncast, ActorError, Array, +}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; @@ -72,12 +74,8 @@ impl Actor { BS: Blockstore, RT: Runtime, { - let resolved = resolve_to_id_addr(rt, raw).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to resolve address {}", raw), - ) - })?; + let resolved = resolve_to_id_addr(rt, raw) + .with_context(|| format!("failed to resolve address {}", raw))?; let code_cid = rt .get_actor_code_cid(&resolved) diff --git a/actors/power/src/lib.rs b/actors/power/src/lib.rs index 1eff6ccb9..4a9cdcab3 100644 --- a/actors/power/src/lib.rs +++ 
b/actors/power/src/lib.rs @@ -408,7 +408,7 @@ impl Actor { } }; - if let Err(e) = mmap.for_all::<_, SealVerifyInfo, _>(|k, arr| { + if let Err(e) = mmap.try_for_all::<_, SealVerifyInfo, _>(|k, arr| { let addr = match Address::from_bytes(&k.0) { Ok(addr) => addr, Err(e) => { @@ -431,9 +431,8 @@ impl Actor { .try_into() .map_err(|_| "can not convert u64 to usize".to_string())?; infos.reserve(num_proofs); - arr.for_each::<_, ActorError>(|_, svi| { + arr.for_each(|_, svi| { infos.push(svi.clone()); - Ok(()) }) .map_err(|e| { format!( diff --git a/actors/power/src/state.rs b/actors/power/src/state.rs index be3067045..46dcbf1e8 100644 --- a/actors/power/src/state.rs +++ b/actors/power/src/state.rs @@ -333,9 +333,8 @@ pub(super) fn load_cron_events( ) -> Result, ActorError> { let mut events = Vec::new(); - mmap.for_each::<_, _, ActorError>(&epoch_key(epoch), |_, v: &CronEvent| { + mmap.for_each(&epoch_key(epoch), |_, v: &CronEvent| { events.push(v.clone()); - Ok(()) })?; Ok(events) diff --git a/actors/power/tests/harness/mod.rs b/actors/power/tests/harness/mod.rs index 9cbc310f3..a91180e03 100644 --- a/actors/power/tests/harness/mod.rs +++ b/actors/power/tests/harness/mod.rs @@ -8,7 +8,6 @@ use fil_actors_runtime::Multimap; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::{BytesDe, RawBytes}; use fvm_ipld_hamt::BytesKey; -use fvm_ipld_hamt::EitherError; use fvm_ipld_hamt::Error; use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser::BigIntDe; @@ -354,13 +353,8 @@ where V: DeserializeOwned + Serialize, { let mut ret_keys = Vec::new(); - m.for_each::<_, ()>(|k, _| { + m.for_each(|k, _| { ret_keys.push(k.clone()); - Ok(()) - }) - .map_err(|err| match err { - EitherError::User(()) => unreachable!(), - EitherError::Hamt(e) => e, })?; Ok(ret_keys) @@ -369,5 +363,5 @@ where pub fn verify_empty_map(rt: &MockRuntime, key: Cid) { let map = make_map_with_root_and_bitwidth::<_, BigIntDe>(&key, &rt.store, HAMT_BIT_WIDTH).unwrap(); - map.for_each::<_, 
()>(|_key, _val| panic!("expected no keys")).unwrap(); + map.for_each(|_key, _val| panic!("expected no keys")).unwrap(); } diff --git a/actors/runtime/src/util/multimap.rs b/actors/runtime/src/util/multimap.rs index e5ccd9c0a..62cce8897 100644 --- a/actors/runtime/src/util/multimap.rs +++ b/actors/runtime/src/util/multimap.rs @@ -90,13 +90,13 @@ where } /// Iterates through all values in the array at a given key. - pub fn for_each(&self, key: &[u8], f: F) -> Result<(), EitherError> + pub fn try_for_each(&self, key: &[u8], f: F) -> Result<(), EitherError> where V: Serialize + DeserializeOwned, F: FnMut(u64, &V) -> Result<(), U>, { if let Some(amt) = self.get::(key)? { - amt.for_each(f).map_err(|err| match err { + amt.try_for_each(f).map_err(|err| match err { fvm_ipld_amt::EitherError::User(e) => EitherError::User(e), fvm_ipld_amt::EitherError::Amt(e) => EitherError::MultiMap(e.into()), })?; @@ -105,14 +105,27 @@ where Ok(()) } + /// Iterates through all values in the array at a given key. + pub fn for_each(&self, key: &[u8], f: F) -> Result<(), Error> + where + V: Serialize + DeserializeOwned, + F: FnMut(u64, &V), + { + if let Some(amt) = self.get::(key)? 
{ + amt.for_each(f)?; + } + + Ok(()) + } + /// Iterates through all arrays in the multimap - pub fn for_all(&self, mut f: F) -> Result<(), EitherError> + pub fn try_for_all(&self, mut f: F) -> Result<(), EitherError> where V: Serialize + DeserializeOwned, F: FnMut(&BytesKey, &Array) -> Result<(), U>, { self.0 - .for_each::<_, EitherError>(|key, arr_root| { + .try_for_each::<_, EitherError>(|key, arr_root| { let arr = Array::load(arr_root, *self.0.store()) .map_err(|e| EitherError::MultiMap(e.into()))?; f(key, &arr).map_err(EitherError::User)?; @@ -125,6 +138,24 @@ where Ok(()) } + + /// Iterates through all arrays in the multimap + pub fn for_all(&self, mut f: F) -> Result<(), Error> + where + V: Serialize + DeserializeOwned, + F: FnMut(&BytesKey, &Array), + { + self.try_for_all(|key, root| { + f(key, root); + Ok(()) + }) + .map_err(|err| match err { + EitherError::User(()) => unreachable!(), + EitherError::MultiMap(e) => e, + })?; + + Ok(()) + } } /// This error wraps around around two different errors, either the native `Error` from `multimap`, or diff --git a/actors/runtime/src/util/set.rs b/actors/runtime/src/util/set.rs index eb6a849b4..ca31b9ffc 100644 --- a/actors/runtime/src/util/set.rs +++ b/actors/runtime/src/util/set.rs @@ -67,24 +67,32 @@ where } /// Iterates through all keys in the set. - pub fn for_each(&self, mut f: F) -> Result<(), Error> + pub fn try_for_each(&self, mut f: F) -> Result<(), Error> where F: FnMut(&BytesKey) -> Result<(), Error>, { // Calls the for each function on the hamt with ignoring the value - self.0.for_each(|s, _: &()| f(s)).map_err(|err| match err { + self.0.try_for_each(|s, _: &()| f(s)).map_err(|err| match err { fvm_ipld_hamt::EitherError::User(e) => e, fvm_ipld_hamt::EitherError::Hamt(e) => e.into(), }) } + /// Iterates through all keys in the set. 
+ pub fn for_each(&self, mut f: F) -> Result<(), Error> + where + F: FnMut(&BytesKey), + { + // Calls the for each function on the hamt with ignoring the value + self.0.for_each(|s, _: &()| f(s)) + } + /// Collects all keys from the set into a vector. pub fn collect_keys(&self) -> Result, Error> { let mut ret_keys = Vec::new(); self.for_each(|k| { ret_keys.push(k.clone()); - Ok(()) })?; Ok(ret_keys) diff --git a/actors/runtime/src/util/set_multimap.rs b/actors/runtime/src/util/set_multimap.rs index 969bf489e..c1fdf6516 100644 --- a/actors/runtime/src/util/set_multimap.rs +++ b/actors/runtime/src/util/set_multimap.rs @@ -104,7 +104,7 @@ where } /// Iterates through keys and converts them to a DealID to call a function on each. - pub fn for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> + pub fn try_for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> where F: FnMut(DealID) -> Result<(), Error>, { @@ -114,7 +114,7 @@ where None => return Ok(()), }; - set.for_each(|k| { + set.try_for_each(|k| { let v = parse_uint_key(k).expect("TODO"); // Run function on all parsed keys @@ -122,4 +122,23 @@ where Ok(()) }) } + + /// Iterates through keys and converts them to a DealID to call a function on each. + pub fn for_each(&self, key: ChainEpoch, mut f: F) -> Result<(), Error> + where + F: FnMut(DealID), + { + // Get construct amt from retrieved cid and return if no set exists + let set = match self.get(key)? 
{ + Some(s) => s, + None => return Ok(()), + }; + + set.for_each(|k| { + let v = parse_uint_key(k).expect("TODO"); + + // Run function on all parsed keys + f(v); + }) + } } diff --git a/actors/runtime/tests/multimap_test.rs b/actors/runtime/tests/multimap_test.rs index d0599ffbd..3d131c254 100644 --- a/actors/runtime/tests/multimap_test.rs +++ b/actors/runtime/tests/multimap_test.rs @@ -37,9 +37,8 @@ fn for_each() { mm.add("Some other string".into(), 7).unwrap(); let mut vals: Vec<(u64, u64)> = Vec::new(); - mm.for_each::<_, _, ()>(&addr.to_bytes(), |i, v| { + mm.for_each(&addr.to_bytes(), |i, v| { vals.push((i, *v)); - Ok(()) }) .unwrap(); diff --git a/actors/runtime/tests/set_multimap_test.rs b/actors/runtime/tests/set_multimap_test.rs index 47278e74b..534982467 100644 --- a/actors/runtime/tests/set_multimap_test.rs +++ b/actors/runtime/tests/set_multimap_test.rs @@ -41,7 +41,6 @@ fn for_each() { let mut vals: Vec = Vec::new(); smm.for_each(epoch, |i| { vals.push(i); - Ok(()) }) .unwrap(); diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index bc3fee9c7..a106919e1 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -149,18 +149,15 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .map_err(Into::into) .context("failed to load verified clients")?; verifiers .delete(&verifier.to_bytes()) - .map_err(Into::into) .context("failed to remove verifier")? 
.ok_or_else(|| { actor_error!(illegal_argument, "failed to remove verifier: not found") })?; - st.verifiers = - verifiers.flush().map_err(Into::into).context("failed to flush verifiers")?; + st.verifiers = verifiers.flush().context("failed to flush verifiers")?; Ok(()) })?; @@ -199,27 +196,22 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .map_err(Into::into) .context("failed to load verified clients")?; let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(Into::into) .context("failed to load verified clients")?; // Validate caller is one of the verifiers. let verifier = rt.message().caller(); let BigIntDe(verifier_cap) = verifiers .get(&verifier.to_bytes()) - .map_err(Into::into) .with_context(|| format!("failed to get Verifier {}", verifier))? .ok_or_else(|| actor_error!(not_found, format!("no such Verifier {}", verifier)))?; // Validate client to be added isn't a verifier - let found = verifiers - .contains_key(&client.to_bytes()) - .map_err(Into::into) - .context("failed to get verifier")?; + let found = + verifiers.contains_key(&client.to_bytes()).context("failed to get verifier")?; if found { return Err(actor_error!( @@ -242,12 +234,10 @@ impl Actor { verifiers .set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)) - .map_err(Into::into) .with_context(|| format!("Failed to update new verifier cap for {}", verifier))?; let client_cap = verified_clients .get(&client.to_bytes()) - .map_err(Into::into) .with_context(|| format!("Failed to get verified client {}", client))?; // if verified client exists, add allowance to existing cap @@ -260,17 +250,13 @@ impl Actor { verified_clients .set(client.to_bytes().into(), BigIntDe(client_cap.clone())) - .map_err(Into::into) .with_context(|| { format!("Failed to add verified client {} with cap {}", client, client_cap,) })?; - st.verifiers = - 
verifiers.flush().map_err(Into::into).context("failed to flush verifiers")?; - st.verified_clients = verified_clients - .flush() - .map_err(Into::into) - .context("failed to flush verified clients")?; + st.verifiers = verifiers.flush().context("failed to flush verifiers")?; + st.verified_clients = + verified_clients.flush().context("failed to flush verified clients")?; Ok(()) })?; From c401aa94ef099599c9c363a428565ddeba0848e2 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 12 Apr 2022 13:32:40 +0200 Subject: [PATCH 03/10] just miner and market missing --- Cargo.lock | 1 - actors/init/Cargo.toml | 1 - actors/init/src/lib.rs | 13 +-- actors/init/src/state.rs | 11 +- actors/multisig/src/lib.rs | 111 ++++----------------- actors/paych/src/lib.rs | 43 +++----- actors/power/src/lib.rs | 44 +++----- actors/power/src/state.rs | 16 +-- actors/runtime/src/actor_error.rs | 10 ++ actors/verifreg/src/lib.rs | 160 +++++++++--------------------- 10 files changed, 121 insertions(+), 289 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a06d4d5c1..6e41ec3f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -594,7 +594,6 @@ dependencies = [ name = "fil_actor_init" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", diff --git a/actors/init/Cargo.toml b/actors/init/Cargo.toml index 51bda4e5d..ec8253e93 100644 --- a/actors/init/Cargo.toml +++ b/actors/init/Cargo.toml @@ -21,7 +21,6 @@ serde = { version = "1.0.136", features = ["derive"] } num-traits = "0.2.14" num-derive = "0.3.3" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } -anyhow = "1.0.56" log = "0.4.14" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/init/src/lib.rs b/actors/init/src/lib.rs index d602368b4..e5dc7508a 100644 --- a/actors/init/src/lib.rs +++ b/actors/init/src/lib.rs @@ -3,12 +3,11 @@ use cid::Cid; use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use 
fil_actors_runtime::{actor_error, cbor, ActorDowncast, ActorError, SYSTEM_ACTOR_ADDR}; +use fil_actors_runtime::{actor_error, cbor, ActorContext, ActorError, SYSTEM_ACTOR_ADDR}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; use fvm_shared::address::Address; -use fvm_shared::error::ExitCode; use fvm_shared::{ActorID, MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; @@ -43,9 +42,8 @@ impl Actor { { let sys_ref: &Address = &SYSTEM_ACTOR_ADDR; rt.validate_immediate_caller_is(std::iter::once(sys_ref))?; - let state = State::new(rt.store(), params.network_name).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct init actor state") - })?; + let state = State::new(rt.store(), params.network_name) + .context("failed to construct init actor state")?; rt.create(&state)?; @@ -86,9 +84,8 @@ impl Actor { // Allocate an ID for this actor. // Store mapping of pubkey or actor address to actor ID let id_address: ActorID = rt.transaction(|s: &mut State, rt| { - s.map_address_to_new_id(rt.store(), &robust_address).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to allocate ID address") - }) + s.map_address_to_new_id(rt.store(), &robust_address) + .context("failed to allocate ID address") })?; // Create an empty actor diff --git a/actors/init/src/state.rs b/actors/init/src/state.rs index a08b75e53..b640e4b6b 100644 --- a/actors/init/src/state.rs +++ b/actors/init/src/state.rs @@ -1,11 +1,11 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::anyhow; use cid::Cid; use fil_actors_runtime::{ make_empty_map, make_map_with_root_and_bitwidth, FIRST_NON_SINGLETON_ADDR, }; +use fil_actors_runtime::{ActorContext, ActorError}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; @@ -22,10 +22,11 @@ pub struct State { } impl State { - pub fn 
new(store: &BS, network_name: String) -> anyhow::Result { + pub fn new(store: &BS, network_name: String) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .map_err(|e| anyhow!("failed to create empty map: {}", e))?; + .context("failed to create empty map")?; + Ok(Self { address_map: empty_map, next_id: FIRST_NON_SINGLETON_ADDR, network_name }) } @@ -35,7 +36,7 @@ impl State { &mut self, store: &BS, addr: &Address, - ) -> Result { + ) -> Result> { let id = self.next_id; self.next_id += 1; @@ -60,7 +61,7 @@ impl State { &self, store: &BS, addr: &Address, - ) -> anyhow::Result> { + ) -> Result, ActorError> { if addr.protocol() == Protocol::ID { return Ok(Some(*addr)); } diff --git a/actors/multisig/src/lib.rs b/actors/multisig/src/lib.rs index 3322c0c5a..2cca97075 100644 --- a/actors/multisig/src/lib.rs +++ b/actors/multisig/src/lib.rs @@ -7,7 +7,7 @@ use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Primitives, Runtime}; use fil_actors_runtime::{ actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_id_addr, ActorContext, - ActorDowncast, ActorError, Map, INIT_ACTOR_ADDR, + ActorError, Map, INIT_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -97,10 +97,9 @@ impl Actor { return Err(actor_error!(illegal_argument; "negative unlock duration disallowed")); } - let empty_root = - make_empty_map::<_, ()>(rt.store(), HAMT_BIT_WIDTH).flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty map") - })?; + let empty_root = make_empty_map::<_, ()>(rt.store(), HAMT_BIT_WIDTH) + .flush() + .context("Failed to create empty map")?; let mut st: State = State { signers: resolved_signers, @@ -146,12 +145,8 @@ impl Actor { return Err(actor_error!(forbidden, "{} is not a signer", proposer)); } - let mut ptx = make_map_with_root(&st.pending_txs, rt.store()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, 
- "failed to load pending transactions", - ) - })?; + let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) + .context("failed to load pending transactions")?; let t_id = st.next_tx_id; st.next_tx_id.0 += 1; @@ -164,19 +159,9 @@ impl Actor { approved: Vec::new(), }; - ptx.set(t_id.key(), txn.clone()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to put transaction for propose", - ) - })?; + ptx.set(t_id.key(), txn.clone()).context("failed to put transaction for propose")?; - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; Ok((t_id, txn)) })?; @@ -201,12 +186,8 @@ impl Actor { return Err(actor_error!(forbidden; "{} is not a signer", approver)); } - let ptx = make_map_with_root(&st.pending_txs, rt.store()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + let ptx = make_map_with_root(&st.pending_txs, rt.store()) + .context("failed to load pending transactions")?; let txn = get_transaction(rt, &ptx, params.id, params.proposal_hash)?; @@ -241,21 +222,11 @@ impl Actor { } let mut ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, rt.store()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + .context("failed to load pending transactions")?; let (_, tx) = ptx .delete(¶ms.id.key()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to pop transaction {:?} for cancel", params.id), - ) - })? + .with_context(|| format!("failed to pop transaction {:?} for cancel", params.id,))? 
.ok_or_else(|| { actor_error!(not_found, "no such transaction {:?} to cancel", params.id) })?; @@ -273,12 +244,7 @@ impl Actor { return Err(actor_error!(illegal_state, "hash does not match proposal params")); } - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; Ok(()) }) @@ -481,29 +447,16 @@ impl Actor { } let st = rt.transaction(|st: &mut State, rt| { - let mut ptx = make_map_with_root(&st.pending_txs, rt.store()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) + .context("failed to load pending transactions")?; // update approved on the transaction txn.approved.push(rt.message().caller()); - ptx.set(tx_id.key(), txn.clone()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to put transaction {} for approval", tx_id.0), - ) - })?; + ptx.set(tx_id.key(), txn.clone()) + .with_context(|| format!("failed to put transaction {} for approval", tx_id.0,))?; - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; // Go implementation holds reference to state after transaction so this must be cloned // to match to handle possible exit code inconsistency @@ -544,26 +497,11 @@ where rt.transaction(|st: &mut State, rt| { let mut ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, rt.store()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pending transactions", - ) - })?; + .context("failed to load pending transactions")?; - ptx.delete(&txn_id.key()).map_err(|e| { - e.downcast_default( - 
ExitCode::USR_ILLEGAL_STATE, - "failed to delete transaction for cleanup", - ) - })?; + ptx.delete(&txn_id.key()).context("failed to delete transaction for cleanup")?; - st.pending_txs = ptx.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to flush pending transactions", - ) - })?; + st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; Ok(()) })?; } @@ -583,12 +521,7 @@ where { let txn = ptx .get(&txn_id.key()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load transaction {:?} for approval", txn_id), - ) - })? + .with_context(|| format!("failed to load transaction {:?} for approval", txn_id,))? .ok_or_else(|| actor_error!(not_found, "no such transaction {:?} for approval", txn_id))?; if !proposal_hash.is_empty() { diff --git a/actors/paych/src/lib.rs b/actors/paych/src/lib.rs index 343c83482..acf5f6c73 100644 --- a/actors/paych/src/lib.rs +++ b/actors/paych/src/lib.rs @@ -2,9 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{ - actor_error, cbor, resolve_to_id_addr, ActorContext, ActorDowncast, ActorError, Array, -}; +use fil_actors_runtime::{actor_error, cbor, resolve_to_id_addr, ActorContext, ActorError, Array}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; @@ -60,9 +58,7 @@ impl Actor { let empty_arr_cid = Array::<(), _>::new_with_bit_width(rt.store(), LANE_STATES_AMT_BITWIDTH) .flush() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to create empty AMT") - })?; + .context("failed to create empty AMT")?; rt.create(&State::new(from, to, empty_arr_cid))?; Ok(()) @@ -133,9 +129,7 @@ impl Actor { })?; // Validate signature - rt.verify_signature(sig, &signer, &sv_bz).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "voucher signature invalid") - })?; + 
rt.verify_signature(sig, &signer, &sv_bz).context("voucher signature invalid")?; let pch_addr = rt.message().receiver(); let svpch_id_addr = rt.resolve_address(&sv.channel_addr).ok_or_else(|| { @@ -182,9 +176,8 @@ impl Actor { } rt.transaction(|st: &mut State, rt| { - let mut l_states = Array::load(&st.lane_states, rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load lane states") - })?; + let mut l_states = + Array::load(&st.lane_states, rt.store()).context("failed to load lane states")?; // Find the voucher lane, create and insert it in sorted order if necessary. let lane_id = sv.lane; @@ -224,12 +217,9 @@ impl Actor { redeemed_from_others += &other_ls.redeemed; other_ls.nonce = merge.nonce; - l_states.set(merge.lane, other_ls).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store lane {}", merge.lane), - ) - })?; + l_states + .set(merge.lane, other_ls) + .with_context(|| format!("failed to store lane {}", merge.lane,))?; } // 2. 
To prevent double counting, remove already redeemed amounts (from @@ -266,16 +256,11 @@ impl Actor { } } - l_states.set(lane_id, lane_state).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store lane {}", lane_id), - ) - })?; + l_states + .set(lane_id, lane_state) + .with_context(|| format!("failed to store lane {}", lane_id,))?; - st.lane_states = l_states.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save lanes") - })?; + st.lane_states = l_states.flush().context("failed to save lanes")?; Ok(()) }) } @@ -336,9 +321,7 @@ where return Err(actor_error!(illegal_argument; "maximum lane ID is 2^63-1")); } - ls.get(id).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, format!("failed to load lane {}", id)) - }) + ls.get(id).with_context(|| format!("failed to load lane {}", id)) } impl ActorCode for Actor { diff --git a/actors/power/src/lib.rs b/actors/power/src/lib.rs index 4a9cdcab3..ee24d0e81 100644 --- a/actors/power/src/lib.rs +++ b/actors/power/src/lib.rs @@ -7,8 +7,8 @@ use std::convert::TryInto; use ext::init; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, ActorContext, ActorDowncast, ActorError, - Multimap, CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, make_map_with_root_and_bitwidth, ActorContext, ActorError, Multimap, + CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -114,9 +114,8 @@ impl Actor { let window_post_proof_type = params.window_post_proof_type; rt.transaction(|st: &mut State, rt| { let mut claims = - make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims"), - )?; + make_map_with_root_and_bitwidth(&st.claims, rt.store(), 
HAMT_BIT_WIDTH) + .context("failed to load claims")?; set_claim( &mut claims, &id_address, @@ -139,9 +138,7 @@ impl Actor { ) })?; - st.claims = claims.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims") - })?; + st.claims = claims.flush().context("failed to flush claims")?; Ok(()) })?; Ok(CreateMinerReturn { id_address, robust_address }) @@ -162,9 +159,8 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut claims = - make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims"), - )?; + make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) + .context("failed to load claims")?; st.add_to_claim( rt.policy(), @@ -180,9 +176,7 @@ impl Actor { ) })?; - st.claims = claims.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims") - })?; + st.claims = claims.flush().context("failed to flush claims")?; Ok(()) }) } @@ -524,9 +518,7 @@ impl Actor { let claims = make_map_with_root_and_bitwidth::<_, Claim>(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + .context("failed to load claims")?; for epoch in st.first_cron_epoch..=rt_epoch { let epoch_events = load_cron_events(&events, epoch) .with_context(|| format!("failed to load cron events at {}", epoch))?; @@ -536,13 +528,9 @@ impl Actor { } for evt in epoch_events.into_iter() { - let miner_has_claim = - claims.contains_key(&evt.miner_addr.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to look up claim", - ) - })?; + let miner_has_claim = claims + .contains_key(&evt.miner_addr.to_bytes()) + .context("failed to look up claim")?; if !miner_has_claim { debug!("skipping cron event for unknown miner: {}", evt.miner_addr); continue; @@ -588,9 +576,7 @@ impl Actor { rt.transaction(|st: &mut 
State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + .context("failed to load claims")?; // Remove power and leave miner frozen for miner_addr in failed_miner_crons { @@ -605,9 +591,7 @@ impl Actor { st.miner_count -= 1 } - st.claims = claims.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims") - })?; + st.claims = claims.flush().context("failed to flush claims")?; Ok(()) })?; } diff --git a/actors/power/src/state.rs b/actors/power/src/state.rs index 46dcbf1e8..a7a566cac 100644 --- a/actors/power/src/state.rs +++ b/actors/power/src/state.rs @@ -7,7 +7,7 @@ use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ actor_error, make_empty_map, make_map_with_root, make_map_with_root_and_bitwidth, ActorContext, - ActorDowncast, ActorError, Map, Multimap, + ActorError, Map, Multimap, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -17,7 +17,6 @@ use fvm_shared::address::Address; use fvm_shared::bigint::{bigint_ser, BigInt}; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; -use fvm_shared::error::ExitCode; use fvm_shared::sector::{RegisteredPoStProof, StoragePower}; use fvm_shared::smooth::{AlphaBetaFilter, FilterEstimate, DEFAULT_ALPHA, DEFAULT_BETA}; use fvm_shared::HAMT_BIT_WIDTH; @@ -269,13 +268,10 @@ impl State { where BS: Blockstore, { - let claims = make_map_with_root::<_, Claim>(&self.claims, store).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + let claims = + make_map_with_root::<_, Claim>(&self.claims, store).context("failed to load claims")?; - if !claims.contains_key(&miner_addr.to_bytes()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim") - })? 
{ + if !claims.contains_key(&miner_addr.to_bytes()).context("failed to look up claim")? { return Err(actor_error!( forbidden, "unknown miner {} forbidden to interact with power actor", @@ -292,9 +288,7 @@ impl State { ) -> Result, ActorError> { let claims = make_map_with_root_and_bitwidth::<_, Claim>(&self.claims, store, HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load claims") - })?; + .context("failed to load claims")?; let claim = get_claim(&claims, miner)?; Ok(claim.cloned()) diff --git a/actors/runtime/src/actor_error.rs b/actors/runtime/src/actor_error.rs index 9c976c61b..b376600b3 100644 --- a/actors/runtime/src/actor_error.rs +++ b/actors/runtime/src/actor_error.rs @@ -3,6 +3,8 @@ use std::fmt::Display; use fvm_shared::error::ExitCode; use thiserror::Error; +use crate::ActorDowncast; + /// The error type returned by actor method calls. #[derive(Error, Debug, Clone, PartialEq)] #[error("ActorError(exit_code: {exit_code:?}, msg: {msg})")] @@ -183,3 +185,11 @@ impl> ActorContext for Result { }) } } + +// TODO: remove once the runtime doesn't use anyhow::Result anymore +impl From for ActorError { + fn from(e: anyhow::Error) -> Self { + // THIS DEFAULT IS WRONG, it is just a placeholder + e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "runtime error") + } +} diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index a106919e1..1704c7272 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -4,14 +4,13 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_id_addr, ActorContext, - ActorDowncast, ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_ipld_hamt::BytesKey; use fvm_shared::address::Address; use 
fvm_shared::bigint::bigint_ser::BigIntDe; -use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, HAMT_BIT_WIDTH, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::{FromPrimitive, Signed, Zero}; @@ -55,9 +54,7 @@ impl Actor { .resolve_address(&root_key) .ok_or_else(|| actor_error!(illegal_argument, "root should be an ID address"))?; - let st = State::new(rt.store(), id_addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create verifreg state") - })?; + let st = State::new(rt.store(), id_addr).context("Failed to create verifreg state")?; rt.create(&st)?; Ok(()) @@ -90,27 +87,18 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context("failed to load verified clients")?; + let verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verified_clients, rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .context("failed to load verified clients")?; - let found = verified_clients.contains_key(&verifier.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get client state for {}", verifier), - ) - })?; + let found = verified_clients + .contains_key(&verifier.to_bytes()) + .with_context(|| format!("failed to get client state for {}", verifier))?; if found { return Err(actor_error!( illegal_argument, @@ -119,12 +107,11 @@ impl Actor { )); } - verifiers.set(verifier.to_bytes().into(), BigIntDe(params.allowance.clone())).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add verifier"), - )?; - st.verifiers = verifiers.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers") - })?; + verifiers 
+ .set(verifier.to_bytes().into(), BigIntDe(params.allowance.clone())) + .context("failed to add verifier")?; + + st.verifiers = verifiers.flush().context("failed to flush verifiers")?; Ok(()) })?; @@ -288,21 +275,11 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context("failed to load verified clients")?; let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) - })? + .with_context(|| format!("failed to get verified client {}", &client))? .ok_or_else(|| actor_error!(not_found, "no such verified client {}", client))?; if vc_cap.is_negative() { return Err(actor_error!( @@ -329,12 +306,7 @@ impl Actor { // Will be restored later if the deal did not get activated with a ProvenSector. verified_clients .delete(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to delete verified client {}", client), - ) - })? + .with_context(|| format!("Failed to delete verified client {}", client))? 
.ok_or_else(|| { actor_error!( illegal_state, @@ -343,19 +315,14 @@ impl Actor { ) })?; } else { - verified_clients.set(client.to_bytes().into(), BigIntDe(new_vc_cap)).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to update verified client {}", client), - ) - }, - )?; + verified_clients + .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) + .with_context(|| format!("Failed to update verified client {}", client))?; } - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; + st.verified_clients = + verified_clients.flush().context("failed to flush verified clients")?; + Ok(()) })?; @@ -389,25 +356,19 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load verified clients", - ) - })?; + .context("failed to load verified clients")?; + let verifiers = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verifiers, rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers") - })?; + .context("failed to load verifiers")?; // validate we are NOT attempting to do this for a verifier - let found = verifiers.contains_key(&client.to_bytes()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier") - })?; + let found = + verifiers.contains_key(&client.to_bytes()).context("failed to get verifier")?; + if found { return Err(actor_error!( illegal_argument, @@ -419,27 +380,19 @@ impl Actor { // Get existing cap let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) - })? 
+ .with_context(|| format!("failed to get verified client {}", &client))? .cloned() .unwrap_or_default(); // Update to new cap let new_vc_cap = vc_cap + ¶ms.deal_size; - verified_clients.set(client.to_bytes().into(), BigIntDe(new_vc_cap)).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("Failed to put verified client {}", client), - ) - })?; + verified_clients + .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) + .with_context(|| format!("Failed to put verified client {}", client))?; + + st.verified_clients = + verified_clients.flush().context("failed to flush verified clients")?; - st.verified_clients = verified_clients.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients") - })?; Ok(()) })?; @@ -496,9 +449,7 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .context("failed to load verified clients")?; // check that `client` is currently a verified client if !is_verifier(rt, st, client)? { @@ -508,12 +459,7 @@ impl Actor { // get existing cap allocated to client let BigIntDe(previous_data_cap) = verified_clients .get(&client.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get verified client {}", &client), - ) - })? + .with_context(|| format!("failed to get verified client {}", &client))? 
.cloned() .unwrap_or_default(); @@ -533,12 +479,7 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load datacap removal proposal ids", - ) - })?; + .context("failed to load datacap removal proposal ids")?; let verifier_1_id = use_proposal_id(&mut proposal_ids, verifier_1, client)?; let verifier_2_id = use_proposal_id(&mut proposal_ids, verifier_2, client)?; @@ -561,22 +502,16 @@ impl Actor { let new_data_cap = &previous_data_cap - ¶ms.data_cap_amount_to_remove; if new_data_cap <= Zero::zero() { // no DataCap remaining, delete verified client - verified_clients.delete(&client.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete verified client {}", &client), - ) - })?; + verified_clients + .delete(&client.to_bytes()) + .with_context(|| format!("failed to delete verified client {}", &client))?; removed_data_cap_amount = previous_data_cap; } else { // update DataCap amount after removal verified_clients .set(BytesKey::from(client.to_bytes()), BigIntDe(new_data_cap)) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update datacap for verified client {}", &client), - ) + .with_context(|| { + format!("failed to update datacap for verified client {}", &client) })?; removed_data_cap_amount = params.data_cap_amount_to_remove.clone(); } @@ -615,14 +550,11 @@ where rt.store(), HAMT_BIT_WIDTH, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients") - })?; + .context("failed to load verified clients")?; // check that the `address` is currently a verified client - let found = verified_clients - .contains_key(&address.to_bytes()) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier"))?; + let found = + verified_clients.contains_key(&address.to_bytes()).context("failed to get verifier")?; Ok(found) } From 
4512ad16429266f5779cc715822c97ad9c337272 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 12 Apr 2022 13:53:14 +0200 Subject: [PATCH 04/10] convert market actor --- Cargo.lock | 19 +- actors/market/Cargo.toml | 1 - actors/market/src/balance_table.rs | 35 +-- actors/market/src/lib.rs | 355 +++++++++++------------------ actors/market/src/state.rs | 164 ++++++------- 5 files changed, 217 insertions(+), 357 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6e41ec3f0..6bce587df 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -611,7 +611,6 @@ name = "fil_actor_market" version = "8.0.0-alpha.1" dependencies = [ "ahash", - "anyhow", "cid", "fil_actor_power", "fil_actor_reward", @@ -697,7 +696,6 @@ dependencies = [ name = "fil_actor_power" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", @@ -748,7 +746,6 @@ dependencies = [ name = "fil_actor_verifreg" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "cid", "fil_actors_runtime", "fvm_ipld_blockstore", @@ -935,8 +932,6 @@ dependencies = [ [[package]] name = "fvm_ipld_amt" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3394e5f9c2adb4d586519bc24bbfd659366e01e7ffa6cda676be94a62bab474" dependencies = [ "ahash", "anyhow", @@ -952,8 +947,6 @@ dependencies = [ [[package]] name = "fvm_ipld_bitfield" version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9011349297962982b8ab2663c220034525ec0f95f408c2b561d3d98867f1a803" dependencies = [ "cs_serde_bytes", "fvm_ipld_encoding", @@ -965,8 +958,6 @@ dependencies = [ [[package]] name = "fvm_ipld_blockstore" version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1985eae58ec2fbf54535ce115c72a2141459fb7fb4ff7379e17bffae0e302578" dependencies = [ "anyhow", "cid", @@ -990,8 +981,6 @@ dependencies = [ [[package]] name = "fvm_ipld_encoding" version = "0.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bd635987aac46a753ec81767713af35cb50f182c7cc49d3a429643ede0e709" dependencies = [ "anyhow", "cid", @@ -1028,8 +1017,6 @@ dependencies = [ [[package]] name = "fvm_sdk" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cd152ce62acadd75bdb461dcb8009389a3a7583c0832a122537da8fc17d73e7" dependencies = [ "cid", "fvm_ipld_encoding", @@ -1074,8 +1061,6 @@ dependencies = [ [[package]] name = "fvm_shared" version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "857cf6a95b35d583e8db25d38a939af335cb2eba2f8a5e1f1c0be58f77d52f5b" dependencies = [ "anyhow", "bimap", @@ -1973,3 +1958,7 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[patch.unused]] +name = "fvm_ipld_hamt" +version = "0.5.0" diff --git a/actors/market/Cargo.toml b/actors/market/Cargo.toml index f7b9c6a3b..363b9b15a 100644 --- a/actors/market/Cargo.toml +++ b/actors/market/Cargo.toml @@ -24,7 +24,6 @@ ahash = "0.7.6" serde = { version = "1.0.136", features = ["derive"] } cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } log = "0.4.14" -anyhow = "1.0.56" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" libipld-core = { version = "0.13.1", features = ["serde-codec"] } diff --git a/actors/market/src/balance_table.rs b/actors/market/src/balance_table.rs index 2f4de73a1..95b7a9cd0 100644 --- a/actors/market/src/balance_table.rs +++ b/actors/market/src/balance_table.rs @@ -9,7 +9,9 @@ use fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::econ::TokenAmount; use num_traits::{Signed, Zero}; -use fil_actors_runtime::{make_empty_map, make_map_with_root_and_bitwidth, Map}; +use fil_actors_runtime::{ + actor_error, make_empty_map, 
make_map_with_root_and_bitwidth, ActorError, Map, +}; pub const BALANCE_TABLE_BITWIDTH: u32 = 6; @@ -25,17 +27,17 @@ where } /// Initializes a balance table from a root Cid - pub fn from_root(bs: &'a BS, cid: &Cid) -> Result { + pub fn from_root(bs: &'a BS, cid: &Cid) -> Result> { Ok(Self(make_map_with_root_and_bitwidth(cid, bs, BALANCE_TABLE_BITWIDTH)?)) } /// Retrieve root from balance table - pub fn root(&mut self) -> Result { + pub fn root(&mut self) -> Result> { self.0.flush() } /// Gets token amount for given address in balance table - pub fn get(&self, key: &Address) -> Result { + pub fn get(&self, key: &Address) -> Result> { if let Some(v) = self.0.get(&key.to_bytes())? { Ok(v.0.clone()) } else { @@ -44,12 +46,17 @@ where } /// Adds token amount to previously initialized account. - pub fn add(&mut self, key: &Address, value: &TokenAmount) -> Result<(), HamtError> { + pub fn add(&mut self, key: &Address, value: &TokenAmount) -> Result<(), ActorError> { let prev = self.get(key)?; let sum = &prev + value; if sum.is_negative() { - Err(format!("New balance in table cannot be negative: {}", sum).into()) - } else if sum.is_zero() && !prev.is_zero() { + return Err(actor_error!( + illegal_argument, + "new balance in table cannot be negative: {}", + sum + )); + } + if sum.is_zero() && !prev.is_zero() { self.0.delete(&key.to_bytes())?; Ok(()) } else { @@ -66,7 +73,7 @@ where key: &Address, req: &TokenAmount, floor: &TokenAmount, - ) -> Result { + ) -> Result { let prev = self.get(key)?; let available = std::cmp::max(TokenAmount::zero(), prev - floor); let sub: TokenAmount = std::cmp::min(&available, req).clone(); @@ -79,24 +86,24 @@ where } /// Subtracts value from a balance, and errors if full amount was not substracted. 
- pub fn must_subtract(&mut self, key: &Address, req: &TokenAmount) -> Result<(), HamtError> { + pub fn must_subtract(&mut self, key: &Address, req: &TokenAmount) -> Result<(), ActorError> { let prev = self.get(key)?; if req > &prev { - Err("couldn't subtract the requested amount".into()) - } else { - self.add(key, &-req) + return Err(actor_error!(illegal_argument, "couldn't subtract the requested amount")); } + self.add(key, &-req)?; + + Ok(()) } /// Returns total balance held by this balance table #[allow(dead_code)] - pub fn total(&self) -> Result { + pub fn total(&self) -> Result> { let mut total = TokenAmount::default(); self.0.for_each(|_, v: &BigIntDe| { total += &v.0; - Ok(()) })?; Ok(total) diff --git a/actors/market/src/lib.rs b/actors/market/src/lib.rs index 8c5757147..c76f998b9 100644 --- a/actors/market/src/lib.rs +++ b/actors/market/src/lib.rs @@ -12,7 +12,6 @@ use fvm_shared::bigint::BigInt; use fvm_shared::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED}; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; -use fvm_shared::error::ExitCode; use fvm_shared::piece::PieceInfo; use fvm_shared::reward::ThisEpochRewardReturn; use fvm_shared::sector::StoragePower; @@ -24,7 +23,7 @@ use num_traits::{FromPrimitive, Signed, Zero}; use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorDowncast, ActorError, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorError, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; @@ -92,9 +91,7 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let st = State::new(rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to create market state") - })?; + let st = State::new(rt.store()).context("Failed to create market state")?; 
rt.create(&st)?; Ok(()) } @@ -125,20 +122,15 @@ impl Actor { msm.with_escrow_table(Permission::Write) .with_locked_table(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; - msm.escrow_table.as_mut().unwrap().add(&nominal, &msg_value).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to add balance to escrow table", - ) - })?; + msm.escrow_table + .as_mut() + .unwrap() + .add(&nominal, &msg_value) + .context("failed to add balance to escrow table")?; - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; @@ -170,32 +162,26 @@ impl Actor { msm.with_escrow_table(Permission::Write) .with_locked_table(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; // The withdrawable amount might be slightly less than nominal // depending on whether or not all relevant entries have been processed // by cron - let min_balance = msm.locked_table.as_ref().unwrap().get(&nominal).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance") - })?; + let min_balance = msm + .locked_table + .as_ref() + .unwrap() + .get(&nominal) + .context("failed to get locked balance")?; let ex = msm .escrow_table .as_mut() .unwrap() .subtract_with_minimum(&nominal, ¶ms.amount, &min_balance) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to subtract from escrow table", - ) - })?; + .context("failed to subtract from escrow table")?; - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(ex) })?; @@ -275,7 +261,7 @@ impl Actor { 
.with_escrow_table(Permission::ReadOnly) .with_locked_table(Permission::ReadOnly) .build() - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load msm"))?; + .context("failed to load msm")?; for (di, mut deal) in params.deals.into_iter().enumerate() { // drop malformed deals @@ -307,25 +293,18 @@ impl Actor { let lockup = total_client_lockup.entry(client_id).or_default(); *lockup += deal.proposal.client_balance_requirement(); - let client_balance_ok = msm.balance_covered(client, lockup).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to check client balance coverage", - ) - })?; + let client_balance_ok = msm + .balance_covered(client, lockup) + .context("failed to check client balance coverage")?; if !client_balance_ok { info!("invalid deal: {}: insufficient client funds to cover proposal cost", di); continue; } total_provider_lockup += &deal.proposal.provider_collateral; - let provider_balance_ok = - msm.balance_covered(provider, &total_provider_lockup).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to check provider balance coverage", - ) - })?; + let provider_balance_ok = msm + .balance_covered(provider, &total_provider_lockup) + .context("failed to check provider balance coverage")?; if !provider_balance_ok { info!("invalid deal: {}: insufficient provider funds to cover proposal cost", di); @@ -344,13 +323,12 @@ impl Actor { // check proposalCids for duplication within message batch // check state PendingProposals for duplication across messages - let duplicate_in_state = - msm.pending_deals.as_ref().unwrap().has(&pcid.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to check for existence of deal proposal", - ) - })?; + let duplicate_in_state = msm + .pending_deals + .as_ref() + .unwrap() + .has(&pcid.to_bytes()) + .context("failed to check for existence of deal proposal")?; let duplicate_in_message = proposal_cid_lookup.contains(&pcid); if 
duplicate_in_state || duplicate_in_message { info!("invalid deal {}: cannot publish duplicate deal proposal", di); @@ -410,9 +388,7 @@ impl Actor { .with_escrow_table(Permission::Write) .with_locked_table(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; // All storage dealProposals will be added in an atomic transaction; this operation will be unrolled if any of them fails. // This should only fail on programmer error because all expected invalid conditions should be filtered in the first set of checks. for (vid, valid_deal) in valid_deals.iter().enumerate() { @@ -422,31 +398,32 @@ impl Actor { let pcid = valid_proposal_cids[vid]; - msm.pending_deals.as_mut().unwrap().put(pcid.to_bytes().into()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set pending deal") - })?; - msm.deal_proposals.as_mut().unwrap().set(id, valid_deal.proposal.clone()).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set deal"), - )?; + msm.pending_deals + .as_mut() + .unwrap() + .put(pcid.to_bytes().into()) + .context("failed to set pending deal")?; + msm.deal_proposals + .as_mut() + .unwrap() + .set(id, valid_deal.proposal.clone()) + .context("failed to set deal")?; // We randomize the first epoch for when the deal will be processed so an attacker isn't able to // schedule too many deals for the same tick. 
let process_epoch = gen_rand_next_epoch(rt.policy(), valid_deal.proposal.start_epoch, id); - msm.deals_by_epoch.as_mut().unwrap().put(process_epoch, id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to set deal ops by epoch", - ) - })?; + msm.deals_by_epoch + .as_mut() + .unwrap() + .put(process_epoch, id) + .context("failed to set deal ops by epoch")?; new_deal_ids.push(id); } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; @@ -470,9 +447,8 @@ impl Actor { let curr_epoch = rt.curr_epoch(); let st: State = rt.state()?; - let proposals = DealArray::load(&st.proposals, rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals") - })?; + let proposals = + DealArray::load(&st.proposals, rt.store()).context("failed to load deal proposals")?; let mut weights = Vec::with_capacity(params.sectors.len()); for sector in params.sectors.iter() { @@ -483,12 +459,7 @@ impl Actor { sector.sector_expiry, curr_epoch, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to validate deal proposals for activation", - ) - })?; + .context("failed to validate deal proposals for activation")?; weights.push(SectorWeights { deal_space, deal_weight, verified_deal_weight }); } @@ -516,31 +487,22 @@ impl Actor { params.sector_expiry, curr_epoch, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to validate deal proposals for activation", - ) - })?; + .context("failed to validate deal proposals for activation")?; let mut msm = st.mutator(rt.store()); msm.with_deal_states(Permission::Write) .with_pending_proposals(Permission::ReadOnly) .with_deal_proposals(Permission::ReadOnly) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; for 
deal_id in params.deal_ids { // This construction could be replaced with a single "update deal state" // state method, possibly batched over all deal ids at once. - let s = msm.deal_states.as_ref().unwrap().get(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get state for deal_id ({})", deal_id), - ) - })?; + let s = + msm.deal_states.as_ref().unwrap().get(deal_id).with_context(|| { + format!("failed to get state for deal_id ({})", deal_id) + })?; if s.is_some() { return Err(actor_error!( illegal_argument, @@ -554,25 +516,19 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) - })? + .with_context(|| format!("failed to get deal_id ({})", deal_id))? .ok_or_else(|| actor_error!(not_found, "no such deal_id: {}", deal_id))?; let propc = proposal .cid() .map_err(|e| ActorError::from(e).wrap("failed to calculate proposal Cid"))?; - let has = - msm.pending_deals.as_ref().unwrap().has(&propc.to_bytes()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get pending proposal ({})", propc), - ) - })?; + let has = msm + .pending_deals + .as_ref() + .unwrap() + .has(&propc.to_bytes()) + .with_context(|| format!("failed to get pending proposal ({})", propc))?; if !has { return Err(actor_error!( @@ -593,17 +549,10 @@ impl Actor { slash_epoch: EPOCH_UNDEFINED, }, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to set deal state {}", deal_id), - ) - })?; + .with_context(|| format!("failed to set deal state {}", deal_id))?; } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; @@ -629,14 +578,15 @@ impl Actor { msm.with_deal_states(Permission::Write) .with_deal_proposals(Permission::ReadOnly) .build() - 
.map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; for id in params.deal_ids { - let deal = msm.deal_proposals.as_ref().unwrap().get(id).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get deal proposal") - })?; + let deal = msm + .deal_proposals + .as_ref() + .unwrap() + .get(id) + .context("failed to get deal proposal")?; // The deal may have expired and been deleted before the sector is terminated. // Nothing to do, but continue execution for the other deals. if deal.is_none() { @@ -666,9 +616,7 @@ impl Actor { .as_ref() .unwrap() .get(id) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state") - })? + .context("failed to get deal state")? // A deal with a proposal but no state is not activated, but then it should not be // part of a sector that is terminating. .ok_or_else(|| actor_error!(illegal_argument, "no state for deal {}", id))?; @@ -683,17 +631,14 @@ impl Actor { // and slashing of provider collateral happens in cron_tick. 
state.slash_epoch = params.epoch; - msm.deal_states.as_mut().unwrap().set(id, state).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to set deal state ({})", id), - ) - })?; + msm.deal_states + .as_mut() + .unwrap() + .set(id, state) + .with_context(|| format!("failed to set deal state ({})", id))?; } - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; Ok(()) @@ -711,33 +656,23 @@ impl Actor { let st: State = rt.state()?; - let proposals = DealArray::load(&st.proposals, rt.store()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals") - })?; + let proposals = + DealArray::load(&st.proposals, rt.store()).context("failed to load deal proposals")?; let mut commds = Vec::with_capacity(params.inputs.len()); for comm_input in params.inputs.iter() { let mut pieces: Vec = Vec::with_capacity(comm_input.deal_ids.len()); for deal_id in &comm_input.deal_ids { let deal = proposals .get(*deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) - })? + .with_context(|| format!("failed to get deal_id ({})", deal_id))?
.ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) })?; pieces.push(PieceInfo { cid: deal.piece_cid, size: deal.piece_size }); } - let commd = - rt.compute_unsealed_sector_cid(comm_input.sector_type, &pieces).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - "failed to compute unsealed sector CID", - ) - })?; + let commd = rt + .compute_unsealed_sector_cid(comm_input.sector_type, &pieces) + .context("failed to compute unsealed sector CID")?; commds.push(commd); } @@ -766,9 +701,7 @@ impl Actor { .with_deal_proposals(Permission::Write) .with_pending_proposals(Permission::Write) .build() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load state") - })?; + .context("failed to load state")?; for i in (last_cron + 1)..=rt.curr_epoch() { // TODO specs-actors modifies msm as it's iterated through, which is memory unsafe @@ -782,11 +715,8 @@ impl Actor { .unwrap() .for_each(i, |deal_id| { deal_ids.push(deal_id); - Ok(()) }) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to set deal state") - })?; + .context("failed to set deal state")?; for deal_id in deal_ids { let deal = msm @@ -794,12 +724,7 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to get deal_id ({})", deal_id), - ) - })? + .with_context(|| format!("failed to get deal_id ({})", deal_id))? .ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) })? @@ -815,12 +740,7 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to get deal state", - ) - })? + .context("failed to get deal state")? .cloned(); // deal has been published but not activated yet -> terminate it @@ -846,12 +766,9 @@ impl Actor { // Delete the proposal (but not state, which doesn't exist). 
let deleted = - msm.deal_proposals.as_mut().unwrap().delete(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete deal proposal {}", deal_id), - ) - })?; + msm.deal_proposals.as_mut().unwrap().delete(deal_id).with_context( + || format!("failed to delete deal proposal {}", deal_id), + )?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -865,11 +782,8 @@ impl Actor { .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete pending proposal {}", deal_id), - ) + .with_context(|| { + format!("failed to delete pending proposal {}", deal_id) })? .ok_or_else(|| { actor_error!( @@ -887,12 +801,7 @@ impl Actor { .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete pending proposal {}", dcid), - ) - })? + .with_context(|| format!("failed to delete pending proposal {}", dcid))? .ok_or_else(|| { actor_error!( illegal_state, @@ -927,13 +836,12 @@ impl Actor { amount_slashed += slash_amount; // Delete proposal and state simultaneously. 
- let deleted = - msm.deal_states.as_mut().unwrap().delete(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to delete deal state", - ) - })?; + let deleted = msm + .deal_states + .as_mut() + .unwrap() + .delete(deal_id) + .context("failed to delete deal state")?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -941,13 +849,12 @@ impl Actor { )); } - let deleted = - msm.deal_proposals.as_mut().unwrap().delete(deal_id).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to delete deal proposal", - ) - })?; + let deleted = msm + .deal_proposals + .as_mut() + .unwrap() + .delete(deal_id) + .context("failed to delete deal proposal")?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -972,12 +879,11 @@ impl Actor { } state.last_updated_epoch = curr_epoch; - msm.deal_states.as_mut().unwrap().set(deal_id, state).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to set deal state", - ) - })?; + msm.deal_states + .as_mut() + .unwrap() + .set(deal_id, state) + .context("failed to set deal state")?; if let Some(ev) = updates_needed.get_mut(&next_epoch) { ev.push(deal_id); @@ -986,29 +892,25 @@ impl Actor { } } } - msm.deals_by_epoch.as_mut().unwrap().remove_all(i).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to delete deal ops for epoch {}", i), - ) - })?; + msm.deals_by_epoch + .as_mut() + .unwrap() + .remove_all(i) + .with_context(|| format!("failed to delete deal ops for epoch {}", i))?; } // updates_needed is already sorted by epoch. 
for (epoch, deals) in updates_needed { - msm.deals_by_epoch.as_mut().unwrap().put_many(epoch, &deals).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to reinsert deal IDs for epoch {}", epoch), - ) - })?; + msm.deals_by_epoch + .as_mut() + .unwrap() + .put_many(epoch, &deals) + .with_context(|| format!("failed to reinsert deal IDs for epoch {}", epoch))?; } msm.st.last_cron = rt.curr_epoch(); - msm.commit_state().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to flush state") - })?; + msm.commit_state().context("failed to flush state")?; Ok(()) })?; @@ -1051,7 +953,7 @@ pub fn validate_deals_for_activation( miner_addr: &Address, sector_expiry: ChainEpoch, curr_epoch: ChainEpoch, -) -> anyhow::Result<(BigInt, BigInt, u64)> +) -> Result<(BigInt, BigInt, u64), ActorError> where BS: Blockstore, { @@ -1066,7 +968,7 @@ pub fn validate_and_compute_deal_weight( miner_addr: &Address, sector_expiry: ChainEpoch, sector_activation: ChainEpoch, -) -> anyhow::Result<(BigInt, BigInt, u64)> +) -> Result<(BigInt, BigInt, u64), ActorError> where BS: Blockstore, { @@ -1243,9 +1145,8 @@ where { // Generate unsigned bytes let sv_bz = serialize_vec(&proposal.proposal, "deal proposal")?; - rt.verify_signature(&proposal.client_signature, &proposal.proposal.client, &sv_bz).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "signature proposal invalid"), - )?; + rt.verify_signature(&proposal.client_signature, &proposal.proposal.client, &sv_bz) + .context("signature proposal invalid")?; Ok(()) } diff --git a/actors/market/src/state.rs b/actors/market/src/state.rs index 08e73e9ff..8b70100af 100644 --- a/actors/market/src/state.rs +++ b/actors/market/src/state.rs @@ -2,11 +2,9 @@ // SPDX-License-Identifier: Apache-2.0, MIT use crate::balance_table::BalanceTable; -use anyhow::anyhow; use cid::Cid; -use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ - actor_error, make_empty_map, ActorDowncast, 
ActorError, Array, Set, SetMultimap, + actor_error, make_empty_map, runtime::Policy, ActorContext, ActorError, Array, Set, SetMultimap, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -16,7 +14,6 @@ use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, EPOCH_UNDEFINED}; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; -use fvm_shared::error::ExitCode; use fvm_shared::HAMT_BIT_WIDTH; use num_traits::{Signed, Zero}; @@ -69,25 +66,24 @@ pub struct State { } impl State { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let empty_proposals_array = Array::<(), BS>::new_with_bit_width(store, PROPOSALS_AMT_BITWIDTH) .flush() - .map_err(|e| anyhow!("Failed to create empty proposals array: {}", e))?; + .context("Failed to create empty proposals array")?; let empty_states_array = Array::<(), BS>::new_with_bit_width(store, STATES_AMT_BITWIDTH) .flush() - .map_err(|e| anyhow!("Failed to create empty states array: {}", e))?; + .context("Failed to create empty states array")?; let empty_pending_proposals_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .map_err(|e| anyhow!("Failed to create empty pending proposals map state: {}", e))?; - let empty_balance_table = BalanceTable::new(store) - .root() - .map_err(|e| anyhow!("Failed to create empty balance table map: {}", e))?; - - let empty_deal_ops_hamt = SetMultimap::new(store) - .root() - .map_err(|e| anyhow!("Failed to create empty multiset: {}", e))?; + .context("Failed to create empty pending proposals map state")?; + let empty_balance_table = + BalanceTable::new(store).root().context("Failed to create empty balance table map")?; + + let empty_deal_ops_hamt = + SetMultimap::new(store).root().context("Failed to create empty multiset")?; + Ok(Self { proposals: empty_proposals_array, states: empty_states_array, @@ -216,7 +212,7 @@ where } } - pub(super) fn build(&mut self) -> anyhow::Result<&mut Self> { + pub(super) fn 
build(&mut self) -> Result<&mut Self, ActorError> { if self.proposal_permit != Permission::Invalid { self.deal_proposals = Some(DealArray::load(&self.st.proposals, self.store)?); } @@ -282,25 +278,22 @@ where self } - pub(super) fn commit_state(&mut self) -> anyhow::Result<()> { + pub(super) fn commit_state(&mut self) -> Result<(), ActorError> { if self.proposal_permit == Permission::Write { if let Some(s) = &mut self.deal_proposals { - self.st.proposals = - s.flush().map_err(|e| e.downcast_wrap("failed to flush deal proposals"))?; + self.st.proposals = s.flush().context("failed to flush deal proposals")?; } } if self.state_permit == Permission::Write { if let Some(s) = &mut self.deal_states { - self.st.states = - s.flush().map_err(|e| e.downcast_wrap("failed to flush deal states"))?; + self.st.states = s.flush().context("failed to flush deal states")?; } } if self.locked_permit == Permission::Write { if let Some(s) = &mut self.locked_table { - self.st.locked_table = - s.root().map_err(|e| e.downcast_wrap("failed to flush locked table"))?; + self.st.locked_table = s.root().context("failed to flush locked table")?; } if let Some(s) = &mut self.total_client_locked_collateral { self.st.total_client_locked_collateral = s.clone(); @@ -315,22 +308,19 @@ where if self.escrow_permit == Permission::Write { if let Some(s) = &mut self.escrow_table { - self.st.escrow_table = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.escrow_table = s.root().context("failed to flush escrow table")?; } } if self.pending_permit == Permission::Write { if let Some(s) = &mut self.pending_deals { - self.st.pending_proposals = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow table"))?; + self.st.pending_proposals = s.root().context("failed to flush escrow table")?; } } if self.dpe_permit == Permission::Write { if let Some(s) = &mut self.deals_by_epoch { - self.st.deal_ops_by_epoch = - s.root().map_err(|e| e.downcast_wrap("failed to flush escrow 
table"))?; + self.st.deal_ops_by_epoch = s.root().context("failed to flush escrow table")?; } } @@ -408,26 +398,16 @@ where // Unlock remaining storage fee self.unlock_balance(&deal.client, &payment_remaining, Reason::ClientStorageFee) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to unlock remaining client storage fee", - ) - })?; + .context("failed to unlock remaining client storage fee")?; // Unlock client collateral self.unlock_balance(&deal.client, &deal.client_collateral, Reason::ClientCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to unlock client collateral", - ) - })?; + .context("failed to unlock client collateral")?; // slash provider collateral let slashed = deal.provider_collateral.clone(); self.slash_balance(&deal.provider, &slashed, Reason::ProviderCollateral) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "slashing balance"))?; + .context("slashing balance")?; return Ok((slashed, EPOCH_UNDEFINED, true)); } @@ -453,36 +433,20 @@ where deal: &DealProposal, ) -> Result { self.unlock_balance(&deal.client, &deal.total_storage_fee(), Reason::ClientStorageFee) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failure unlocking client storage fee", - ) - })?; + .context("failure unlocking client storage fee")?; self.unlock_balance(&deal.client, &deal.client_collateral, Reason::ClientCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failure unlocking client collateral", - ) - })?; + .context("failure unlocking client collateral")?; let amount_slashed = collateral_penalty_for_deal_activation_missed(deal.provider_collateral.clone()); let amount_remaining = deal.provider_balance_requirement() - &amount_slashed; - self.slash_balance(&deal.provider, &amount_slashed, Reason::ProviderCollateral).map_err( - |e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to slash balance"), - )?; + 
self.slash_balance(&deal.provider, &amount_slashed, Reason::ProviderCollateral) + .context("failed to slash balance")?; self.unlock_balance(&deal.provider, &amount_remaining, Reason::ProviderCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to unlock deal provider balance", - ) - })?; + .context("failed to unlock deal provider balance")?; Ok(amount_slashed) } @@ -501,20 +465,10 @@ where } self.unlock_balance(&deal.provider, &deal.provider_collateral, Reason::ProviderCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed unlocking deal provider balance", - ) - })?; + .context("failed unlocking deal provider balance")?; self.unlock_balance(&deal.client, &deal.client_collateral, Reason::ClientCollateral) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed unlocking deal client balance", - ) - })?; + .context("failed unlocking deal client balance")?; Ok(()) } @@ -530,13 +484,19 @@ where &self, addr: Address, amount_to_lock: &TokenAmount, - ) -> anyhow::Result { - let prev_locked = self.locked_table.as_ref().unwrap().get(&addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance") - })?; - let escrow_balance = self.escrow_table.as_ref().unwrap().get(&addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance") - })?; + ) -> Result { + let prev_locked = self + .locked_table + .as_ref() + .unwrap() + .get(&addr) + .context("failed to get locked balance")?; + let escrow_balance = self + .escrow_table + .as_ref() + .unwrap() + .get(&addr) + .context("failed to get escrow balance")?; Ok((prev_locked + amount_to_lock) <= escrow_balance) } @@ -549,13 +509,19 @@ where return Err(actor_error!(illegal_state, "cannot lock negative amount {}", amount)); } - let prev_locked = self.locked_table.as_ref().unwrap().get(addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to 
get locked balance") - })?; + let prev_locked = self + .locked_table + .as_ref() + .unwrap() + .get(addr) + .context("failed to get locked balance")?; - let escrow_balance = self.escrow_table.as_ref().unwrap().get(addr).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance") - })?; + let escrow_balance = self + .escrow_table + .as_ref() + .unwrap() + .get(addr) + .context("failed to get escrow balance")?; if &prev_locked + amount > escrow_balance { return Err(actor_error!(insufficient_funds; @@ -564,9 +530,11 @@ where addr, escrow_balance, prev_locked, amount)); } - self.locked_table.as_mut().unwrap().add(addr, amount).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add locked balance") - })?; + self.locked_table + .as_mut() + .unwrap() + .add(addr, amount) + .context("failed to add locked balance")?; Ok(()) } @@ -597,7 +565,7 @@ where addr: &Address, amount: &TokenAmount, lock_reason: Reason, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if amount.is_negative() { return Err(actor_error!(illegal_state, "unlock negative amount: {}", amount).into()); } @@ -634,17 +602,13 @@ where .as_mut() .unwrap() .must_subtract(from_addr, amount) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "subtract from escrow"))?; + .context("subtract from escrow")?; self.unlock_balance(from_addr, amount, Reason::ClientStorageFee) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "subtract from locked"))?; + .context("subtract from locked")?; // Add subtracted amount to the recipient - self.escrow_table - .as_mut() - .unwrap() - .add(to_addr, amount) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "add to escrow"))?; + self.escrow_table.as_mut().unwrap().add(to_addr, amount).context("add to escrow")?; Ok(()) } @@ -654,7 +618,7 @@ where addr: &Address, amount: &TokenAmount, lock_reason: Reason, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if 
amount.is_negative() { return Err(actor_error!(illegal_state, "negative amount to slash: {}", amount).into()); } From 788eafefc2f49f8709a422f83c83b83f75b8e132 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 12 Apr 2022 18:13:12 +0200 Subject: [PATCH 05/10] migrate miner --- Cargo.lock | 2 +- actors/miner/Cargo.toml | 1 - actors/miner/src/bitfield_queue.rs | 28 +- actors/miner/src/deadline_assignment.rs | 9 +- actors/miner/src/deadline_state.rs | 411 ++++++-------- actors/miner/src/deadlines.rs | 10 +- actors/miner/src/expiration_queue.rs | 167 +++--- actors/miner/src/lib.rs | 678 +++++++----------------- actors/miner/src/partition_state.rs | 204 +++---- actors/miner/src/sector_map.rs | 70 +-- actors/miner/src/sectors.rs | 33 +- actors/miner/src/state.rs | 268 +++++----- actors/miner/tests/util.rs | 1 - actors/runtime/Cargo.toml | 1 + actors/runtime/src/actor_error.rs | 45 +- 15 files changed, 820 insertions(+), 1108 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6bce587df..15cb65444 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -634,7 +634,6 @@ dependencies = [ name = "fil_actor_miner" version = "8.0.0-alpha.1" dependencies = [ - "anyhow", "byteorder", "cid", "fil_actor_account", @@ -769,6 +768,7 @@ dependencies = [ "cid", "derive_builder", "fvm_ipld_amt", + "fvm_ipld_bitfield", "fvm_ipld_blockstore", "fvm_ipld_encoding", "fvm_ipld_hamt", diff --git a/actors/miner/Cargo.toml b/actors/miner/Cargo.toml index ef2c10f47..4af84b0c3 100644 --- a/actors/miner/Cargo.toml +++ b/actors/miner/Cargo.toml @@ -26,7 +26,6 @@ num-derive = "0.3.3" lazy_static = "1.4.0" log = "0.4.14" byteorder = "1.4.3" -anyhow = "1.0.56" itertools = "0.10.3" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/actors/miner/src/bitfield_queue.rs b/actors/miner/src/bitfield_queue.rs index 6d3c1c969..d466c8abc 100644 --- a/actors/miner/src/bitfield_queue.rs +++ b/actors/miner/src/bitfield_queue.rs @@ -4,7 +4,7 @@ use std::convert::TryInto; use cid::Cid; 
-use fil_actors_runtime::{ActorDowncast, Array}; +use fil_actors_runtime::{ActorContext, ActorDowncast, ActorError, Array}; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -19,12 +19,16 @@ pub struct BitFieldQueue<'db, BS> { } impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { - pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result { + pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { Ok(Self { amt: Array::load(root, store)?, quant }) } /// Adds values to the queue entry for an epoch. - pub fn add_to_queue(&mut self, raw_epoch: ChainEpoch, values: &BitField) -> anyhow::Result<()> { + pub fn add_to_queue( + &mut self, + raw_epoch: ChainEpoch, + values: &BitField, + ) -> Result<(), ActorError> { if values.is_empty() { // nothing to do. return Ok(()); @@ -50,7 +54,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { &mut self, epoch: ChainEpoch, values: impl IntoIterator, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.add_to_queue(epoch, &BitField::try_from_bits(values)?) } @@ -58,7 +62,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { /// shifting other bits down and removing any newly empty entries. /// /// See the docs on `BitField::cut` to better understand what it does. 
- pub fn cut(&mut self, to_cut: &BitField) -> anyhow::Result<()> { + pub fn cut(&mut self, to_cut: &BitField) -> Result<(), ActorError> { let mut epochs_to_remove = Vec::::new(); self.amt @@ -70,14 +74,12 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { } else { **bitfield = bf; } - - Ok(()) }) - .map_err(|e| e.downcast_wrap("failed to cut from bitfield queue"))?; + .context("failed to cut from bitfield queue")?; self.amt .batch_delete(epochs_to_remove, true) - .map_err(|e| e.downcast_wrap("failed to remove empty epochs from bitfield queue"))?; + .context("failed to remove empty epochs from bitfield queue")?; Ok(()) } @@ -85,7 +87,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { pub fn add_many_to_queue_values( &mut self, values: impl IntoIterator, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Pre-quantize to reduce the number of updates. let mut quantized_values: Vec<_> = values .into_iter() @@ -110,19 +112,19 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { /// Removes and returns all values with keys less than or equal to until. /// Modified return value indicates whether this structure has been changed by the call. 
- pub fn pop_until(&mut self, until: ChainEpoch) -> anyhow::Result<(BitField, bool)> { + pub fn pop_until(&mut self, until: ChainEpoch) -> Result<(BitField, bool), ActorError> { let mut popped_values = BitField::new(); let mut popped_keys = Vec::::new(); self.amt.for_each_while(|epoch, bitfield| { if epoch as ChainEpoch > until { // break - return Ok(false); + return false; } popped_keys.push(epoch); popped_values |= bitfield; - Ok(true) + true })?; if popped_keys.is_empty() { diff --git a/actors/miner/src/deadline_assignment.rs b/actors/miner/src/deadline_assignment.rs index 72fb73023..9f3ffa86e 100644 --- a/actors/miner/src/deadline_assignment.rs +++ b/actors/miner/src/deadline_assignment.rs @@ -4,9 +4,7 @@ use std::cmp::Ordering; use std::collections::BinaryHeap; -use anyhow::anyhow; - -use fil_actors_runtime::runtime::Policy; +use fil_actors_runtime::{actor_error, runtime::Policy, ActorError}; use super::{Deadline, SectorOnChainInfo}; @@ -140,7 +138,7 @@ pub fn assign_deadlines( partition_size: u64, deadlines: &[Option], sectors: Vec, -) -> anyhow::Result>> { +) -> Result>, ActorError> { struct Entry { partition_size: u64, info: DeadlineAssignmentInfo, @@ -189,7 +187,8 @@ pub fn assign_deadlines( let info = &mut heap.peek_mut().unwrap().info; if info.max_partitions_reached(partition_size, max_partitions) { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "max partitions limit {} reached for all deadlines", max_partitions )); diff --git a/actors/miner/src/deadline_state.rs b/actors/miner/src/deadline_state.rs index 40816d52f..73660335e 100644 --- a/actors/miner/src/deadline_state.rs +++ b/actors/miner/src/deadline_state.rs @@ -4,18 +4,16 @@ use std::cmp; use std::collections::BTreeSet; -use anyhow::anyhow; use cid::multihash::Code; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorDowncast, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; use 
fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::CborStore; use fvm_shared::clock::{ChainEpoch, QuantSpec}; use fvm_shared::econ::TokenAmount; -use fvm_shared::error::ExitCode; use fvm_shared::sector::{PoStProof, SectorSize}; use num_traits::{Signed, Zero}; @@ -55,17 +53,13 @@ impl Deadlines { policy: &Policy, store: &BS, deadline_idx: u64, - ) -> anyhow::Result { + ) -> Result { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!(actor_error!( - illegal_argument, - "invalid deadline {}", - deadline_idx - ))); + return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } store.get_cbor(&self.due[deadline_idx as usize])?.ok_or_else(|| { - anyhow!(actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx)) + actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx) }) } @@ -73,8 +67,8 @@ impl Deadlines { &self, policy: &Policy, store: &BS, - mut f: impl FnMut(u64, Deadline) -> anyhow::Result<()>, - ) -> anyhow::Result<()> { + mut f: impl FnMut(u64, Deadline) -> Result<(), ActorError>, + ) -> Result<(), ActorError> { for i in 0..(self.due.len() as u64) { let index = i; let deadline = self.load_deadline(policy, store, index)?; @@ -89,9 +83,9 @@ impl Deadlines { store: &BS, deadline_idx: u64, deadline: &Deadline, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!("invalid deadline {}", deadline_idx)); + return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } deadline.validate_state()?; @@ -181,24 +175,24 @@ pub struct DisputeInfo { } impl Deadline { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let empty_partitions_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_PARTITIONS_AMT_BITWIDTH) .flush() - .map_err(|e| e.downcast_wrap("Failed to create empty states array"))?; + 
.context("Failed to create empty states array")?; let empty_deadline_expiration_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_EXPIRATIONS_AMT_BITWIDTH) .flush() - .map_err(|e| e.downcast_wrap("Failed to create empty states array"))?; + .context("Failed to create empty states array")?; let empty_post_submissions_array = Array::<(), BS>::new_with_bit_width( store, DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .map_err(|e| e.downcast_wrap("Failed to create empty states array"))?; + .context("Failed to create empty states array")?; let empty_sectors_array = Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) .flush() - .map_err(|e| e.downcast_wrap("Failed to construct empty sectors snapshot array"))?; + .context("Failed to construct empty sectors snapshot array")?; Ok(Self { partitions: empty_partitions_array, expirations_epochs: empty_deadline_expiration_array, @@ -217,28 +211,28 @@ impl Deadline { pub fn partitions_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { + ) -> Result, ActorError> { Ok(Array::load(&self.partitions, store)?) } pub fn optimistic_proofs_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { + ) -> Result, ActorError> { Ok(Array::load(&self.optimistic_post_submissions, store)?) } pub fn partitions_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { + ) -> Result, ActorError> { Ok(Array::load(&self.partitions_snapshot, store)?) } pub fn optimistic_proofs_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, - ) -> anyhow::Result> { + ) -> Result, ActorError> { Ok(Array::load(&self.optimistic_post_submissions_snapshot, store)?) 
} @@ -246,17 +240,12 @@ impl Deadline { &self, store: &BS, partition_idx: u64, - ) -> anyhow::Result { + ) -> Result { let partitions = Array::::load(&self.partitions, store)?; let partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to lookup partition {}", partition_idx), - ) - })? + .with_context(|| format!("failed to lookup partition {}", partition_idx))? .ok_or_else(|| actor_error!(not_found, "no partition {}", partition_idx))?; Ok(partition.clone()) @@ -266,17 +255,12 @@ impl Deadline { &self, store: &BS, partition_idx: u64, - ) -> anyhow::Result { + ) -> Result { let partitions = Array::::load(&self.partitions_snapshot, store)?; let partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to lookup partition snapshot {}", partition_idx), - ) - })? + .with_context(|| format!("failed to lookup partition snapshot {}", partition_idx))? .ok_or_else(|| actor_error!(not_found, "no partition snapshot {}", partition_idx))?; Ok(partition.clone()) @@ -289,19 +273,18 @@ impl Deadline { expiration_epoch: ChainEpoch, partitions: &[u64], quant: QuantSpec, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Avoid doing any work if there's nothing to reschedule. 
if partitions.is_empty() { return Ok(()); } let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; + .context("failed to load expiration queue")?; queue .add_to_queue_values(expiration_epoch, partitions.iter().copied()) - .map_err(|e| e.downcast_wrap("failed to mutate expiration queue"))?; - self.expirations_epochs = - queue.amt.flush().map_err(|e| e.downcast_wrap("failed to save expiration queue"))?; + .context("failed to mutate expiration queue")?; + self.expirations_epochs = queue.amt.flush().context("failed to save expiration queue")?; Ok(()) } @@ -313,7 +296,7 @@ impl Deadline { store: &BS, until: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let (expired_partitions, modified) = self.pop_expired_partitions(store, until, quant)?; if !modified { @@ -333,17 +316,13 @@ impl Deadline { // For each partition with an expiry, remove and collect expirations from the partition queue. for i in expired_partitions.iter() { let partition_idx = i; - let mut partition = partitions - .get(partition_idx)? 
- .cloned() - .ok_or_else(|| anyhow!("missing expected partition {}", partition_idx))?; + let mut partition = partitions.get(partition_idx)?.cloned().ok_or_else(|| { + actor_error!(illegal_state, "missing expected partition {}", partition_idx) + })?; let partition_expiration = - partition.pop_expired_sectors(store, until, quant).map_err(|e| { - e.downcast_wrap(format!( - "failed to pop expired sectors from partition {}", - partition_idx - )) + partition.pop_expired_sectors(store, until, quant).with_context(|| { + format!("failed to pop expired sectors from partition {}", partition_idx) })?; if !partition_expiration.early_sectors.is_empty() { @@ -397,7 +376,7 @@ impl Deadline { mut sectors: &[SectorOnChainInfo], sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let mut total_power = PowerPair::zero(); if sectors.is_empty() { return Ok(total_power); @@ -456,12 +435,11 @@ impl Deadline { self.partitions = partitions.flush()?; // Next, update the expiration queue. 
- let mut deadline_expirations = - BitFieldQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration epochs"))?; + let mut deadline_expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .context("failed to load expiration epochs")?; deadline_expirations .add_many_to_queue_values(partition_deadline_updates.iter().copied()) - .map_err(|e| e.downcast_wrap("failed to add expirations for new deadlines"))?; + .context("failed to add expirations for new deadlines")?; self.expirations_epochs = deadline_expirations.amt.flush()?; Ok(total_power) @@ -472,7 +450,7 @@ impl Deadline { store: &BS, max_partitions: u64, max_sectors: u64, - ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { let mut partitions = self.partitions_amt(store)?; let mut partitions_finished = Vec::::new(); @@ -481,9 +459,10 @@ impl Deadline { for i in self.early_terminations.iter() { let partition_idx = i; - let mut partition = match partitions.get(partition_idx).map_err(|e| { - e.downcast_wrap(format!("failed to load partition {}", partition_idx)) - })? { + let mut partition = match partitions + .get(partition_idx) + .with_context(|| format!("failed to load partition {}", partition_idx))? + { Some(partition) => partition.clone(), None => { partitions_finished.push(partition_idx); @@ -494,7 +473,7 @@ impl Deadline { // Pop early terminations. 
let (partition_result, more) = partition .pop_early_terminations(store, max_sectors - result.sectors_processed) - .map_err(|e| e.downcast_wrap("failed to pop terminations from partition"))?; + .context("failed to pop terminations from partition")?; result += partition_result; @@ -504,9 +483,9 @@ impl Deadline { } // Save partition - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_wrap(format!("failed to store partition {}", partition_idx)) - })?; + partitions + .set(partition_idx, partition) + .with_context(|| format!("failed to store partition {}", partition_idx))?; if !result.below_limit(max_partitions, max_sectors) { break; @@ -519,8 +498,7 @@ impl Deadline { } // Save deadline's partitions - self.partitions = - partitions.flush().map_err(|e| e.downcast_wrap("failed to update partitions"))?; + self.partitions = partitions.flush().context("failed to update partitions")?; // Update global early terminations bitfield. let no_early_terminations = self.early_terminations.is_empty(); @@ -532,11 +510,10 @@ impl Deadline { store: &BS, until: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result<(BitField, bool)> { + ) -> Result<(BitField, bool), ActorError> { let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; - let (popped, modified) = expirations - .pop_until(until) - .map_err(|e| e.downcast_wrap("failed to pop expiring partitions"))?; + let (popped, modified) = + expirations.pop_until(until).context("failed to pop expiring partitions")?; if modified { self.expirations_epochs = expirations.amt.flush()?; @@ -555,16 +532,14 @@ impl Deadline { partition_sectors: &mut PartitionSectorMap, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let mut partitions = self.partitions_amt(store)?; let mut power_lost = PowerPair::zero(); for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_wrap(format!("failed 
to load partition {}", partition_idx)) - })? + .with_context(|| format!("failed to load partition {}", partition_idx))? .ok_or_else( || actor_error!(not_found; "failed to find partition {}", partition_idx), )? @@ -580,16 +555,13 @@ impl Deadline { sector_size, quant, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to terminate sectors in partition {}", - partition_idx - )) + .with_context(|| { + format!("failed to terminate sectors in partition {}", partition_idx) })?; - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_wrap(format!("failed to store updated partition {}", partition_idx)) - })?; + partitions + .set(partition_idx, partition) + .with_context(|| format!("failed to store updated partition {}", partition_idx))?; if !removed.is_empty() { // Record that partition now has pending early terminations. @@ -606,8 +578,7 @@ impl Deadline { } // save partitions back - self.partitions = - partitions.flush().map_err(|e| e.downcast_wrap("failed to persist partitions"))?; + self.partitions = partitions.flush().context("failed to persist partitions")?; Ok(power_lost) } @@ -628,10 +599,9 @@ impl Deadline { BitField, // dead PowerPair, // removed power ), - anyhow::Error, + ActorError, > { - let old_partitions = - self.partitions_amt(store).map_err(|e| e.downcast_wrap("failed to load partitions"))?; + let old_partitions = self.partitions_amt(store).context("failed to load partitions")?; let partition_count = old_partitions.count(); let to_remove_set: BTreeSet<_> = to_remove @@ -669,7 +639,7 @@ impl Deadline { // corresponding index, like the Go impl does old_partitions - .for_each(|partition_idx, partition| { + .try_for_each::<_, ActorError>(|partition_idx, partition| { // If we're keeping the partition as-is, append it to the new partitions array. 
if !to_remove_set.contains(&partition_idx) { new_partitions.set(new_partitions.count(), partition.clone())?; @@ -683,8 +653,7 @@ impl Deadline { illegal_argument, "cannot remove partition {}: has faults", partition_idx - ) - .into()); + )); } // Don't allow removing partitions with unproven sectors @@ -707,11 +676,10 @@ impl Deadline { Ok(()) }) - .map_err(|e| e.downcast_wrap("while removing partitions"))?; + .context("while removing partitions")?; - self.partitions = new_partitions - .flush() - .map_err(|e| e.downcast_wrap("failed to persist new partition table"))?; + self.partitions = + new_partitions.flush().context("failed to persist new partition table")?; let dead = BitField::union(&all_dead_sectors); let live = BitField::union(&all_live_sectors); @@ -725,16 +693,14 @@ impl Deadline { // Update expiration bitfields. let mut expiration_epochs = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; + .context("failed to load expiration queue")?; - expiration_epochs.cut(to_remove).map_err(|e| { - e.downcast_wrap("failed cut removed partitions from deadline expiration queue") - })?; + expiration_epochs + .cut(to_remove) + .context("failed cut removed partitions from deadline expiration queue")?; - self.expirations_epochs = expiration_epochs - .amt - .flush() - .map_err(|e| e.downcast_wrap("failed persist deadline expiration queue"))?; + self.expirations_epochs = + expiration_epochs.amt.flush().context("failed persist deadline expiration queue")?; Ok((live, dead, removed_power)) } @@ -747,7 +713,7 @@ impl Deadline { quant: QuantSpec, fault_expiration_epoch: ChainEpoch, partition_sectors: &mut PartitionSectorMap, - ) -> anyhow::Result { + ) -> Result { let mut partitions = self.partitions_amt(store)?; // Record partitions with some fault, for subsequently indexing in the deadline. 
@@ -758,12 +724,7 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {}", partition_idx), - ) - })? + .with_context(|| format!("failed to load partition {}", partition_idx))? .ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))? .clone(); @@ -776,11 +737,8 @@ impl Deadline { sector_size, quant, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to declare faults in partition {}", - partition_idx - )) + .with_context(|| { + format!("failed to declare faults in partition {}", partition_idx) })?; self.faulty_power += &partition_new_faulty_power; @@ -789,17 +747,12 @@ impl Deadline { partitions_with_fault.push(partition_idx); } - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store partition {}", partition_idx), - ) - })?; + partitions + .set(partition_idx, partition) + .with_context(|| format!("failed to store partition {}", partition_idx))?; } - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root") - })?; + self.partitions = partitions.flush().context("failed to store partitions root")?; self.add_expiration_partitions( store, @@ -807,12 +760,7 @@ impl Deadline { &partitions_with_fault, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update expirations for partitions with faults", - ) - })?; + .context("failed to update expirations for partitions with faults")?; Ok(power_delta) } @@ -823,38 +771,28 @@ impl Deadline { sectors: &Sectors<'_, BS>, sector_size: SectorSize, partition_sectors: &mut PartitionSectorMap, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut partitions = self.partitions_amt(store)?; for (partition_idx, 
sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {}", partition_idx), - ) - })? + .with_context(|| format!("failed to load partition {}", partition_idx))? .ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))? .clone(); partition .declare_faults_recovered(sectors, sector_size, sector_numbers) - .map_err(|e| e.downcast_wrap("failed to add recoveries"))?; + .context("failed to add recoveries")?; - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update partition {}", partition_idx), - ) - })?; + partitions + .set(partition_idx, partition) + .with_context(|| format!("failed to update partition {}", partition_idx))?; } // Power is not regained until the deadline end, when the recovery is confirmed. - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root") - })?; + self.partitions = partitions.flush().context("failed to store partitions root")?; Ok(()) } @@ -869,9 +807,7 @@ impl Deadline { fault_expiration_epoch: ChainEpoch, sectors: Cid, ) -> Result<(PowerPair, PowerPair), ActorError> { - let mut partitions = self.partitions_amt(store).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load partitions") - })?; + let mut partitions = self.partitions_amt(store).context("failed to load partitions")?; let mut detected_any = false; let mut rescheduled_partitions = Vec::::new(); @@ -886,12 +822,7 @@ impl Deadline { let mut partition = partitions .get(partition_idx) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {}", partition_idx), - ) - })? + .with_context(|| format!("failed to load partition {}", partition_idx))? 
.ok_or_else(|| actor_error!(illegal_state; "no partition {}", partition_idx))? .clone(); @@ -906,14 +837,10 @@ impl Deadline { // Ok, we actually need to process this partition. Make sure we save the partition state back. detected_any = true; - let (part_power_delta, part_penalized_power, part_new_faulty_power) = partition - .record_missed_post(store, fault_expiration_epoch, quant) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to record missed PoSt for partition {}", partition_idx), - ) - })?; + let (part_power_delta, part_penalized_power, part_new_faulty_power) = + partition.record_missed_post(store, fault_expiration_epoch, quant).with_context( + || format!("failed to record missed PoSt for partition {}", partition_idx), + )?; // We marked some sectors faulty, we need to record the new // expiration. We don't want to do this if we're just penalizing @@ -923,12 +850,9 @@ impl Deadline { } // Save new partition state. - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update partition {}", partition_idx), - ) - })?; + partitions + .set(partition_idx, partition) + .with_context(|| format!("failed to update partition {}", partition_idx))?; self.faulty_power += &part_new_faulty_power; @@ -938,9 +862,7 @@ impl Deadline { // Save modified deadline state. if detected_any { - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions") - })?; + self.partitions = partitions.flush().context("failed to store partitions")?; } self.add_expiration_partitions( @@ -949,12 +871,7 @@ impl Deadline { &rescheduled_partitions, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update deadline expiration queue", - ) - })?; + .context("failed to update deadline expiration queue")?; // Reset PoSt submissions. 
self.partitions_posted = BitField::new(); @@ -965,43 +882,40 @@ impl Deadline { DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to clear pending proofs array") - })?; + .context("failed to clear pending proofs array")?; // only snapshot sectors if there's a proof that might be disputed (this is equivalent to asking if the OptimisticPoStSubmissionsSnapshot is empty) if self.optimistic_post_submissions != self.optimistic_post_submissions_snapshot { self.sectors_snapshot = sectors; } else { self.sectors_snapshot = - Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH).flush().map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to clear sectors snapshot array", - ) - }, - )?; + Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) + .flush() + .context("failed to clear sectors snapshot array")?; } Ok((power_delta, penalized_power)) } + pub fn for_each( &self, store: &BS, - f: impl FnMut(u64, &Partition) -> anyhow::Result<()>, - ) -> anyhow::Result<()> { + f: impl FnMut(u64, &Partition) -> Result<(), ActorError>, + ) -> Result<(), ActorError> { let parts = self.partitions_amt(store)?; - parts.for_each(f)?; + parts.try_for_each(f)?; Ok(()) } - pub fn validate_state(&self) -> anyhow::Result<()> { + pub fn validate_state(&self) -> Result<(), ActorError> { if self.live_sectors > self.total_sectors { - return Err(anyhow!("deadline left with more live sectors than total")); + return Err(actor_error!( + illegal_state, + "deadline left with more live sectors than total" + )); } if self.faulty_power.raw.is_negative() || self.faulty_power.qa.is_negative() { - return Err(anyhow!("deadline left with negative faulty power")); + return Err(actor_error!(illegal_state, "deadline left with negative faulty power")); } Ok(()) @@ -1011,19 +925,18 @@ impl Deadline { &self, store: &BS, partitions: BitField, - ) -> anyhow::Result { - let 
partitions_snapshot = self - .partitions_snapshot_amt(store) - .map_err(|e| e.downcast_wrap("failed to load partitions {}"))?; + ) -> Result { + let partitions_snapshot = + self.partitions_snapshot_amt(store).context("failed to load partitions {}")?; let mut all_sectors = Vec::new(); let mut all_ignored = Vec::new(); let mut disputed_sectors = PartitionSectorMap::default(); let mut disputed_power = PowerPair::zero(); for part_idx in partitions.iter() { - let partition_snapshot = partitions_snapshot - .get(part_idx)? - .ok_or_else(|| anyhow!("failed to find partition {}", part_idx))?; + let partition_snapshot = partitions_snapshot.get(part_idx)?.ok_or_else(|| { + actor_error!(illegal_state, "failed to find partition {}", part_idx) + })?; // Record sectors for proof verification all_sectors.push(partition_snapshot.sectors.clone()); @@ -1116,24 +1029,24 @@ impl Deadline { quant: QuantSpec, fault_expiration: ChainEpoch, post_partitions: &mut [PoStPartition], - ) -> anyhow::Result { + ) -> Result { let partition_indexes = BitField::try_from_bits(post_partitions.iter().map(|p| p.index)) .map_err(|_| actor_error!(illegal_argument; "partition index out of bitfield range"))?; let num_partitions = partition_indexes.len(); if num_partitions != post_partitions.len() as u64 { - return Err(anyhow!(actor_error!(illegal_argument, "duplicate partitions proven"))); + return Err(actor_error!(illegal_argument, "duplicate partitions proven")); } // First check to see if we're proving any already proven partitions. // This is faster than checking one by one. 
let already_proven = &self.partitions_posted & &partition_indexes; if !already_proven.is_empty() { - return Err(anyhow!(actor_error!( + return Err(actor_error!( illegal_argument, "partition already proven: {:?}", already_proven - ))); + )); } let mut partitions = self.partitions_amt(store)?; @@ -1150,7 +1063,7 @@ impl Deadline { for post in post_partitions { let mut partition = partitions .get(post.index) - .map_err(|e| e.downcast_wrap(format!("failed to load partition {}", post.index)))? + .with_context(|| format!("failed to load partition {}", post.index))? .ok_or_else(|| actor_error!(not_found; "no such partition {}", post.index))? .clone(); @@ -1166,11 +1079,8 @@ impl Deadline { fault_expiration, &mut post.skipped, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to add skipped faults to partition {}", - post.index - )) + .with_context(|| { + format!("failed to add skipped faults to partition {}", post.index) })?; // If we have new faulty power, we've added some faults. We need @@ -1180,12 +1090,9 @@ impl Deadline { } let recovered_power = - partition.recover_faults(store, sectors, sector_size, quant).map_err(|e| { - e.downcast_wrap(format!( - "failed to recover faulty sectors for partition {}", - post.index - )) - })?; + partition.recover_faults(store, sectors, sector_size, quant).with_context( + || format!("failed to recover faulty sectors for partition {}", post.index), + )?; new_power_delta += &partition.activate_unproven(); @@ -1197,12 +1104,9 @@ impl Deadline { all_ignored.push(partition.terminated.clone()); // This will be rolled back if the method aborts with a failed proof. 
- partitions.set(post.index, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update partition {}", post.index), - ) - })?; + partitions + .set(post.index, partition) + .with_context(|| format!("failed to update partition {}", post.index))?; new_faulty_power_total += &new_fault_power; retracted_recovery_power_total += &retracted_recovery_power; @@ -1215,20 +1119,13 @@ impl Deadline { } self.add_expiration_partitions(store, fault_expiration, &rescheduled_partitions, quant) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update expirations for partitions with faults", - ) - })?; + .context("failed to update expirations for partitions with faults")?; // Save everything back. self.faulty_power -= &recovered_power_total; self.faulty_power += &new_faulty_power_total; - self.partitions = partitions.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions") - })?; + self.partitions = partitions.flush().context("failed to persist partitions")?; // Collect all sectors, faults, and recoveries for proof verification. let all_sector_numbers = BitField::union(&all_sectors); @@ -1252,18 +1149,17 @@ impl Deadline { store: &BS, partitions: &BitField, proofs: &[PoStProof], - ) -> anyhow::Result<()> { - let mut proof_arr = self - .optimistic_proofs_amt(store) - .map_err(|e| e.downcast_wrap("failed to load post proofs"))?; + ) -> Result<(), ActorError> { + let mut proof_arr = + self.optimistic_proofs_amt(store).context("failed to load post proofs")?; proof_arr .set( proof_arr.count(), // TODO: Can we do this with out cloning? 
WindowedPoSt { partitions: partitions.clone(), proofs: proofs.to_vec() }, ) - .map_err(|e| e.downcast_wrap("failed to store proof"))?; - let root = proof_arr.flush().map_err(|e| e.downcast_wrap("failed to save proofs"))?; + .context("failed to store proof")?; + let root = proof_arr.flush().context("failed to save proofs")?; self.optimistic_post_submissions = root; Ok(()) } @@ -1275,18 +1171,18 @@ impl Deadline { &mut self, store: &BS, idx: u64, - ) -> anyhow::Result<(BitField, Vec)> { + ) -> Result<(BitField, Vec), ActorError> { let mut proof_arr = self .optimistic_proofs_snapshot_amt(store) - .map_err(|e| e.downcast_wrap("failed to load post proofs snapshot amt"))?; + .context("failed to load post proofs snapshot amt")?; // Extract and remove the proof from the proofs array, leaving a hole. // This will not affect concurrent attempts to refute other proofs. let post = proof_arr .delete(idx) - .map_err(|e| e.downcast_wrap(format!("failed to retrieve proof {}", idx)))? + .with_context(|| format!("failed to retrieve proof {}", idx))? .ok_or_else(|| actor_error!(illegal_argument, "proof {} not found", idx))?; - let root = proof_arr.flush().map_err(|e| e.downcast_wrap("failed to save proofs"))?; + let root = proof_arr.flush().context("failed to save proofs")?; self.optimistic_post_submissions_snapshot = root; Ok((post.partitions, post.proofs)) } @@ -1307,7 +1203,7 @@ impl Deadline { partition_sectors: &mut PartitionSectorMap, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut partitions = self.partitions_amt(store)?; // track partitions with moved expirations. @@ -1315,9 +1211,10 @@ impl Deadline { let mut all_replaced = Vec::new(); for (partition_idx, sector_numbers) in partition_sectors.iter() { - let mut partition = match partitions.get(partition_idx).map_err(|e| { - e.downcast_wrap(format!("failed to load partition {}", partition_idx)) - })? 
{ + let mut partition = match partitions + .get(partition_idx) + .with_context(|| format!("failed to load partition {}", partition_idx))? + { Some(partition) => partition.clone(), None => { // We failed to find the partition, it could have moved @@ -1336,11 +1233,8 @@ impl Deadline { sector_size, quant, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to reschedule expirations in partition {}", - partition_idx - )) + .with_context(|| { + format!("failed to reschedule expirations in partition {}", partition_idx) })?; if replaced.is_empty() { @@ -1350,17 +1244,16 @@ impl Deadline { all_replaced.extend(replaced); rescheduled_partitions.push(partition_idx); - partitions.set(partition_idx, partition).map_err(|e| { - e.downcast_wrap(format!("failed to store partition {}", partition_idx)) - })?; + partitions + .set(partition_idx, partition) + .with_context(|| format!("failed to store partition {}", partition_idx))?; } if !rescheduled_partitions.is_empty() { - self.partitions = - partitions.flush().map_err(|e| e.downcast_wrap("failed to save partitions"))?; + self.partitions = partitions.flush().context("failed to save partitions")?; self.add_expiration_partitions(store, expiration, &rescheduled_partitions, quant) - .map_err(|e| e.downcast_wrap("failed to reschedule partition expirations"))?; + .context("failed to reschedule partition expirations")?; } Ok(all_replaced) diff --git a/actors/miner/src/deadlines.rs b/actors/miner/src/deadlines.rs index 3e625f908..01d042be1 100644 --- a/actors/miner/src/deadlines.rs +++ b/actors/miner/src/deadlines.rs @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::Array; +use fil_actors_runtime::{actor_error, ActorError, Array}; use fvm_ipld_blockstore::Blockstore; use fvm_shared::clock::{ChainEpoch, QuantSpec}; @@ -36,7 +36,7 @@ impl Deadlines { policy: &Policy, store: &BS, sector_number: SectorNumber, - ) -> anyhow::Result<(u64, u64)> { + ) -> Result<(u64, 
u64), ActorError> { for i in 0..self.due.len() { let deadline_idx = i as u64; let deadline = self.load_deadline(policy, store, deadline_idx)?; @@ -47,9 +47,9 @@ impl Deadlines { partitions.for_each_while(|i, partition| { if partition.sectors.get(sector_number) { partition_idx = Some(i); - Ok(false) + false } else { - Ok(true) + true } })?; @@ -58,7 +58,7 @@ impl Deadlines { } } - Err(anyhow::anyhow!("sector {} not due at any deadline", sector_number)) + Err(actor_error!(illegal_state, "sector {} not due at any deadline", sector_number)) } } diff --git a/actors/miner/src/expiration_queue.rs b/actors/miner/src/expiration_queue.rs index 8f7bb977e..c86c4f635 100644 --- a/actors/miner/src/expiration_queue.rs +++ b/actors/miner/src/expiration_queue.rs @@ -4,10 +4,9 @@ use std::collections::{BTreeMap, BTreeSet}; use std::convert::TryInto; -use anyhow::{anyhow, Context}; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{ActorDowncast, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; use fvm_ipld_amt::{Error as AmtError, ValueMut}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -59,7 +58,7 @@ impl ExpirationSet { on_time_pledge: &TokenAmount, active_power: &PowerPair, faulty_power: &PowerPair, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.on_time_sectors |= on_time_sectors; self.early_sectors |= early_sectors; self.on_time_pledge += on_time_pledge; @@ -78,17 +77,19 @@ impl ExpirationSet { on_time_pledge: &TokenAmount, active_power: &PowerPair, faulty_power: &PowerPair, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Check for sector intersection. This could be cheaper with a combined intersection/difference method used below. 
if !self.on_time_sectors.contains_all(on_time_sectors) { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "removing on-time sectors {:?} not contained in {:?}", on_time_sectors, self.on_time_sectors )); } if !self.early_sectors.contains_all(early_sectors) { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "removing early sectors {:?} not contained in {:?}", early_sectors, self.early_sectors @@ -103,10 +104,10 @@ impl ExpirationSet { // Check underflow. if self.on_time_pledge.is_negative() { - return Err(anyhow!("expiration set pledge underflow: {:?}", self)); + return Err(actor_error!(illegal_state, "expiration set pledge underflow: {:?}", self)); } if self.active_power.qa.is_negative() || self.faulty_power.qa.is_negative() { - return Err(anyhow!("expiration set power underflow: {:?}", self)); + return Err(actor_error!(illegal_state, "expiration set power underflow: {:?}", self)); } self.validate_state()?; Ok(()) @@ -124,25 +125,37 @@ impl ExpirationSet { } /// validates a set of assertions that must hold for expiration sets - pub fn validate_state(&self) -> anyhow::Result<()> { + pub fn validate_state(&self) -> Result<(), ActorError> { if self.on_time_pledge.is_negative() { - return Err(anyhow!("ExpirationSet left with negative pledge")); + return Err(actor_error!(illegal_state, "ExpirationSet left with negative pledge")); } if self.active_power.raw.is_negative() { - return Err(anyhow!("ExpirationSet left with negative raw active power")); + return Err(actor_error!( + illegal_state, + "ExpirationSet left with negative raw active power" + )); } if self.active_power.qa.is_negative() { - return Err(anyhow!("ExpirationSet left with negative qa active power")); + return Err(actor_error!( + illegal_state, + "ExpirationSet left with negative qa active power" + )); } if self.faulty_power.raw.is_negative() { - return Err(anyhow!("ExpirationSet left with negative raw faulty power")); + return Err(actor_error!( + illegal_state, + 
"ExpirationSet left with negative raw faulty power" + )); } if self.faulty_power.qa.is_negative() { - return Err(anyhow!("ExpirationSet left with negative qa faulty power")); + return Err(actor_error!( + illegal_state, + "ExpirationSet left with negative qa faulty power" + )); } Ok(()) @@ -162,7 +175,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { /// /// Epochs provided to subsequent method calls will be quantized upwards to quanta mod offsetSeed before being /// written to/read from queue entries. - pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result { + pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { Ok(Self { amt: Array::load(root, store)?, quant }) } @@ -173,7 +186,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, sectors: impl IntoIterator, sector_size: SectorSize, - ) -> anyhow::Result<(BitField, PowerPair, TokenAmount)> { + ) -> Result<(BitField, PowerPair, TokenAmount), ActorError> { let mut total_power = PowerPair::zero(); let mut total_pledge = TokenAmount::zero(); let mut total_sectors = Vec::::new(); @@ -189,7 +202,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &PowerPair::zero(), &group.pledge, ) - .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + .context("failed to record new sector expirations")?; total_sectors.push(sector_numbers); total_power += &group.power; @@ -209,14 +222,14 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { new_expiration: ChainEpoch, sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if sectors.is_empty() { return Ok(()); } let (sector_numbers, power, pledge) = self .remove_active_sectors(sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove sector expirations"))?; + .context("failed to remove sector expirations")?; self.add( new_expiration, @@ -226,7 +239,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &PowerPair::zero(), 
&pledge, ) - .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + .context("failed to record new sector expirations")?; Ok(()) } @@ -240,7 +253,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { new_expiration: ChainEpoch, sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result { + ) -> Result { let mut sectors_total = Vec::new(); let mut expiring_power = PowerPair::zero(); let mut rescheduled_power = PowerPair::zero(); @@ -291,15 +304,19 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { } /// Re-schedules *all* sectors to expire at an early expiration epoch, if they wouldn't expire before then anyway. - pub fn reschedule_all_as_faults(&mut self, fault_expiration: ChainEpoch) -> anyhow::Result<()> { + pub fn reschedule_all_as_faults( + &mut self, + fault_expiration: ChainEpoch, + ) -> Result<(), ActorError> { let mut rescheduled_epochs = Vec::::new(); let mut rescheduled_sectors = BitField::new(); let mut rescheduled_power = PowerPair::zero(); let mut mutated_expiration_sets = Vec::<(ChainEpoch, ExpirationSet)>::new(); - self.amt.for_each(|e, expiration_set| { - let epoch: ChainEpoch = e.try_into()?; + self.amt.try_for_each(|e, expiration_set| { + let epoch: ChainEpoch = + e.try_into().map_err(|e| actor_error!(illegal_state, "{}", e))?; if epoch <= self.quant.quantize_up(fault_expiration) { let mut expiration_set = expiration_set.clone(); @@ -313,7 +330,9 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { rescheduled_epochs.push(e); // sanity check to make sure we're not trying to re-schedule already faulty sectors. if !expiration_set.early_sectors.is_empty() { - return Err(anyhow!( + // TODO: correct exit code? 
+ return Err(actor_error!( + illegal_state, "attempted to re-schedule early expirations to an earlier epoch" )); } @@ -361,7 +380,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, sectors: Vec, sector_size: SectorSize, - ) -> anyhow::Result { + ) -> Result { let mut remaining: BTreeSet = sectors.iter().map(|sector| sector.sector_number).collect(); @@ -375,14 +394,14 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let on_time_sectors: BTreeSet = expiration_set .on_time_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many sectors to reschedule")? + .ok_or_else(|| actor_error!(illegal_argument, "too many sectors to reschedule"))? .map(|i| i as SectorNumber) .collect(); let early_sectors: BTreeSet = expiration_set .early_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many sectors to reschedule")? + .ok_or_else(|| actor_error!(illegal_argument, "too many sectors to reschedule"))? .map(|i| i as SectorNumber) .collect(); @@ -423,7 +442,11 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { })?; if !remaining.is_empty() { - return Err(anyhow!("sectors not found in expiration queue: {:?}", remaining)); + return Err(actor_error!( + not_found, + "sectors not found in expiration queue: {:?}", + remaining + )); } // Re-schedule the removed sectors to their target expiration. 
@@ -441,14 +464,14 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { old_sectors: &[SectorOnChainInfo], new_sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result<(BitField, BitField, PowerPair, TokenAmount)> { + ) -> Result<(BitField, BitField, PowerPair, TokenAmount), ActorError> { let (old_sector_numbers, old_power, old_pledge) = self .remove_active_sectors(old_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove replaced sectors"))?; + .context("failed to remove replaced sectors")?; let (new_sector_numbers, new_power, new_pledge) = self .add_active_sectors(new_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to add replacement sectors"))?; + .context("failed to add replacement sectors")?; Ok(( old_sector_numbers, @@ -469,20 +492,20 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { faults: &BitField, recovering: &BitField, sector_size: SectorSize, - ) -> anyhow::Result<(ExpirationSet, PowerPair)> { + ) -> Result<(ExpirationSet, PowerPair), ActorError> { let mut remaining: BTreeSet<_> = sectors.iter().map(|sector| sector.sector_number).collect(); // ADDRESSED_SECTORS_MAX is defined as 25000, so this will not error. let faults_map: BTreeSet<_> = faults .bounded_iter(policy.addressed_sectors_max) - .context("too many faults to expand")? + .ok_or_else(|| actor_error!(illegal_argument, "too many faults to expand"))? .map(|i| i as SectorNumber) .collect(); let recovering_map: BTreeSet<_> = recovering .bounded_iter(policy.addressed_sectors_max) - .context("too many recoveries to expand")? + .ok_or_else(|| actor_error!(illegal_argument, "too many recoveries to expand"))? .map(|i| i as SectorNumber) .collect(); @@ -511,7 +534,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { // Remove non-faulty sectors. 
let (removed_sector_numbers, removed_power, removed_pledge) = self .remove_active_sectors(&non_faulty_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove on-time recoveries"))?; + .context("failed to remove on-time recoveries")?; removed.on_time_sectors = removed_sector_numbers; removed.active_power = removed_power; removed.on_time_pledge = removed_pledge; @@ -524,14 +547,16 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let on_time_sectors: BTreeSet = expiration_set .on_time_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many on-time sectors to expand")? + .ok_or_else(|| { + actor_error!(illegal_argument, "too many on-time sectors to expand") + })? .map(|i| i as SectorNumber) .collect(); let early_sectors: BTreeSet = expiration_set .early_sectors .bounded_iter(ENTRY_SECTORS_MAX) - .context("too many early sectors to expand")? + .ok_or_else(|| actor_error!(illegal_argument, "too many early sectors to expand"))? .map(|i| i as SectorNumber) .collect(); @@ -580,14 +605,18 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { })?; if !remaining.is_empty() { - return Err(anyhow!("sectors not found in expiration queue: {:?}", remaining)); + return Err(actor_error!( + not_found, + "sectors not found in expiration queue: {:?}", + remaining + )); } Ok((removed, recovering_power)) } /// Removes and aggregates entries from the queue up to and including some epoch. 
- pub fn pop_until(&mut self, until: ChainEpoch) -> anyhow::Result { + pub fn pop_until(&mut self, until: ChainEpoch) -> Result { let mut on_time_sectors = BitField::new(); let mut early_sectors = BitField::new(); let mut active_power = PowerPair::zero(); @@ -597,7 +626,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { self.amt.for_each_while(|i, this_value| { if i as ChainEpoch > until { - return Ok(false); + return false; } popped_keys.push(i); @@ -607,7 +636,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { faulty_power += &this_value.faulty_power; on_time_pledge += &this_value.on_time_pledge; - Ok(true) + true })?; self.amt.batch_delete(popped_keys, true)?; @@ -629,13 +658,13 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { active_power: &PowerPair, faulty_power: &PowerPair, pledge: &TokenAmount, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let epoch = self.quant.quantize_up(raw_epoch); let mut expiration_set = self.may_get(epoch)?; expiration_set .add(on_time_sectors, early_sectors, pledge, active_power, faulty_power) - .map_err(|e| anyhow!("failed to add expiration values for epoch {}: {}", epoch, e))?; + .with_context(|| format!("failed to add expiration values for epoch {}", epoch))?; self.must_update(epoch, expiration_set)?; Ok(()) @@ -649,19 +678,21 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { active_power: &PowerPair, faulty_power: &PowerPair, pledge: &TokenAmount, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let epoch = self.quant.quantize_up(raw_epoch); let mut expiration_set = self .amt .get(epoch.try_into()?) - .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", epoch)))? - .ok_or_else(|| anyhow!("missing expected expiration set at epoch {}", epoch))? + .with_context(|| format!("failed to lookup queue epoch {}", epoch))? + .ok_or_else(|| { + actor_error!(illegal_state, "missing expected expiration set at epoch {}", epoch) + })? 
.clone(); expiration_set .remove(on_time_sectors, early_sectors, pledge, active_power, faulty_power) - .map_err(|e| { - anyhow!("failed to remove expiration values for queue epoch {}: {}", epoch, e) - })?; + .with_context(|| { + format!("failed to remove expiration values for queue epoch {}", epoch) + })?; self.must_update_or_delete(epoch, expiration_set)?; Ok(()) @@ -671,7 +702,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, sectors: &[SectorOnChainInfo], sector_size: SectorSize, - ) -> anyhow::Result<(BitField, PowerPair, TokenAmount)> { + ) -> Result<(BitField, PowerPair, TokenAmount), ActorError> { let mut removed_sector_numbers = Vec::::new(); let mut removed_power = PowerPair::zero(); let mut removed_pledge = TokenAmount::zero(); @@ -707,11 +738,11 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { mut f: impl FnMut( ChainEpoch, &mut ValueMut<'_, ExpirationSet>, - ) -> anyhow::Result, - ) -> anyhow::Result<()> { + ) -> Result, + ) -> Result<(), ActorError> { let mut epochs_emptied = Vec::::new(); - self.amt.for_each_while_mut(|e, expiration_set| { + self.amt.try_for_each_while_mut::<_, ActorError>(|e, expiration_set| { let keep_going = f(e.try_into()?, expiration_set)?; if expiration_set.is_empty() { @@ -728,11 +759,11 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { Ok(()) } - fn may_get(&self, key: ChainEpoch) -> anyhow::Result { + fn may_get(&self, key: ChainEpoch) -> Result { Ok(self .amt .get(key.try_into()?) - .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", key)))? + .with_context(|| format!("failed to lookup queue epoch {}", key))? 
.cloned() .unwrap_or_default()) } @@ -741,10 +772,10 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, epoch: ChainEpoch, expiration_set: ExpirationSet, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.amt .set(epoch.try_into()?, expiration_set) - .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch))) + .with_context(|| format!("failed to set queue epoch {}", epoch)) } /// Since this might delete the node, it's not safe for use inside an iteration. @@ -752,15 +783,15 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &mut self, epoch: ChainEpoch, expiration_set: ExpirationSet, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if expiration_set.is_empty() { self.amt .delete(epoch.try_into()?) - .map_err(|e| e.downcast_wrap(format!("failed to delete queue epoch {}", epoch)))?; + .with_context(|| format!("failed to delete queue epoch {}", epoch))?; } else { self.amt .set(epoch.try_into()?, expiration_set) - .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch)))?; + .with_context(|| format!("failed to set queue epoch {}", epoch))?; } Ok(()) @@ -775,7 +806,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { &self, sector_size: SectorSize, sectors: &[SectorOnChainInfo], - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut declared_expirations = BTreeMap::::new(); let mut sectors_by_number = BTreeMap::::new(); let mut all_remaining = BTreeSet::::new(); @@ -807,7 +838,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { // If sectors remain, traverse next in epoch order. Remaining sectors should be // rescheduled to expire soon, so this traversal should exit early. if !all_remaining.is_empty() { - self.amt.for_each_while(|epoch, es| { + self.amt.try_for_each_while::<_, ActorError>(|epoch, es| { let epoch = epoch as ChainEpoch; // If this set's epoch is one of our declared epochs, we've already processed it // in the loop above, so skip processing here. 
Sectors rescheduled to this epoch @@ -838,7 +869,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { } if !all_remaining.is_empty() { - return Err(anyhow!("some sectors not found in expiration queue")); + return Err(actor_error!(not_found, "some sectors not found in expiration queue")); } // The built-in stable sort is timsort. It will find the two sorted runs and merge them. @@ -939,10 +970,14 @@ fn group_expiration_set( } /// Checks for invalid overlap between bitfield and a set's early sectors. -fn check_no_early_sectors(set: &BTreeSet, es: &ExpirationSet) -> anyhow::Result<()> { +fn check_no_early_sectors(set: &BTreeSet, es: &ExpirationSet) -> Result<(), ActorError> { for u in es.early_sectors.iter() { if set.contains(&(u as u64)) { - return Err(anyhow!("Invalid attempt to group sector {} with an early expiration", u)); + return Err(actor_error!( + illegal_argument, + "Invalid attempt to group sector {} with an early expiration", + u + )); } } Ok(()) diff --git a/actors/miner/src/lib.rs b/actors/miner/src/lib.rs index 2722108f7..ecb65f908 100644 --- a/actors/miner/src/lib.rs +++ b/actors/miner/src/lib.rs @@ -6,7 +6,6 @@ use std::collections::BTreeMap; use std::iter; use std::ops::Neg; -use anyhow::{anyhow, Error}; pub use bitfield_queue::*; use byteorder::{BigEndian, ByteOrder, WriteBytesExt}; use cid::multihash::Code; @@ -18,7 +17,7 @@ pub use deadlines::*; pub use expiration_queue::*; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorDowncast, ActorError, BURNT_FUNDS_ACTOR_ADDR, INIT_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorError, BURNT_FUNDS_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, }; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; @@ -151,12 +150,7 @@ impl Actor { let blake2b = |b: &[u8]| rt.hash_blake2b(b); let offset = assign_proving_period_offset(policy, rt.message().receiver(), current_epoch, 
blake2b) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_SERIALIZATION, - "failed to assign proving period offset", - ) - })?; + .context("failed to assign proving period offset")?; let period_start = current_proving_period_start(policy, current_epoch, offset); if period_start > current_epoch { @@ -185,14 +179,12 @@ impl Actor { params.multi_addresses, params.window_post_proof_type, )?; - let info_cid = rt.store().put_cbor(&info, Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + let info_cid = + rt.store().put_cbor(&info, Blake2b256).context("failed to construct illegal state")?; + + let st = State::new(policy, rt.store(), info_cid, period_start, deadline_idx) + .context("failed to construct state")?; - let st = - State::new(policy, rt.store(), info_cid, period_start, deadline_idx).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct state") - })?; rt.create(&st)?; Ok(()) @@ -250,9 +242,7 @@ impl Actor { }) } - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "could not save miner info") - })?; + state.save_info(rt.store(), &info).context("could not save miner info")?; Ok(()) })?; @@ -321,9 +311,7 @@ impl Actor { } } - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save miner info") - })?; + state.save_info(rt.store(), &info).context("failed to save miner info")?; Ok(()) }) @@ -345,9 +333,7 @@ impl Actor { )?; info.peer_id = params.new_id; - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "could not save miner info") - })?; + state.save_info(rt.store(), &info).context("could not save miner info")?; Ok(()) })?; @@ -373,9 +359,7 @@ impl Actor { )?; info.multi_address = params.new_multi_addrs; - state.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, 
"could not save miner info") - })?; + state.save_info(rt.store(), &info).context("could not save miner info")?; Ok(()) })?; @@ -538,20 +522,15 @@ impl Actor { return Err(actor_error!(illegal_argument, "post commit randomness mismatched")); } - let sectors = Sectors::load(rt.store(), &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors") - })?; + let sectors = + Sectors::load(rt.store(), &state.sectors).context("failed to load sectors")?; let mut deadlines = - state.load_deadlines(rt.store()).map_err(|e| e.wrap("failed to load deadlines"))?; + state.load_deadlines(rt.store()).context("failed to load deadlines")?; - let mut deadline = - deadlines.load_deadline(rt.policy(), rt.store(), params.deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", params.deadline), - ) - })?; + let mut deadline = deadlines + .load_deadline(rt.policy(), rt.store(), params.deadline) + .with_context(|| format!("failed to load deadline {}", params.deadline))?; // Record proven sectors/partitions, returning updates to power and the final set of sectors // proven/skipped. @@ -573,14 +552,8 @@ impl Actor { fault_expiration, &mut params.partitions, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "failed to process post submission for deadline {}", - params.deadline - ), - ) + .with_context(|| { + format!("failed to process post submission for deadline {}", params.deadline) })?; // Make sure we actually proved something. @@ -600,41 +573,25 @@ impl Actor { if post_result.recovered_power.is_zero() { deadline .record_post_proofs(rt.store(), &post_result.partitions, ¶ms.proofs) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to record proof for optimistic verification", - ) - })? + .context("failed to record proof for optimistic verification")? 
} else { // Load sector infos for proof, substituting a known-good sector for known-faulty sectors. // Note: this is slightly sub-optimal, loading info for the recovering sectors again after they were already // loaded above. let sector_infos = sectors .load_for_proof(&post_result.sectors, &post_result.ignored_sectors) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load sectors for post verification", - ) - })?; + .context("failed to load sectors for post verification")?; + verify_windowed_post(rt, current_deadline.challenge, §or_infos, params.proofs) - .map_err(|e| e.wrap("window post failed"))?; + .context("window post failed")?; } let deadline_idx = params.deadline; - deadlines.update_deadline(policy, rt.store(), params.deadline, &deadline).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", deadline_idx), - ) - }, - )?; + deadlines + .update_deadline(policy, rt.store(), params.deadline, &deadline) + .with_context(|| format!("failed to update deadline {}", deadline_idx))?; - state.save_deadlines(rt.store(), deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.save_deadlines(rt.store(), deadlines).context("failed to save deadlines")?; Ok(post_result) })?; @@ -646,7 +603,7 @@ impl Actor { request_update_power(rt, post_result.power_delta)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -699,10 +656,9 @@ impl Actor { info.control_addresses.iter().chain(&[info.worker, info.owner]), )?; let store = rt.store(); - let precommits = - state.get_all_precommitted_sectors(store, sector_numbers).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to get precommits") - })?; + let precommits = state + .get_all_precommitted_sectors(store, sector_numbers) 
+ .context("failed to get precommits")?; // compute data commitments and validate each precommit let mut compute_data_commitments_inputs = Vec::with_capacity(precommits.len()); @@ -804,9 +760,7 @@ impl Actor { proof: params.aggregate_proof, infos: svis, }) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "aggregate seal verify failed") - })?; + .context("aggregate seal verify failed")?; let rew = request_current_epoch_block_reward(rt)?; let pwr = request_current_total_power(rt)?; @@ -835,7 +789,7 @@ impl Actor { )); } burn_funds(rt, aggregate_fee)?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -868,9 +822,8 @@ impl Actor { )?; let sector_store = rt.store().clone(); - let mut sectors = Sectors::load(§or_store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let mut sectors = + Sectors::load(§or_store, &state.sectors).context("failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = TokenAmount::zero(); @@ -1072,21 +1025,11 @@ impl Actor { for &dl_idx in deadlines_to_load.iter() { let mut deadline = deadlines .load_deadline(rt.policy(),rt.store(), dl_idx) - .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", dl_idx), - ) - )?; + .with_context(|| format!("failed to load deadline {}", dl_idx))?; let mut partitions = deadline .partitions_amt(rt.store()) - .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partitions for deadline {}", dl_idx), - ) - )?; + .with_context(||format!("failed to load partitions for deadline {}", dl_idx))?; let quant = state.quant_spec_for_deadline(rt.policy(),dl_idx); @@ -1114,13 +1057,9 @@ impl Actor { new_unsealed_cid: with_details.unsealed_cid, proof: with_details.update.replica_proof.clone(), } - ) 
- .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!("failed to verify replica proof for sector {}", with_details.sector_info.sector_number), - ) - )?; + ).with_context(|| { + format!("failed to verify replica proof for sector {}", with_details.sector_info.sector_number) + })?; let mut new_sector_info = with_details.sector_info.clone(); @@ -1201,12 +1140,7 @@ impl Actor { let mut partition = partitions .get(with_details.update.partition) - .map_err(|e| - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {} partition {}", with_details.update.deadline, with_details.update.partition), - ) - )? + .with_context(|| format!("failed to load deadline {} partition {}", with_details.update.deadline, with_details.update.partition))? .cloned() .ok_or_else(|| actor_error!(not_found, "no such deadline {} partition {}", dl_idx, with_details.update.partition))?; @@ -1217,44 +1151,28 @@ impl Actor { info.sector_size, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to replace sector at deadline {} partition {}", with_details.update.deadline, with_details.update.partition), - ) - })?; + .with_context(|| format!("failed to replace sector at deadline {} partition {}", with_details.update.deadline, with_details.update.partition))?; power_delta += &partition_power_delta; pledge_delta += &partition_pledge_delta; partitions .set(with_details.update.partition, partition) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {} partition {}", with_details.update.deadline, with_details.update.partition), - ) + .with_context(|| { + format!("failed to save deadline {} partition {}", with_details.update.deadline, with_details.update.partition) })?; succeeded.push(new_sector_info.sector_number); new_sectors.push(new_sector_info); } - deadline.partitions = partitions.flush().map_err(|e| { - e.downcast_default( - 
ExitCode::USR_ILLEGAL_STATE, - format!("failed to save partitions for deadline {}", dl_idx), - ) + deadline.partitions = partitions.flush().with_context(|| { + format!("failed to save partitions for deadline {}", dl_idx) })?; deadlines .update_deadline(rt.policy(), rt.store(), dl_idx, &deadline) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {}", dl_idx), - ) - })?; + .with_context(|| format!("failed to save deadline {}", dl_idx))?; } let success_len = succeeded.len(); @@ -1276,19 +1194,10 @@ impl Actor { } // Overwrite sector infos. - sectors.store(new_sectors).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to update sector infos", - ) - })?; + sectors.store(new_sectors).context("failed to update sector infos")?; - state.sectors = sectors.amt.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors") - })?; - state.save_deadlines(rt.store(), deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.sectors = sectors.amt.flush().context("failed to save sectors")?; + state.save_deadlines(rt.store(), deadlines).context("failed to save deadlines")?; BitField::try_from_bits(succeeded).map_err(|_|{ actor_error!(illegal_argument; "invalid sector number") @@ -1369,52 +1278,31 @@ impl Actor { let mut dl_current = deadlines_current .load_deadline(policy, rt.store(), params.deadline) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deadline") - })?; + .context("failed to load deadline")?; // Take the post from the snapshot for dispute. // This operation REMOVES the PoSt from the snapshot so // it can't be disputed again. If this method fails, // this operation must be rolled back. 
- let (partitions, proofs) = - dl_current.take_post_proofs(rt.store(), params.post_index).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load proof for dispute", - ) - })?; + let (partitions, proofs) = dl_current + .take_post_proofs(rt.store(), params.post_index) + .context("failed to load proof for dispute")?; // Load the partition info we need for the dispute. let mut dispute_info = dl_current .load_partitions_for_dispute(rt.store(), partitions) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load partition for dispute", - ) - })?; + .context("failed to load partition for dispute")?; // This includes power that is no longer active (e.g., due to sector terminations). // It must only be used for penalty calculations, not power adjustments. let penalised_power = dispute_info.disputed_power.clone(); // Load sectors for the dispute. - let sectors = - Sectors::load(rt.store(), &dl_current.sectors_snapshot).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load sectors array", - ) - })?; + let sectors = Sectors::load(rt.store(), &dl_current.sectors_snapshot) + .context("failed to load sectors array")?; let sector_infos = sectors .load_for_proof(&dispute_info.all_sector_nos, &dispute_info.ignored_sector_nos) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load sectors to dispute window post", - ) - })?; + .context("failed to load sectors to dispute window post")?; // Check proof, we fail if validation succeeds. if verify_windowed_post(rt, target_deadline.challenge, §or_infos, proofs)? 
{ @@ -1439,22 +1327,14 @@ impl Actor { fault_expiration_epoch, &mut dispute_info.disputed_sectors, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to declare faults") - })?; + .context("failed to declare faults")?; deadlines_current .update_deadline(policy, rt.store(), params.deadline, &dl_current) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", params.deadline), - ) - })?; + .with_context(|| format!("failed to update deadline {}", params.deadline))?; - st.save_deadlines(rt.store(), deadlines_current).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + st.save_deadlines(rt.store(), deadlines_current) + .context("failed to save deadlines")?; // --- penalties --- @@ -1483,9 +1363,7 @@ impl Actor { current_epoch, &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to pay debt") - })?; + .context("failed to pay debt")?; let to_burn = &penalty_from_vesting + &penalty_from_balance; @@ -1510,7 +1388,7 @@ impl Actor { notify_pledge_changed(rt, &pledge_delta)?; let st: State = rt.state()?; - st.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + st.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -1752,13 +1630,9 @@ impl Actor { e.wrap("failed to allocate sector numbers") )?; state.put_precommitted_sectors(store, chain_infos) - .map_err(|e| - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to write pre-committed sectors") - )?; + .context("failed to write pre-committed sectors")?; state.add_pre_commit_clean_ups(rt.policy(), store, clean_up_events) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add pre-commit expiry to queue") - })?; + .context("failed to add pre-commit expiry to queue")?; // Activate miner cron needs_cron = !state.deadline_cron_active; state.deadline_cron_active = true; @@ -1766,7 
+1640,7 @@ impl Actor { })?; burn_funds(rt, fee_to_burn)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; if needs_cron { let new_dl_info = state.deadline_info(rt.policy(), curr_epoch); enroll_cron_event( @@ -1800,12 +1674,7 @@ impl Actor { let st: State = rt.state()?; let precommit = st .get_precommitted_sector(rt.store(), sector_number) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load pre-committed sector {}", sector_number), - ) - })? + .with_context(|| format!("failed to load pre-committed sector {}", sector_number))? .ok_or_else(|| actor_error!(not_found, "no pre-commited sector {}", sector_number))?; let max_proof_size = precommit.info.seal_proof.proof_size().map_err(|e| { @@ -1890,13 +1759,9 @@ impl Actor { let st: State = rt.state()?; let store = rt.store(); // This skips missing pre-commits. - let precommited_sectors = - st.find_precommitted_sectors(store, ¶ms.sectors).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load pre-committed sectors", - ) - })?; + let precommited_sectors = st + .find_precommitted_sectors(store, ¶ms.sectors) + .context("failed to load pre-committed sectors")?; confirm_sector_proofs_valid_internal( rt, precommited_sectors, @@ -2034,28 +1899,20 @@ impl Actor { decls.push(decl); } - let mut sectors = Sectors::load(rt.store(), &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let mut sectors = Sectors::load(rt.store(), &state.sectors) + .context("failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = TokenAmount::zero(); for deadline_idx in deadlines_to_load { let policy = rt.policy(); - let mut deadline = - deadlines.load_deadline(policy, store, deadline_idx).map_err(|e| { - e.downcast_default( - 
ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; - let mut partitions = deadline.partitions_amt(store).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partitions for deadline {}", deadline_idx), - ) + let mut partitions = deadline.partitions_amt(store).with_context(|| { + format!("failed to load partitions for deadline {}", deadline_idx) })?; let quant = state.quant_spec_for_deadline(policy, deadline_idx); @@ -2069,12 +1926,7 @@ impl Actor { let mut partition = partitions .get(decl.partition) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load partition {:?}", key), - ) - })? + .with_context(|| format!("failed to load partition {:?}", key))? .cloned() .ok_or_else(|| actor_error!(not_found, "no such partition {:?}", key))?; @@ -2143,32 +1995,23 @@ impl Actor { .collect::>()?; // Overwrite sector infos. - sectors.store(new_sectors.clone()).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update sectors {:?}", decl.sectors), - ) - })?; + sectors + .store(new_sectors.clone()) + .with_context(|| format!("failed to update sectors {:?}", decl.sectors))?; // Remove old sectors from partition and assign new sectors. let (partition_power_delta, partition_pledge_delta) = partition .replace_sectors(store, &old_sectors, &new_sectors, info.sector_size, quant) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to replace sector expirations at {:?}", key), - ) + .with_context(|| { + format!("failed to replace sector expirations at {:?}", key) })?; power_delta += &partition_power_delta; pledge_delta += partition_pledge_delta; // expected to be zero, see note below. 
- partitions.set(decl.partition, partition).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save partition {:?}", key), - ) - })?; + partitions + .set(decl.partition, partition) + .with_context(|| format!("failed to save partition {:?}", key))?; // Record the new partition expiration epoch for setting outside this loop // over declarations. @@ -2183,44 +2026,30 @@ impl Actor { } } - deadline.partitions = partitions.flush().map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save partitions for deadline {}", deadline_idx), - ) + deadline.partitions = partitions.flush().with_context(|| { + format!("failed to save partitions for deadline {}", deadline_idx) })?; // Record partitions in deadline expiration queue for epoch in epochs_to_reschedule { let p_idxs = partitions_by_new_epoch.get(&epoch).unwrap(); - deadline.add_expiration_partitions(store, epoch, p_idxs, quant).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!( - "failed to add expiration partitions to \ - deadline {} epoch {}", - deadline_idx, epoch - ), + deadline.add_expiration_partitions(store, epoch, p_idxs, quant).with_context( + || { + format!( + "failed to add expiration partitions to deadline {} epoch {}", + deadline_idx, epoch ) }, )?; } - deadlines.update_deadline(policy, store, deadline_idx, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {}", deadline_idx), - ) - })?; + deadlines + .update_deadline(policy, store, deadline_idx, &deadline) + .with_context(|| format!("failed to save deadline {}", deadline_idx))?; } - state.sectors = sectors.amt.flush().map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors") - })?; - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.sectors = 
sectors.amt.flush().context("failed to save sectors")?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok((power_delta, pledge_delta)) })?; @@ -2317,9 +2146,7 @@ impl Actor { // We're only reading the sectors, so there's no need to save this back. // However, we still want to avoid re-loading this array per-partition. - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors") - })?; + let sectors = Sectors::load(store, &state.sectors).context("failed to load sectors")?; for (deadline_idx, partition_sectors) in to_process.iter() { // If the deadline is the current or next deadline to prove, don't allow terminating sectors. @@ -2338,13 +2165,9 @@ impl Actor { } let quant = state.quant_spec_for_deadline(rt.policy(), deadline_idx); - let mut deadline = - deadlines.load_deadline(rt.policy(), store, deadline_idx).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(rt.policy(), store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; let removed_power = deadline .terminate_sectors( @@ -2356,29 +2179,19 @@ impl Actor { info.sector_size, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to terminate sectors in deadline {}", deadline_idx), - ) + .with_context(|| { + format!("failed to terminate sectors in deadline {}", deadline_idx) })?; state.early_terminations.set(deadline_idx); power_delta -= &removed_power; - deadlines.update_deadline(rt.policy(), store, deadline_idx, &deadline).map_err( - |e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", deadline_idx), - ) - }, - )?; + deadlines + .update_deadline(rt.policy(), store, deadline_idx, &deadline) + .with_context(|| format!("failed to update deadline 
{}", deadline_idx))?; } - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok((had_early_terminations, power_delta)) })?; @@ -2401,7 +2214,7 @@ impl Actor { schedule_early_termination_work(rt)?; } let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; request_update_power(rt, power_delta)?; Ok(TerminateSectorsReturn { done: !more }) @@ -2462,9 +2275,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let sectors = + Sectors::load(store, &state.sectors).context("failed to load sectors array")?; let mut new_fault_power_total = PowerPair::zero(); let curr_epoch = rt.curr_epoch(); @@ -2494,13 +2306,9 @@ impl Actor { ) })?; - let mut deadline = - deadlines.load_deadline(policy, store, deadline_idx).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; let fault_expiration_epoch = target_deadline.last() + policy.fault_max_age; @@ -2513,26 +2321,18 @@ impl Actor { fault_expiration_epoch, partition_map, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to declare faults for deadline {}", deadline_idx), - ) + .with_context(|| { + format!("failed to declare faults for deadline {}", deadline_idx) })?; - deadlines.update_deadline(policy, store, deadline_idx, &deadline).map_err(|e| { - e.downcast_default( 
- ExitCode::USR_ILLEGAL_STATE, - format!("failed to store deadline {} partitions", deadline_idx), - ) - })?; + deadlines.update_deadline(policy, store, deadline_idx, &deadline).with_context( + || format!("failed to store deadline {} partitions", deadline_idx), + )?; new_fault_power_total += &deadline_power_delta; } - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok(new_fault_power_total) })?; @@ -2616,9 +2416,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let sectors = + Sectors::load(store, &state.sectors).context("failed to load sectors array")?; let curr_epoch = rt.curr_epoch(); for (deadline_idx, partition_map) in to_process.iter() { let policy = rt.policy(); @@ -2646,41 +2445,29 @@ impl Actor { ) })?; - let mut deadline = - deadlines.load_deadline(policy, store, deadline_idx).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", deadline_idx), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, deadline_idx) + .with_context(|| format!("failed to load deadline {}", deadline_idx))?; deadline .declare_faults_recovered(store, §ors, info.sector_size, partition_map) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to declare recoveries for deadline {}", deadline_idx), - ) + .with_context(|| { + format!("failed to declare recoveries for deadline {}", deadline_idx) })?; - deadlines.update_deadline(policy, store, deadline_idx, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to store deadline {}", deadline_idx), - ) - 
})?; + deadlines + .update_deadline(policy, store, deadline_idx, &deadline) + .with_context(|| format!("failed to store deadline {}", deadline_idx))?; } - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save deadlines") - })?; + state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok(fee_to_burn) })?; burn_funds(rt, fee_to_burn)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; // Power is not restored yet, but when the recovered sectors are successfully PoSted. Ok(()) @@ -2755,29 +2542,19 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let mut deadline = - deadlines.load_deadline(policy, store, params_deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load deadline {}", params_deadline), - ) - })?; + let mut deadline = deadlines + .load_deadline(policy, store, params_deadline) + .with_context(|| format!("failed to load deadline {}", params_deadline))?; let (live, dead, removed_power) = - deadline.remove_partitions(store, partitions, quant).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to remove partitions from deadline {}", params_deadline), - ) + deadline.remove_partitions(store, partitions, quant).with_context(|| { + format!("failed to remove partitions from deadline {}", params_deadline) })?; - state.delete_sectors(store, &dead).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to delete dead sectors") - })?; + state.delete_sectors(store, &dead).context("failed to delete dead sectors")?; - let sectors = state.load_sector_infos(store, &live).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load moved sectors") - })?; + let sectors = + 
state.load_sector_infos(store, &live).context("failed to load moved sectors")?; let proven = true; let added_power = deadline .add_sectors( @@ -2788,12 +2565,7 @@ impl Actor { info.sector_size, quant, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to add back moved sectors", - ) - })?; + .context("failed to add back moved sectors")?; if removed_power != added_power { return Err(actor_error!( @@ -2804,19 +2576,13 @@ impl Actor { )); } - deadlines.update_deadline(policy, store, params_deadline, &deadline).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to update deadline {}", params_deadline), - ) - })?; + deadlines + .update_deadline(policy, store, params_deadline, &deadline) + .with_context(|| format!("failed to update deadline {}", params_deadline))?; - state.save_deadlines(store, deadlines).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to save deadline {}", params_deadline), - ) - })?; + state + .save_deadlines(store, deadlines) + .with_context(|| format!("failed to save deadline {}", params_deadline))?; Ok(()) })?; @@ -2943,9 +2709,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to repay penalty") - })?; + .context("failed to repay penalty")?; pledge_delta_total -= &penalty_from_vesting; let to_burn = penalty_from_vesting + penalty_from_balance; Ok((pledge_delta_total, to_burn)) @@ -2954,7 +2718,7 @@ impl Actor { notify_pledge_changed(rt, &pledge_delta_total)?; burn_funds(rt, to_burn)?; let st: State = rt.state()?; - st.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + st.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -2974,7 +2738,7 @@ impl Actor { let fault = rt .verify_consensus_fault(¶ms.header1, ¶ms.header2, ¶ms.header_extra) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "fault not 
verified"))? + .context("fault not verified")? .ok_or_else(|| actor_error!(illegal_argument, "No consensus fault found"))?; if fault.target != rt.message().receiver() { return Err(actor_error!( @@ -3031,9 +2795,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to pay fees") - })?; + .context("failed to pay fees")?; let mut burn_amount = &penalty_from_vesting + &penalty_from_balance; pledge_delta -= penalty_from_vesting; @@ -3045,9 +2807,7 @@ impl Actor { info.consensus_fault_elapsed = rt.curr_epoch() + rt.policy().consensus_fault_ineligibility_duration; - st.save_info(rt.store(), &info).map_err(|e| { - e.downcast_default(ExitCode::USR_SERIALIZATION, "failed to save miner info") - })?; + st.save_info(rt.store(), &info).context("failed to save miner info")?; Ok((burn_amount, reward_amount)) })?; @@ -3060,7 +2820,7 @@ impl Actor { notify_pledge_changed(rt, &pledge_delta)?; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -3099,10 +2859,9 @@ impl Actor { } // Unlock vested funds so we can spend them. - let newly_vested = - state.unlock_vested_funds(rt.store(), rt.curr_epoch()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "Failed to vest fund") - })?; + let newly_vested = state + .unlock_vested_funds(rt.store(), rt.curr_epoch()) + .context("Failed to vest fund")?; // available balance already accounts for fee debt so it is correct to call // this before RepayDebts. 
We would have to @@ -3146,7 +2905,7 @@ impl Actor { burn_funds(rt, fee_to_burn)?; notify_pledge_changed(rt, &newly_vested.neg())?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(WithdrawBalanceReturn { amount_withdrawn: amount_withdrawn.clone() }) } @@ -3168,9 +2927,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to unlock fee debt") - })?; + .context("failed to unlock fee debt")?; Ok((from_vesting, from_balance, state.clone())) })?; @@ -3179,7 +2936,7 @@ impl Actor { notify_pledge_changed(rt, &from_vesting.neg())?; burn_funds(rt, burn_amount)?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } @@ -3220,7 +2977,7 @@ impl Actor { } }; let state: State = rt.state()?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok(()) } } @@ -3249,12 +3006,7 @@ where policy.addressed_partitions_max, policy.addressed_sectors_max, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to pop early terminations", - ) - })?; + .context("failed to pop early terminations")?; // Nothing to do, don't waste any time. 
// This can happen if we end up processing early terminations @@ -3265,9 +3017,8 @@ where } let info = get_miner_info(rt.store(), state)?; - let sectors = Sectors::load(store, &state.sectors).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array") - })?; + let sectors = + Sectors::load(store, &state.sectors).context("failed to load sectors array")?; let mut total_initial_pledge = TokenAmount::zero(); let mut deals_to_terminate = @@ -3318,9 +3069,7 @@ where rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to repay penalty") - })?; + .context("failed to repay penalty")?; penalty = &penalty_from_vesting + penalty_from_balance; pledge_delta -= penalty_from_vesting; @@ -3380,7 +3129,7 @@ where // from locked vesting funds before funds free this epoch. let newly_vested = state .unlock_vested_funds(rt.store(), rt.curr_epoch()) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to vest funds"))?; + .context("failed to vest funds")?; pledge_delta_total -= newly_vested; @@ -3390,12 +3139,7 @@ where let deposit_to_burn = state .cleanup_expired_pre_commits(policy, rt.store(), rt.curr_epoch()) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to expire pre-committed sectors", - ) - })?; + .context("failed to expire pre-committed sectors")?; state .apply_penalty(&deposit_to_burn) @@ -3411,9 +3155,9 @@ where // That way, don't re-schedule a cron callback if one is already scheduled. 
had_early_terminations = have_pending_early_terminations(state); - let result = state.advance_deadline(policy, rt.store(), rt.curr_epoch()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to advance deadline") - })?; + let result = state + .advance_deadline(policy, rt.store(), rt.curr_epoch()) + .context("failed to advance deadline")?; // Faults detected by this missed PoSt pay no penalty, but sectors that were already faulty // and remain faulty through this deadline pay the fault fee. @@ -3442,9 +3186,7 @@ where rt.curr_epoch(), &rt.current_balance(), ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to unlock penalty") - })?; + .context("failed to unlock penalty")?; penalty_total = &penalty_from_vesting + penalty_from_balance; pledge_delta_total -= penalty_from_vesting; @@ -3569,12 +3311,7 @@ where { let replace_sector = state .get_sector(store, params.replace_sector_number) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load sector {}", params.replace_sector_number), - ) - })? + .with_context(|| format!("failed to load sector {}", params.replace_sector_number))? 
.ok_or_else(|| { actor_error!(not_found, "no such sector {} to replace", params.replace_sector_number) })?; @@ -3636,12 +3373,7 @@ where params.replace_sector_partition, params.replace_sector_number, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to replace sector {}", params.replace_sector_number), - ) - })?; + .with_context(|| format!("failed to replace sector {}", params.replace_sector_number))?; Ok(()) } @@ -4053,9 +3785,11 @@ fn assign_proving_period_offset( addr: Address, current_epoch: ChainEpoch, blake2b: impl FnOnce(&[u8]) -> [u8; 32], -) -> anyhow::Result { +) -> Result { let mut my_addr = addr.marshal_cbor()?; - my_addr.write_i64::(current_epoch)?; + my_addr + .write_i64::(current_epoch) + .map_err(|err| actor_error!(serialization, "{}", err))?; let digest = blake2b(&my_addr); @@ -4101,9 +3835,10 @@ fn declaration_deadline_info( period_start: ChainEpoch, deadline_idx: u64, current_epoch: ChainEpoch, -) -> anyhow::Result { +) -> Result { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "invalid deadline {}, must be < {}", deadline_idx, policy.wpost_period_deadlines @@ -4116,9 +3851,9 @@ fn declaration_deadline_info( } /// Checks that a fault or recovery declaration at a specific deadline is outside the exclusion window for the deadline. 
-fn validate_fr_declaration_deadline(deadline: &DeadlineInfo) -> anyhow::Result<()> { +fn validate_fr_declaration_deadline(deadline: &DeadlineInfo) -> Result<(), ActorError> { if deadline.fault_cutoff_passed() { - Err(anyhow!("late fault or recovery declaration")) + Err(actor_error!(illegal_argument, "late fault or recovery declaration")) } else { Ok(()) } @@ -4128,14 +3863,16 @@ fn validate_fr_declaration_deadline(deadline: &DeadlineInfo) -> anyhow::Result<( fn validate_partition_contains_sectors( partition: &Partition, sectors: &mut UnvalidatedBitField, -) -> anyhow::Result<()> { - let sectors = sectors.validate().map_err(|e| anyhow!("failed to check sectors: {}", e))?; +) -> Result<(), ActorError> { + let sectors = sectors + .validate() + .map_err(|e| actor_error!(illegal_argument, "failed to check sectors: {}", e))?; // Check that the declared sectors are actually assigned to the partition. if partition.sectors.contains_all(sectors) { Ok(()) } else { - Err(anyhow!("not all sectors are assigned to the partition")) + Err(actor_error!(illegal_argument, "not all sectors are assigned to the partition")) } } @@ -4190,9 +3927,7 @@ fn get_miner_info(store: &BS, state: &State) -> Result( @@ -4217,9 +3952,7 @@ where info.worker = pending_worker_key.new_worker; info.pending_worker_key = None; - state - .save_info(rt.store(), info) - .map_err(|e| e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to save miner info")) + state.save_info(rt.store(), info).context("failed to save miner info") } /// Repays all fee debt and then verifies that the miner has amount needed to cover @@ -4234,9 +3967,9 @@ where BS: Blockstore, RT: Runtime, { - let res = state.repay_debts(&rt.current_balance()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "unlocked balance can not repay fee debt") - })?; + let res = state + .repay_debts(&rt.current_balance()) + .context("unlocked balance can not repay fee debt")?; info!("RepayDebtsOrAbort was called and succeeded"); Ok(res) } 
@@ -4433,13 +4166,11 @@ where new_sectors.push(new_sector_info); } - state.put_sectors(store, new_sectors.clone()).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to put new sectors") - })?; + state.put_sectors(store, new_sectors.clone()).context("failed to put new sectors")?; - state.delete_precommitted_sectors(store, &new_sector_numbers).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to delete precommited sectors") - })?; + state + .delete_precommitted_sectors(store, &new_sector_numbers) + .context("failed to delete precommited sectors")?; state .assign_sectors_to_deadlines( @@ -4450,12 +4181,7 @@ where info.window_post_partition_sectors, info.sector_size, ) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to assign new sectors to deadlines", - ) - })?; + .context("failed to assign new sectors to deadlines")?; let newly_vested = TokenAmount::zero(); @@ -4480,7 +4206,7 @@ where .add_initial_pledge(&total_pledge) .map_err(|e| actor_error!(illegal_state, "failed to add initial pledge: {}", e))?; - state.check_balance_invariants(&rt.current_balance()).map_err(balance_invariants_broken)?; + state.check_balance_invariants(&rt.current_balance())?; Ok((total_pledge, newly_vested)) })?; @@ -4491,14 +4217,6 @@ where Ok(()) } -// XXX: probably better to push this one level down into state -fn balance_invariants_broken(e: Error) -> ActorError { - ActorError::unchecked( - ERR_BALANCE_INVARIANTS_BROKEN, - format!("balance invariants broken: {}", e), - ) -} - impl ActorCode for Actor { fn invoke_method( rt: &mut RT, diff --git a/actors/miner/src/partition_state.rs b/actors/miner/src/partition_state.rs index 5d5420982..b5e4003dc 100644 --- a/actors/miner/src/partition_state.rs +++ b/actors/miner/src/partition_state.rs @@ -4,17 +4,15 @@ use std::convert::TryInto; use std::ops::{self, Neg}; -use anyhow::{anyhow, Context}; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use 
fil_actors_runtime::{actor_error, ActorDowncast, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec, NO_QUANTIZATION}; use fvm_shared::econ::TokenAmount; -use fvm_shared::error::ExitCode; use fvm_shared::sector::{SectorSize, StoragePower}; use num_traits::{Signed, Zero}; @@ -65,7 +63,7 @@ pub struct Partition { } impl Partition { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let empty_expiration_array = Array::::new_with_bit_width(store, PARTITION_EXPIRATION_AMT_BITWIDTH) .flush()?; @@ -116,21 +114,19 @@ impl Partition { sectors: &[SectorOnChainInfo], sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context("failed to load sector expirations")?; let (sector_numbers, power, _) = expirations .add_active_sectors(sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + .context("failed to record new sector expirations")?; - self.expirations_epochs = expirations - .amt - .flush() - .map_err(|e| e.downcast_wrap("failed to store sector expirations"))?; + self.expirations_epochs = + expirations.amt.flush().context("failed to store sector expirations")?; if self.sectors.contains_any(§or_numbers) { - return Err(anyhow!("not all added sectors are new")); + return Err(actor_error!(illegal_argument, "not all added sectors are new")); } // Update other metadata using the calculated totals. 
@@ -159,16 +155,15 @@ impl Partition { fault_expiration: ChainEpoch, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result<(PowerPair, PowerPair)> { + ) -> Result<(PowerPair, PowerPair), ActorError> { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load partition queue"))?; + .context("failed to load partition queue")?; // Reschedule faults - let new_faulty_power = - queue - .reschedule_as_faults(fault_expiration, sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to add faults to partition queue"))?; + let new_faulty_power = queue + .reschedule_as_faults(fault_expiration, sectors, sector_size) + .context("failed to add faults to partition queue")?; // Save expiration queue self.expirations_epochs = queue.amt.flush()?; @@ -187,8 +182,8 @@ impl Partition { let mut power_delta = new_faulty_power.clone().neg(); - let unproven_infos = select_sectors(sectors, &unproven) - .map_err(|e| e.downcast_wrap("failed to select unproven sectors"))?; + let unproven_infos = + select_sectors(sectors, &unproven).context("failed to select unproven sectors")?; if !unproven_infos.is_empty() { let lost_unproven_power = power_for_sectors(sector_size, &unproven_infos); self.unproven_power -= &lost_unproven_power; @@ -217,13 +212,12 @@ impl Partition { fault_expiration_epoch: ChainEpoch, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result<(BitField, PowerPair, PowerPair)> { + ) -> Result<(BitField, PowerPair, PowerPair), ActorError> { validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; - let sector_numbers = sector_numbers - .validate() - .map_err(|e| anyhow!("failed to intersect sectors with recoveries: {}", e))?; + let sector_numbers = + sector_numbers.validate().context("failed to intersect sectors with recoveries")?; // Split declarations into declarations of new faults, 
and retraction of declared recoveries. let retracted_recoveries = &self.recoveries & sector_numbers; @@ -246,7 +240,7 @@ impl Partition { sector_size, quant, ) - .map_err(|e| e.downcast_wrap("failed to add faults"))? + .context("failed to add faults")? } else { Default::default() }; @@ -267,7 +261,7 @@ impl Partition { sectors: &Sectors<'_, BS>, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { // Process recoveries, assuming the proof will be successful. // This similarly updates state. let recovered_sectors = sectors @@ -276,12 +270,12 @@ impl Partition { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| anyhow!("failed to load partition queue: {:?}", e))?; + .context("failed to load partition queue")?; // Reschedule recovered let power = queue .reschedule_recovered(recovered_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to reschedule faults in partition queue"))?; + .context("failed to reschedule faults in partition queue")?; // Save expiration queue self.expirations_epochs = queue.amt.flush()?; @@ -313,23 +307,20 @@ impl Partition { sectors: &Sectors<'_, BS>, sector_size: SectorSize, sector_numbers: &mut UnvalidatedBitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Check that the declared sectors are actually assigned to the partition. validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; - let sector_numbers = sector_numbers - .validate() - .map_err(|e| anyhow!("failed to validate recoveries: {}", e))?; + let sector_numbers = sector_numbers.validate().context("failed to validate recoveries")?; // Ignore sectors not faulty or already declared recovered let mut recoveries = sector_numbers & &self.faults; recoveries -= &self.recoveries; // Record the new recoveries for processing at Window PoSt or deadline cron. 
- let recovery_sectors = sectors - .load_sector(&recoveries) - .map_err(|e| e.wrap("failed to load recovery sectors"))?; + let recovery_sectors = + sectors.load_sector(&recoveries).context("failed to load recovery sectors")?; self.recoveries |= &recoveries; @@ -375,7 +366,7 @@ impl Partition { sector_numbers: &mut UnvalidatedBitField, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let sector_numbers = sector_numbers.validate().map_err(|e| { actor_error!(illegal_argument, "failed to validate rescheduled sectors: {}", e) })?; @@ -391,7 +382,7 @@ impl Partition { let sector_infos = sectors.load_sector(&active)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context("failed to load sector expirations")?; expirations.reschedule_expirations(new_expiration, §or_infos, sector_size)?; self.expirations_epochs = expirations.amt.flush()?; @@ -413,25 +404,24 @@ impl Partition { new_sectors: &[SectorOnChainInfo], sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result<(PowerPair, TokenAmount)> { + ) -> Result<(PowerPair, TokenAmount), ActorError> { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context("failed to load sector expirations")?; let (old_sector_numbers, new_sector_numbers, power_delta, pledge_delta) = expirations .replace_sectors(old_sectors, new_sectors, sector_size) - .map_err(|e| e.downcast_wrap("failed to replace sector expirations"))?; + .context("failed to replace sector expirations")?; - self.expirations_epochs = expirations - .amt - .flush() - .map_err(|e| e.downcast_wrap("failed to save sector expirations"))?; + self.expirations_epochs = + expirations.amt.flush().context("failed to save sector expirations")?; // Check the sectors being removed are active (alive, not faulty). 
let active = self.active_sectors(); let all_active = active.contains_all(&old_sector_numbers); if !all_active { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "refusing to replace inactive sectors in {:?} (active: {:?})", old_sector_numbers, active @@ -457,19 +447,19 @@ impl Partition { store: &BS, epoch: ChainEpoch, sectors: &BitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut early_termination_queue = BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) - .map_err(|e| e.downcast_wrap("failed to load early termination queue"))?; + .context("failed to load early termination queue")?; early_termination_queue .add_to_queue(epoch, sectors) - .map_err(|e| e.downcast_wrap("failed to add to early termination queue"))?; + .context("failed to add to early termination queue")?; self.early_terminated = early_termination_queue .amt .flush() - .map_err(|e| e.downcast_wrap("failed to save early termination queue"))?; + .context("failed to save early termination queue")?; Ok(()) } @@ -487,7 +477,7 @@ impl Partition { sector_numbers: &mut UnvalidatedBitField, sector_size: SectorSize, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { let live_sectors = self.live_sectors(); let sector_numbers = sector_numbers.validate().map_err(|e| { actor_error!(illegal_argument, "failed to validate terminating sectors: {}", e) @@ -499,21 +489,19 @@ impl Partition { let sector_infos = sectors.load_sector(sector_numbers)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + .context("failed to load sector expirations")?; let (mut removed, removed_recovering) = expirations .remove_sectors(policy, §or_infos, &self.faults, &self.recoveries, sector_size) - .map_err(|e| e.downcast_wrap("failed to remove sector expirations"))?; + .context("failed to remove sector expirations")?; - self.expirations_epochs = expirations - .amt - .flush() 
- .map_err(|e| e.downcast_wrap("failed to save sector expirations"))?; + self.expirations_epochs = + expirations.amt.flush().context("failed to save sector expirations")?; let removed_sectors = &removed.on_time_sectors | &removed.early_sectors; // Record early termination. self.record_early_termination(store, epoch, &removed_sectors) - .map_err(|e| e.downcast_wrap("failed to record early sector termination"))?; + .context("failed to record early sector termination")?; let unproven_nos = &removed_sectors & &self.unproven; @@ -546,20 +534,22 @@ impl Partition { store: &BS, until: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result { + ) -> Result { // This is a sanity check to make sure we handle proofs _before_ // handling sector expirations. if !self.unproven.is_empty() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "Cannot pop expired sectors from a partition with unproven sectors" )); } let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; - let popped = expirations.pop_until(until).map_err(|e| { - e.downcast_wrap(format!("failed to pop expiration queue until {}", until)) - })?; + .context("failed to load expiration queue")?; + let popped = expirations + .pop_until(until) + .with_context(|| format!("failed to pop expiration queue until {}", until))?; + self.expirations_epochs = expirations.amt.flush()?; let expired_sectors = &popped.on_time_sectors | &popped.early_sectors; @@ -569,15 +559,21 @@ impl Partition { // and all recoveries retracted. // No recoveries may be posted until the deadline is closed. 
if !self.recoveries.is_empty() { - return Err(anyhow!("unexpected recoveries while processing expirations")); + return Err(actor_error!( + illegal_state, + "unexpected recoveries while processing expirations" + )); } if !self.recovering_power.is_zero() { - return Err(anyhow!("unexpected recovering power while processing expirations")); + return Err(actor_error!( + illegal_state, + "unexpected recovering power while processing expirations" + )); } // Nothing expiring now should have already terminated. if self.terminated.contains_any(&expired_sectors) { - return Err(anyhow!("expiring sectors already terminated")); + return Err(actor_error!(illegal_state, "expiring sectors already terminated")); } // Mark the sectors as terminated and subtract sector power. @@ -588,7 +584,7 @@ impl Partition { // Record the epoch of any sectors expiring early, for termination fee calculation later. self.record_early_termination(store, until, &popped.early_sectors) - .map_err(|e| e.downcast_wrap("failed to record early terminations"))?; + .context("failed to record early terminations")?; // check invariants self.validate_state()?; @@ -604,15 +600,15 @@ impl Partition { store: &BS, fault_expiration: ChainEpoch, quant: QuantSpec, - ) -> anyhow::Result<(PowerPair, PowerPair, PowerPair)> { + ) -> Result<(PowerPair, PowerPair, PowerPair), ActorError> { // Collapse tail of queue into the last entry, and mark all power faulty. 
// Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .map_err(|e| e.downcast_wrap("failed to load partition queue"))?; + .context("failed to load partition queue")?; queue .reschedule_all_as_faults(fault_expiration) - .map_err(|e| e.downcast_wrap("failed to reschedule all as faults"))?; + .context("failed to reschedule all as faults")?; // Save expiration queue self.expirations_epochs = queue.amt.flush()?; @@ -645,7 +641,7 @@ impl Partition { &mut self, store: &BS, max_sectors: u64, - ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { // Load early terminations. let mut early_terminated_queue = BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION)?; @@ -657,14 +653,15 @@ impl Partition { early_terminated_queue .amt - .for_each_while(|i, sectors| { + .try_for_each_while::<_, ActorError>(|i, sectors| { let epoch: ChainEpoch = i.try_into()?; let count = sectors.len(); let limit = max_sectors - result.sectors_processed; let to_process = if limit < count { - let to_process = - sectors.slice(0, limit).context("expected more sectors in bitfield")?; + let to_process = sectors.slice(0, limit).ok_or_else(|| { + actor_error!(illegal_state, "expected more sectors in bitfield") + })?; let rest = sectors - &to_process; remaining = Some((rest, epoch)); result.sectors_processed += limit; @@ -680,24 +677,26 @@ impl Partition { let keep_going = result.sectors_processed < max_sectors; Ok(keep_going) }) - .map_err(|e| e.downcast_wrap("failed to walk early terminations queue"))?; + .context("failed to walk early terminations queue")?; // Update early terminations - early_terminated_queue.amt.batch_delete(processed, true).map_err(|e| { - e.downcast_wrap("failed to remove entries from early terminations queue") - })?; + early_terminated_queue + .amt + .batch_delete(processed, true) + .context("failed to remove entries from early terminations 
queue")?; if let Some((remaining_sectors, remaining_epoch)) = remaining.take() { - early_terminated_queue.amt.set(remaining_epoch as u64, remaining_sectors).map_err( - |e| e.downcast_wrap("failed to update remaining entry early terminations queue"), - )?; + early_terminated_queue + .amt + .set(remaining_epoch as u64, remaining_sectors) + .context("failed to update remaining entry early terminations queue")?; } // Save early terminations. self.early_terminated = early_terminated_queue .amt .flush() - .map_err(|e| e.downcast_wrap("failed to store early terminations queue"))?; + .context("failed to store early terminations queue")?; // check invariants self.validate_state()?; @@ -720,7 +719,7 @@ impl Partition { quant: QuantSpec, fault_expiration: ChainEpoch, skipped: &mut UnvalidatedBitField, - ) -> anyhow::Result<(PowerPair, PowerPair, PowerPair, bool)> { + ) -> Result<(PowerPair, PowerPair, PowerPair, bool), ActorError> { let skipped = skipped.validate().map_err(|e| { actor_error!(illegal_argument, "failed to validate skipped sectors: {}", e) })?; @@ -760,9 +759,7 @@ impl Partition { sector_size, quant, ) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add skipped faults") - })?; + .context("failed to add skipped faults")?; // Remove faulty recoveries self.remove_recoveries(&retracted_recoveries, &retracted_recovery_power); @@ -774,59 +771,68 @@ impl Partition { } /// Test invariants about the partition power are valid. 
- pub fn validate_power_state(&self) -> anyhow::Result<()> { + pub fn validate_power_state(&self) -> Result<(), ActorError> { if self.live_power.raw.is_negative() || self.live_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative live power")); + return Err(actor_error!(illegal_state, "Partition left with negative live power")); } if self.unproven_power.raw.is_negative() || self.unproven_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative unproven power")); + return Err(actor_error!(illegal_state, "Partition left with negative unproven power")); } if self.faulty_power.raw.is_negative() || self.faulty_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative faulty power")); + return Err(actor_error!(illegal_state, "Partition left with negative faulty power")); } if self.recovering_power.raw.is_negative() || self.recovering_power.qa.is_negative() { - return Err(anyhow!("Partition left with negative recovering power")); + return Err(actor_error!( + illegal_state, + "Partition left with negative recovering power" + )); } if self.unproven_power.raw > self.live_power.raw { - return Err(anyhow!("Partition left with invalid unproven power")); + return Err(actor_error!(illegal_state, "Partition left with invalid unproven power")); } if self.faulty_power.raw > self.live_power.raw { - return Err(anyhow!("Partition left with invalid faulty power")); + return Err(actor_error!(illegal_state, "Partition left with invalid faulty power")); } // The first half of this conditional shouldn't matter, keeping for readability if self.recovering_power.raw > self.live_power.raw || self.recovering_power.raw > self.faulty_power.raw { - return Err(anyhow!("Partition left with invalid recovering power")); + return Err(actor_error!( + illegal_state, + "Partition left with invalid recovering power" + )); } Ok(()) } - pub fn validate_bf_state(&self) -> anyhow::Result<()> { + pub fn validate_bf_state(&self) -> Result<(), ActorError> 
{ let mut merge = &self.unproven | &self.faults; // Unproven or faulty sectors should not be in terminated if self.terminated.contains_any(&merge) { - return Err(anyhow!("Partition left with terminated sectors in multiple states")); + return Err(actor_error!( + illegal_state, + "Partition left with terminated sectors in multiple states" + )); } merge |= &self.terminated; // All merged sectors should exist in partition sectors if !self.sectors.contains_all(&merge) { - return Err(anyhow!("Partition left with invalid sector state")); + return Err(actor_error!(illegal_state, "Partition left with invalid sector state")); } // All recoveries should exist in partition faults if !self.faults.contains_all(&self.recoveries) { - return Err(anyhow!("Partition left with invalid recovery state")); + return Err(actor_error!(illegal_state, "Partition left with invalid recovery state")); } Ok(()) } - pub fn validate_state(&self) -> anyhow::Result<()> { + pub fn validate_state(&self) -> Result<(), ActorError> { self.validate_power_state()?; self.validate_bf_state()?; Ok(()) diff --git a/actors/miner/src/sector_map.rs b/actors/miner/src/sector_map.rs index 82ab755e1..8d197b6cf 100644 --- a/actors/miner/src/sector_map.rs +++ b/actors/miner/src/sector_map.rs @@ -3,11 +3,10 @@ use std::collections::BTreeMap; -use anyhow::anyhow; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use serde::{Deserialize, Serialize}; -use fil_actors_runtime::runtime::Policy; +use fil_actors_runtime::{actor_error, runtime::Policy, ActorContext, ActorError}; /// Maps deadlines to partition maps. #[derive(Default)] @@ -21,34 +20,42 @@ impl DeadlineSectorMap { /// Check validates all bitfields and counts the number of partitions & sectors /// contained within the map, and returns an error if they exceed the given /// maximums. 
- pub fn check(&mut self, max_partitions: u64, max_sectors: u64) -> anyhow::Result<()> { - let (partition_count, sector_count) = - self.count().map_err(|e| anyhow!("failed to count sectors: {:?}", e))?; + pub fn check(&mut self, max_partitions: u64, max_sectors: u64) -> Result<(), ActorError> { + let (partition_count, sector_count) = self.count().context("failed to count sectors")?; if partition_count > max_partitions { - return Err(anyhow!("too many partitions {}, max {}", partition_count, max_partitions)); + return Err(actor_error!( + illegal_argument, + "too many partitions {}, max {}", + partition_count, + max_partitions + )); } if sector_count > max_sectors { - return Err(anyhow!("too many sectors {}, max {}", sector_count, max_sectors)); + return Err(actor_error!( + illegal_argument, + "too many sectors {}, max {}", + sector_count, + max_sectors + )); } Ok(()) } /// Counts the number of partitions & sectors within the map. - pub fn count(&mut self) -> anyhow::Result<(/* partitions */ u64, /* sectors */ u64)> { + pub fn count(&mut self) -> Result<(/* partitions */ u64, /* sectors */ u64), ActorError> { self.0.iter_mut().try_fold((0_u64, 0_u64), |(partitions, sectors), (deadline_idx, pm)| { - let (partition_count, sector_count) = pm - .count() - .map_err(|e| anyhow!("when counting deadline {}: {:?}", deadline_idx, e))?; + let (partition_count, sector_count) = + pm.count().with_context(|| format!("when counting deadline {}", deadline_idx))?; Ok(( - partitions - .checked_add(partition_count) - .ok_or_else(|| anyhow!("integer overflow when counting partitions"))?, - sectors - .checked_add(sector_count) - .ok_or_else(|| anyhow!("integer overflow when counting sectors"))?, + partitions.checked_add(partition_count).ok_or_else(|| { + actor_error!(illegal_state, "integer overflow when counting partitions") + })?, + sectors.checked_add(sector_count).ok_or_else(|| { + actor_error!(illegal_state, "integer overflow when counting sectors") + })?, )) }) } @@ -60,9 +67,9 @@ 
impl DeadlineSectorMap { deadline_idx: u64, partition_idx: u64, sector_numbers: UnvalidatedBitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { if deadline_idx >= policy.wpost_period_deadlines { - return Err(anyhow!("invalid deadline {}", deadline_idx)); + return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } self.0.entry(deadline_idx).or_default().add(partition_idx, sector_numbers) @@ -75,7 +82,7 @@ impl DeadlineSectorMap { deadline_idx: u64, partition_idx: u64, sector_numbers: &[u64], - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.add( policy, deadline_idx, @@ -105,7 +112,7 @@ impl PartitionSectorMap { &mut self, partition_idx: u64, sector_numbers: Vec, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.add(partition_idx, BitField::try_from_bits(sector_numbers)?.into()) } /// Records the given sector bitfield at the given partition index, merging @@ -114,15 +121,14 @@ impl PartitionSectorMap { &mut self, partition_idx: u64, mut sector_numbers: UnvalidatedBitField, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { match self.0.get_mut(&partition_idx) { Some(old_sector_numbers) => { let old = old_sector_numbers .validate_mut() - .map_err(|e| anyhow!("failed to validate sector bitfield: {}", e))?; - let new = sector_numbers - .validate() - .map_err(|e| anyhow!("failed to validate new sector bitfield: {}", e))?; + .context("failed to validate sector bitfield")?; + let new = + sector_numbers.validate().context("failed to validate new sector bitfield")?; *old |= new; } None => { @@ -133,14 +139,14 @@ impl PartitionSectorMap { } /// Counts the number of partitions & sectors within the map. 
- pub fn count(&mut self) -> anyhow::Result<(/* partitions */ u64, /* sectors */ u64)> { + pub fn count(&mut self) -> Result<(/* partitions */ u64, /* sectors */ u64), ActorError> { let sectors = self.0.iter_mut().try_fold(0_u64, |sectors, (partition_idx, bf)| { - let validated = bf.validate().map_err(|e| { - anyhow!("failed to parse bitmap for partition {}: {}", partition_idx, e) + let validated = bf.validate().with_context(|| { + format!("failed to parse bitmap for partition {}", partition_idx) })?; - sectors - .checked_add(validated.len() as u64) - .ok_or_else(|| anyhow!("integer overflow when counting sectors")) + sectors.checked_add(validated.len() as u64).ok_or_else(|| { + actor_error!(illegal_state, "integer overflow when counting sectors") + }) })?; Ok((self.0.len() as u64, sectors)) } diff --git a/actors/miner/src/sectors.rs b/actors/miner/src/sectors.rs index 49b0bd959..40d589dc4 100644 --- a/actors/miner/src/sectors.rs +++ b/actors/miner/src/sectors.rs @@ -3,7 +3,6 @@ use std::collections::BTreeSet; -use anyhow::anyhow; use cid::Cid; use fil_actors_runtime::{actor_error, ActorDowncast, ActorError, Array}; use fvm_ipld_amt::Error as AmtError; @@ -19,7 +18,7 @@ pub struct Sectors<'db, BS> { } impl<'db, BS: Blockstore> Sectors<'db, BS> { - pub fn load(store: &'db BS, root: &Cid) -> Result { + pub fn load(store: &'db BS, root: &Cid) -> Result> { Ok(Self { amt: Array::load(root, store)? 
}) } @@ -50,7 +49,10 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { Ok(sector_infos) } - pub fn get(&self, sector_number: SectorNumber) -> anyhow::Result> { + pub fn get( + &self, + sector_number: SectorNumber, + ) -> Result, ActorError> { Ok(self .amt .get(sector_number) @@ -58,12 +60,16 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { .cloned()) } - pub fn store(&mut self, infos: Vec) -> anyhow::Result<()> { + pub fn store(&mut self, infos: Vec) -> Result<(), ActorError> { for info in infos { let sector_number = info.sector_number; if sector_number > MAX_SECTOR_NUMBER { - return Err(anyhow!("sector number {} out of range", info.sector_number)); + return Err(actor_error!( + illegal_argument, + "sector number {} out of range", + info.sector_number + )); } self.amt.set(sector_number, info).map_err(|e| { @@ -74,8 +80,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { Ok(()) } - pub fn must_get(&self, sector_number: SectorNumber) -> anyhow::Result { - self.get(sector_number)?.ok_or_else(|| anyhow!("sector {} not found", sector_number)) + pub fn must_get(&self, sector_number: SectorNumber) -> Result { + self.get(sector_number)? + .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_number)) } /// Loads info for a set of sectors to be proven. @@ -85,7 +92,7 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { &self, proven_sectors: &BitField, expected_faults: &BitField, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let non_faults = proven_sectors - expected_faults; if non_faults.is_empty() { @@ -108,7 +115,7 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { sectors: &BitField, faults: &BitField, fault_stand_in: SectorNumber, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let stand_in_info = self.must_get(fault_stand_in)?; // Expand faults into a map for quick lookups. 
@@ -131,13 +138,17 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { pub(crate) fn select_sectors( sectors: &[SectorOnChainInfo], field: &BitField, -) -> anyhow::Result> { +) -> Result, ActorError> { let mut to_include: BTreeSet<_> = field.iter().collect(); let included = sectors.iter().filter(|si| to_include.remove(&si.sector_number)).cloned().collect(); if !to_include.is_empty() { - return Err(anyhow!("failed to find {} expected sectors", to_include.len())); + return Err(actor_error!( + not_found, + "failed to find {} expected sectors", + to_include.len() + )); } Ok(included) diff --git a/actors/miner/src/state.rs b/actors/miner/src/state.rs index 1e96acc49..534f4e518 100644 --- a/actors/miner/src/state.rs +++ b/actors/miner/src/state.rs @@ -4,15 +4,13 @@ use std::cmp; use std::ops::Neg; -use anyhow::anyhow; use cid::multihash::Code; use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ - actor_error, make_empty_map, make_map_with_root_and_bitwidth, u64_key, ActorDowncast, + actor_error, make_empty_map, make_map_with_root_and_bitwidth, u64_key, ActorContext, ActorError, Array, }; -use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -22,11 +20,12 @@ use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED}; use fvm_shared::econ::TokenAmount; -use fvm_shared::error::ExitCode; use fvm_shared::sector::{RegisteredPoStProof, SectorNumber, SectorSize, MAX_SECTOR_NUMBER}; use fvm_shared::HAMT_BIT_WIDTH; use num_traits::{Signed, Zero}; +use crate::ERR_BALANCE_INVARIANTS_BROKEN; + use super::deadlines::new_deadline_info; use super::policy::*; use super::types::*; @@ -128,50 +127,36 @@ impl State { info_cid: Cid, period_start: ChainEpoch, deadline_idx: u64, - ) -> anyhow::Result { - let empty_precommit_map = - make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH).flush().map_err(|e| { - 
e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to construct empty precommit map", - ) - })?; + ) -> Result { + let empty_precommit_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) + .flush() + .context("failed to construct empty precommit map")?; + let empty_precommits_cleanup_array = Array::::new_with_bit_width(store, PRECOMMIT_EXPIRY_AMT_BITWIDTH) .flush() - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to construct empty precommits array", - ) - })?; + .context("failed to construct empty precommits array")?; + let empty_sectors_array = Array::::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) .flush() - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to construct sectors array", - ) - })?; - let empty_bitfield = store.put_cbor(&BitField::new(), Code::Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty bitfield") - })?; + .context("failed to construct sectors array")?; + + let empty_bitfield = store + .put_cbor(&BitField::new(), Code::Blake2b256) + .context("failed to construct empty bitfield")?; let deadline = Deadline::new(store)?; - let empty_deadline = store.put_cbor(&deadline, Code::Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + let empty_deadline = store + .put_cbor(&deadline, Code::Blake2b256) + .context("failed to construct illegal state")?; let empty_deadlines = store .put_cbor(&Deadlines::new(policy, empty_deadline), Code::Blake2b256) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + .context("failed to construct illegal state")?; - let empty_vesting_funds_cid = - store.put_cbor(&VestingFunds::new(), Code::Blake2b256).map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state") - })?; + let empty_vesting_funds_cid = store + 
.put_cbor(&VestingFunds::new(), Code::Blake2b256) + .context("failed to construct illegal state")?; Ok(Self { info: info_cid, @@ -196,11 +181,11 @@ impl State { }) } - pub fn get_info(&self, store: &BS) -> anyhow::Result { + pub fn get_info(&self, store: &BS) -> Result { match store.get_cbor(&self.info) { Ok(Some(info)) => Ok(info), - Ok(None) => Err(actor_error!(not_found, "failed to get miner info").into()), - Err(e) => Err(e.downcast_wrap("failed to get miner info")), + Ok(None) => Err(actor_error!(not_found, "failed to get miner info")), + Err(e) => Err(ActorError::from(e).wrap("failed to get miner info")), } } @@ -208,7 +193,7 @@ impl State { &mut self, store: &BS, info: &MinerInfo, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let cid = store.put_cbor(&info, Code::Blake2b256)?; self.info = cid; Ok(()) @@ -253,12 +238,7 @@ impl State { ) -> Result<(), ActorError> { let prior_allocation = store .get_cbor(&self.allocated_sectors) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - "failed to load allocated sectors bitfield", - ) - })? + .context("failed to load allocated sectors bitfield")? 
.ok_or_else(|| actor_error!(illegal_state, "allocated sectors bitfield not found"))?; if policy != CollisionPolicy::AllowCollisions { @@ -275,15 +255,13 @@ impl State { } let new_allocation = &prior_allocation | sector_numbers; self.allocated_sectors = - store.put_cbor(&new_allocation, Code::Blake2b256).map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_ARGUMENT, - format!( - "failed to store allocated sectors bitfield after adding {:?}", - sector_numbers, - ), + store.put_cbor(&new_allocation, Code::Blake2b256).with_context(|| { + format!( + "failed to store allocated sectors bitfield after adding {:?}", + sector_numbers, ) })?; + Ok(()) } @@ -292,18 +270,21 @@ impl State { &mut self, store: &BS, precommits: Vec, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut precommitted = make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; for precommit in precommits.into_iter() { let sector_no = precommit.info.sector_number; let modified = precommitted .set_if_absent(u64_key(precommit.info.sector_number), precommit) - .map_err(|e| { - e.downcast_wrap(format!("failed to store precommitment for {:?}", sector_no,)) - })?; + .with_context(|| format!("failed to store precommitment for {:?}", sector_no,))?; + if !modified { - return Err(anyhow!("sector {} already pre-commited", sector_no)); + return Err(actor_error!( + illegal_argument, + "sector {} already pre-commited", + sector_no + )); } } @@ -315,7 +296,7 @@ impl State { &self, store: &BS, sector_num: SectorNumber, - ) -> Result, HamtError> { + ) -> Result, HamtError> { let precommitted = make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; Ok(precommitted.get(&u64_key(sector_num))?.cloned()) @@ -326,7 +307,7 @@ impl State { &self, store: &BS, sector_numbers: &[SectorNumber], - ) -> anyhow::Result> { + ) -> Result, ActorError> { let precommitted = make_map_with_root_and_bitwidth::<_, SectorPreCommitOnChainInfo>( 
&self.pre_committed_sectors, store, @@ -335,9 +316,10 @@ impl State { let mut result = Vec::with_capacity(sector_numbers.len()); for §or_number in sector_numbers { - let info = match precommitted.get(&u64_key(sector_number)).map_err(|e| { - e.downcast_wrap(format!("failed to load precommitment for {}", sector_number)) - })? { + let info = match precommitted + .get(&u64_key(sector_number)) + .with_context(|| format!("failed to load precommitment for {}", sector_number))? + { Some(info) => info.clone(), None => continue, }; @@ -352,7 +334,7 @@ impl State { &mut self, store: &BS, sector_nums: &[SectorNumber], - ) -> Result<(), HamtError> { + ) -> Result<(), HamtError> { let mut precommitted = make_map_with_root_and_bitwidth::<_, SectorPreCommitOnChainInfo>( &self.pre_committed_sectors, store, @@ -371,7 +353,7 @@ impl State { &self, store: &BS, sector_num: SectorNumber, - ) -> anyhow::Result { + ) -> Result { let sectors = Sectors::load(store, &self.sectors)?; Ok(sectors.get(sector_num)?.is_some()) } @@ -380,14 +362,12 @@ impl State { &mut self, store: &BS, new_sectors: Vec, - ) -> anyhow::Result<()> { - let mut sectors = Sectors::load(store, &self.sectors) - .map_err(|e| e.downcast_wrap("failed to load sectors"))?; + ) -> Result<(), ActorError> { + let mut sectors = Sectors::load(store, &self.sectors).context("failed to load sectors")?; sectors.store(new_sectors)?; - self.sectors = - sectors.amt.flush().map_err(|e| e.downcast_wrap("failed to persist sectors"))?; + self.sectors = sectors.amt.flush().context("failed to persist sectors")?; Ok(()) } @@ -396,7 +376,7 @@ impl State { &self, store: &BS, sector_num: SectorNumber, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let sectors = Sectors::load(store, &self.sectors)?; sectors.get(sector_num) } @@ -405,26 +385,23 @@ impl State { &mut self, store: &BS, sector_nos: &BitField, - ) -> Result<(), AmtError> { + ) -> Result<(), ActorError> { let mut sectors = Sectors::load(store, &self.sectors)?; for sector_num in 
sector_nos.iter() { - sectors - .amt - .delete(sector_num) - .map_err(|e| e.downcast_wrap("could not delete sector number"))?; + sectors.amt.delete(sector_num).context("could not delete sector number")?; } self.sectors = sectors.amt.flush()?; Ok(()) } - pub fn for_each_sector(&self, store: &BS, mut f: F) -> anyhow::Result<()> + pub fn for_each_sector(&self, store: &BS, mut f: F) -> Result<(), ActorError> where - F: FnMut(&SectorOnChainInfo) -> anyhow::Result<()>, + F: FnMut(&SectorOnChainInfo) -> Result<(), ActorError>, { let sectors = Sectors::load(store, &self.sectors)?; - sectors.amt.for_each(|_, v| f(v))?; + sectors.amt.try_for_each(|_, v| f(v))?; Ok(()) } @@ -434,7 +411,7 @@ impl State { policy: &Policy, store: &BS, sector_number: SectorNumber, - ) -> anyhow::Result<(u64, u64)> { + ) -> Result<(u64, u64), ActorError> { let deadlines = self.load_deadlines(store)?; deadlines.find_sector(policy, store, sector_number) } @@ -455,7 +432,7 @@ impl State { current_epoch: ChainEpoch, sector_size: SectorSize, mut deadline_sectors: DeadlineSectorMap, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut deadlines = self.load_deadlines(store)?; let sectors = Sectors::load(store, &self.sectors)?; @@ -498,7 +475,7 @@ impl State { mut sectors: Vec, partition_size: u64, sector_size: SectorSize, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let mut deadlines = self.load_deadlines(store)?; // Sort sectors by number to get better runs in partition bitfields. @@ -565,7 +542,7 @@ impl State { store: &BS, max_partitions: u64, max_sectors: u64, - ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { // Anything to do? This lets us avoid loading the deadlines if there's nothing to do. 
if self.early_terminations.is_empty() { return Ok((Default::default(), false)); @@ -590,11 +567,8 @@ impl State { max_partitions - result.partitions_processed, max_sectors - result.sectors_processed, ) - .map_err(|e| { - e.downcast_wrap(format!( - "failed to pop early terminations for deadline {}", - deadline_idx - )) + .with_context(|| { + format!("failed to pop early terminations for deadline {}", deadline_idx) })?; result += deadline_result; @@ -635,7 +609,7 @@ impl State { partition_idx: u64, sector_number: SectorNumber, require_proven: bool, - ) -> anyhow::Result { + ) -> Result { let dls = self.load_deadlines(store)?; let dl = dls.load_deadline(policy, store, deadline_idx)?; let partition = dl.load_partition(store, partition_idx)?; @@ -676,7 +650,7 @@ impl State { deadline_idx: u64, partition_idx: u64, sector_number: SectorNumber, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { let deadlines = self.load_deadlines(store)?; let deadline = deadlines.load_deadline(policy, store, deadline_idx)?; let partition = deadline.load_partition(store, partition_idx)?; @@ -716,16 +690,14 @@ impl State { &self, store: &BS, sectors: &BitField, - ) -> anyhow::Result> { + ) -> Result, ActorError> { Ok(Sectors::load(store, &self.sectors)?.load_sector(sectors)?) } pub fn load_deadlines(&self, store: &BS) -> Result { store .get_cbor::(&self.deadlines) - .map_err(|e| { - e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines") - })? + .context("failed to load deadlines")? .ok_or_else( || actor_error!(illegal_state; "failed to load deadlines {}", self.deadlines), ) @@ -735,18 +707,19 @@ impl State { &mut self, store: &BS, deadlines: Deadlines, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.deadlines = store.put_cbor(&deadlines, Code::Blake2b256)?; Ok(()) } /// Loads the vesting funds table from the store. 
- pub fn load_vesting_funds(&self, store: &BS) -> anyhow::Result { + pub fn load_vesting_funds( + &self, + store: &BS, + ) -> Result { Ok(store .get_cbor(&self.vesting_funds) - .map_err(|e| { - e.downcast_wrap(format!("failed to load vesting funds {}", self.vesting_funds)) - })? + .with_context(|| format!("failed to load vesting funds {}", self.vesting_funds))? .ok_or_else( || actor_error!(not_found; "failed to load vesting funds {:?}", self.vesting_funds), )?) @@ -757,7 +730,7 @@ impl State { &mut self, store: &BS, funds: &VestingFunds, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { self.vesting_funds = store.put_cbor(funds, Code::Blake2b256)?; Ok(()) } @@ -773,10 +746,11 @@ impl State { // Funds and vesting // - pub fn add_pre_commit_deposit(&mut self, amount: &TokenAmount) -> anyhow::Result<()> { + pub fn add_pre_commit_deposit(&mut self, amount: &TokenAmount) -> Result<(), ActorError> { let new_total = &self.pre_commit_deposits + amount; if new_total.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "negative pre-commit deposit {} after adding {} to prior {}", new_total, amount, @@ -787,10 +761,11 @@ impl State { Ok(()) } - pub fn add_initial_pledge(&mut self, amount: &TokenAmount) -> anyhow::Result<()> { + pub fn add_initial_pledge(&mut self, amount: &TokenAmount) -> Result<(), ActorError> { let new_total = &self.initial_pledge + amount; if new_total.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_argument, "negative initial pledge requirement {} after adding {} to prior {}", new_total, amount, @@ -801,9 +776,9 @@ impl State { Ok(()) } - pub fn apply_penalty(&mut self, penalty: &TokenAmount) -> anyhow::Result<()> { + pub fn apply_penalty(&mut self, penalty: &TokenAmount) -> Result<(), ActorError> { if penalty.is_negative() { - Err(anyhow!("applying negative penalty {} not allowed", penalty)) + Err(actor_error!(illegal_argument, "applying negative penalty {} not allowed", penalty)) } 
else { self.fee_debt += penalty; Ok(()) @@ -817,9 +792,9 @@ impl State { current_epoch: ChainEpoch, vesting_sum: &TokenAmount, spec: &VestSpec, - ) -> anyhow::Result { + ) -> Result { if vesting_sum.is_negative() { - return Err(anyhow!("negative vesting sum {}", vesting_sum)); + return Err(actor_error!(illegal_argument, "negative vesting sum {}", vesting_sum)); } let mut vesting_funds = self.load_vesting_funds(store)?; @@ -828,7 +803,8 @@ impl State { let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch); self.locked_funds -= &amount_unlocked; if self.locked_funds.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "negative locked funds {} after unlocking {}", self.locked_funds, amount_unlocked @@ -858,7 +834,7 @@ impl State { TokenAmount, // from vesting TokenAmount, // from balance ), - anyhow::Error, + ActorError, > { let unlocked_balance = self.get_unlocked_balance(curr_balance)?; @@ -867,7 +843,10 @@ impl State { // * It may be possible the go implementation catches a potential panic here if from_vesting > self.fee_debt { - return Err(anyhow!("should never unlock more than the debt we need to repay")); + return Err(actor_error!( + illegal_state, + "should never unlock more than the debt we need to repay" + )); } self.fee_debt -= &from_vesting; @@ -881,7 +860,7 @@ impl State { /// burnt and an error if there are not sufficient funds to cover repayment. /// Miner state repays from unlocked funds and fails if unlocked funds are insufficient to cover fee debt. /// FeeDebt will be zero after a successful call. 
- pub fn repay_debts(&mut self, curr_balance: &TokenAmount) -> anyhow::Result { + pub fn repay_debts(&mut self, curr_balance: &TokenAmount) -> Result { let unlocked_balance = self.get_unlocked_balance(curr_balance)?; if unlocked_balance < self.fee_debt { return Err(actor_error!( @@ -903,7 +882,7 @@ impl State { store: &BS, current_epoch: ChainEpoch, target: &TokenAmount, - ) -> anyhow::Result { + ) -> Result { if target.is_zero() || self.locked_funds.is_zero() { return Ok(TokenAmount::zero()); } @@ -912,7 +891,8 @@ impl State { let amount_unlocked = vesting_funds.unlock_unvested_funds(current_epoch, target); self.locked_funds -= &amount_unlocked; if self.locked_funds.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "negative locked funds {} after unlocking {}", self.locked_funds, amount_unlocked @@ -929,7 +909,7 @@ impl State { &mut self, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { if self.locked_funds.is_zero() { return Ok(TokenAmount::zero()); } @@ -938,9 +918,10 @@ impl State { let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch); self.locked_funds -= &amount_unlocked; if self.locked_funds.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "vesting cause locked funds to become negative: {}", - self.locked_funds, + self.locked_funds )); } @@ -953,7 +934,7 @@ impl State { &self, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { let vesting_funds = self.load_vesting_funds(store)?; Ok(vesting_funds .funds @@ -963,11 +944,18 @@ impl State { } /// Unclaimed funds that are not locked -- includes funds used to cover initial pledge requirement. 
- pub fn get_unlocked_balance(&self, actor_balance: &TokenAmount) -> anyhow::Result { + pub fn get_unlocked_balance( + &self, + actor_balance: &TokenAmount, + ) -> Result { let unlocked_balance = actor_balance - &self.locked_funds - &self.pre_commit_deposits - &self.initial_pledge; if unlocked_balance.is_negative() { - return Err(anyhow!("negative unlocked balance {}", unlocked_balance)); + return Err(actor_error!( + illegal_state, + "negative unlocked balance {}", + unlocked_balance + )); } Ok(unlocked_balance) } @@ -977,28 +965,39 @@ impl State { pub fn get_available_balance( &self, actor_balance: &TokenAmount, - ) -> anyhow::Result { + ) -> Result { // (actor_balance - &self.locked_funds) - &self.pre_commit_deposit Ok(self.get_unlocked_balance(actor_balance)? - &self.fee_debt) } - pub fn check_balance_invariants(&self, balance: &TokenAmount) -> anyhow::Result<()> { + pub fn check_balance_invariants(&self, balance: &TokenAmount) -> Result<(), ActorError> { + // XXX: probably better to push this one level down into state + let fail = |msg| { + ActorError::unchecked( + ERR_BALANCE_INVARIANTS_BROKEN, + format!("balance invariants broken: {}", msg), + ) + }; + if self.pre_commit_deposits.is_negative() { - return Err(anyhow!("pre-commit deposit is negative: {}", self.pre_commit_deposits)); + return Err(fail(format!( + "pre-commit deposit is negative: {}", + self.pre_commit_deposits + ))); } if self.locked_funds.is_negative() { - return Err(anyhow!("locked funds is negative: {}", self.locked_funds)); + return Err(fail(format!("locked funds is negative: {}", self.locked_funds))); } if self.initial_pledge.is_negative() { - return Err(anyhow!("initial pledge is negative: {}", self.initial_pledge)); + return Err(fail(format!("initial pledge is negative: {}", self.initial_pledge))); } if self.fee_debt.is_negative() { - return Err(anyhow!("fee debt is negative: {}", self.fee_debt)); + return Err(fail(format!("fee debt is negative: {}", self.fee_debt))); } let min_balance = 
&self.pre_commit_deposits + &self.locked_funds + &self.initial_pledge; if balance < &min_balance { - return Err(anyhow!("fee debt is negative: {}", self.fee_debt)); + return Err(fail(format!("fee debt is negative: {}", self.fee_debt))); } Ok(()) @@ -1014,12 +1013,12 @@ impl State { policy: &Policy, store: &BS, cleanup_events: Vec<(ChainEpoch, u64)>, - ) -> anyhow::Result<()> { + ) -> Result<(), ActorError> { // Load BitField Queue for sector expiry let quant = self.quant_spec_every_deadline(policy); let mut queue = super::BitFieldQueue::new(store, &self.pre_committed_sectors_cleanup, quant) - .map_err(|e| e.downcast_wrap("failed to load pre-commit clean up queue"))?; + .context("failed to load pre-commit clean up queue")?; queue.add_many_to_queue_values(cleanup_events.into_iter())?; self.pre_committed_sectors_cleanup = queue.amt.flush()?; @@ -1031,7 +1030,7 @@ impl State { policy: &Policy, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { let mut deposit_to_burn = TokenAmount::zero(); // cleanup expired pre-committed sectors @@ -1072,7 +1071,8 @@ impl State { self.pre_commit_deposits -= &deposit_to_burn; if self.pre_commit_deposits.is_negative() { - return Err(anyhow!( + return Err(actor_error!( + illegal_state, "pre-commit clean up caused negative deposits: {}", self.pre_commit_deposits )); @@ -1086,7 +1086,7 @@ impl State { policy: &Policy, store: &BS, current_epoch: ChainEpoch, - ) -> anyhow::Result { + ) -> Result { let mut pledge_delta = TokenAmount::zero(); let dl_info = self.deadline_info(policy, current_epoch); @@ -1169,7 +1169,7 @@ impl State { &self, store: &BS, sector_nos: &BitField, - ) -> anyhow::Result> { + ) -> Result, ActorError> { let mut precommits = Vec::new(); let precommitted = make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; diff --git a/actors/miner/tests/util.rs b/actors/miner/tests/util.rs index a1a006e39..a1befac76 100644 --- a/actors/miner/tests/util.rs +++ 
b/actors/miner/tests/util.rs @@ -1605,7 +1605,6 @@ where let arr = Array::::load(c, &rt.store).unwrap(); arr.for_each(|_, v: &T| { result.push(v.clone()); - Ok(()) }) .unwrap(); result diff --git a/actors/runtime/Cargo.toml b/actors/runtime/Cargo.toml index 1813dbd1b..acc8418bf 100644 --- a/actors/runtime/Cargo.toml +++ b/actors/runtime/Cargo.toml @@ -11,6 +11,7 @@ repository = "https://github.com/filecoin-project/builtin-actors" fvm_ipld_hamt = "0.4.0" fvm_ipld_amt = { version = "0.4.0", features = ["go-interop"] } fvm_shared = { version = "0.6.0", default-features = false } +fvm_ipld_bitfield = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" serde = { version = "1.0.136", features = ["derive"] } diff --git a/actors/runtime/src/actor_error.rs b/actors/runtime/src/actor_error.rs index b376600b3..ab7e89b04 100644 --- a/actors/runtime/src/actor_error.rs +++ b/actors/runtime/src/actor_error.rs @@ -1,4 +1,4 @@ -use std::fmt::Display; +use std::{fmt::Display, num::TryFromIntError}; use fvm_shared::error::ExitCode; use thiserror::Error; @@ -93,6 +93,27 @@ impl From> for ActorE } } +impl From for ActorError { + fn from(e: fvm_ipld_bitfield::Error) -> Self { + // TODO: correct code? + Self { exit_code: ExitCode::USR_ILLEGAL_STATE, msg: e.to_string() } + } +} + +impl From for ActorError { + fn from(e: TryFromIntError) -> Self { + // TODO: correct code? + Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } + } +} + +impl From for ActorError { + fn from(e: fvm_ipld_bitfield::OutOfRangeError) -> Self { + // TODO: correct code? 
+ Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } + } +} + impl From> for ActorError { fn from(e: crate::util::MultiMapError) -> Self { match e { @@ -113,6 +134,28 @@ impl, E: std::error::Error> From, E: std::error::Error> From> + for ActorError +{ + fn from(e: fvm_ipld_amt::EitherError) -> Self { + match e { + fvm_ipld_amt::EitherError::User(e) => e.into(), + fvm_ipld_amt::EitherError::Amt(e) => e.into(), + } + } +} + +impl, E: std::error::Error> From> + for ActorError +{ + fn from(e: fvm_ipld_hamt::EitherError) -> Self { + match e { + fvm_ipld_hamt::EitherError::User(e) => e.into(), + fvm_ipld_hamt::EitherError::Hamt(e) => e.into(), + } + } +} + /// Converts an actor deletion error into an actor error with the appropriate exit code. This /// facilitates propagation. #[cfg(feature = "fil-actor")] From 07abdffc3eb4cc14d73c41509051ffcac7fd0d26 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 12 Apr 2022 18:30:18 +0200 Subject: [PATCH 06/10] cleanup and clippy --- actors/market/src/lib.rs | 3 +- actors/market/src/state.rs | 4 +- actors/miner/src/bitfield_queue.rs | 6 +- actors/miner/src/deadline_state.rs | 7 +- actors/miner/src/partition_state.rs | 5 +- actors/miner/src/sectors.rs | 18 ++-- actors/miner/src/state.rs | 25 ++---- actors/runtime/src/actor_error.rs | 11 ++- .../runtime/src/runtime/actor_blockstore.rs | 8 +- actors/runtime/src/util/downcast.rs | 88 ------------------- actors/runtime/src/util/mod.rs | 2 - actors/runtime/src/util/set.rs | 2 +- test_vm/src/lib.rs | 4 +- 13 files changed, 40 insertions(+), 143 deletions(-) delete mode 100644 actors/runtime/src/util/downcast.rs diff --git a/actors/market/src/lib.rs b/actors/market/src/lib.rs index c76f998b9..1a9bf23fa 100644 --- a/actors/market/src/lib.rs +++ b/actors/market/src/lib.rs @@ -982,8 +982,7 @@ where illegal_argument, "deal id {} present multiple times", deal_id - ) - .into()); + )); } let proposal = proposals .get(*deal_id)? 
diff --git a/actors/market/src/state.rs b/actors/market/src/state.rs index 8b70100af..780130261 100644 --- a/actors/market/src/state.rs +++ b/actors/market/src/state.rs @@ -567,7 +567,7 @@ where lock_reason: Reason, ) -> Result<(), ActorError> { if amount.is_negative() { - return Err(actor_error!(illegal_state, "unlock negative amount: {}", amount).into()); + return Err(actor_error!(illegal_state, "unlock negative amount: {}", amount)); } self.locked_table.as_mut().unwrap().must_subtract(addr, amount)?; @@ -620,7 +620,7 @@ where lock_reason: Reason, ) -> Result<(), ActorError> { if amount.is_negative() { - return Err(actor_error!(illegal_state, "negative amount to slash: {}", amount).into()); + return Err(actor_error!(illegal_state, "negative amount to slash: {}", amount)); } // Subtract from locked and escrow tables diff --git a/actors/miner/src/bitfield_queue.rs b/actors/miner/src/bitfield_queue.rs index d466c8abc..cc002b5e9 100644 --- a/actors/miner/src/bitfield_queue.rs +++ b/actors/miner/src/bitfield_queue.rs @@ -4,7 +4,7 @@ use std::convert::TryInto; use cid::Cid; -use fil_actors_runtime::{ActorContext, ActorDowncast, ActorError, Array}; +use fil_actors_runtime::{ActorContext, ActorError, Array}; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -39,13 +39,13 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { let bitfield = self .amt .get(epoch) - .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", epoch)))? + .with_context(|| format!("failed to lookup queue epoch {}", epoch))? 
.cloned() .unwrap_or_default(); self.amt .set(epoch, &bitfield | values) - .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch)))?; + .with_context(|| format!("failed to set queue epoch {}", epoch))?; Ok(()) } diff --git a/actors/miner/src/deadline_state.rs b/actors/miner/src/deadline_state.rs index 73660335e..744d8fc53 100644 --- a/actors/miner/src/deadline_state.rs +++ b/actors/miner/src/deadline_state.rs @@ -614,7 +614,7 @@ impl Deadline { if let Some(&max_partition) = to_remove_set.iter().max() { if max_partition > partition_count { return Err( - actor_error!(illegal_argument; "partition index {} out of range [0, {})", max_partition, partition_count).into() + actor_error!(illegal_argument; "partition index {} out of range [0, {})", max_partition, partition_count), ); } } else { @@ -625,7 +625,7 @@ impl Deadline { // Should already be checked earlier, but we might as well check again. if !self.early_terminations.is_empty() { return Err( - actor_error!(illegal_argument; "cannot remove partitions from deadline with early terminations").into(), + actor_error!(illegal_argument; "cannot remove partitions from deadline with early terminations"), ); } @@ -663,8 +663,7 @@ impl Deadline { illegal_argument, "cannot remove partition {}: has unproven sectors", partition_idx - ) - .into()); + )); } // Get the live sectors. 
diff --git a/actors/miner/src/partition_state.rs b/actors/miner/src/partition_state.rs index b5e4003dc..9233f5e77 100644 --- a/actors/miner/src/partition_state.rs +++ b/actors/miner/src/partition_state.rs @@ -484,7 +484,7 @@ impl Partition { })?; if !live_sectors.contains_all(sector_numbers) { - return Err(actor_error!(illegal_argument, "can only terminate live sectors").into()); + return Err(actor_error!(illegal_argument, "can only terminate live sectors")); } let sector_infos = sectors.load_sector(sector_numbers)?; @@ -733,8 +733,7 @@ impl Partition { return Err(actor_error!( illegal_argument, "skipped faults contains sectors outside partition" - ) - .into()); + )); } // Find all skipped faults that have been labeled recovered diff --git a/actors/miner/src/sectors.rs b/actors/miner/src/sectors.rs index 40d589dc4..d7032a244 100644 --- a/actors/miner/src/sectors.rs +++ b/actors/miner/src/sectors.rs @@ -4,11 +4,10 @@ use std::collections::BTreeSet; use cid::Cid; -use fil_actors_runtime::{actor_error, ActorDowncast, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; -use fvm_shared::error::ExitCode; use fvm_shared::sector::{SectorNumber, MAX_SECTOR_NUMBER}; use super::SectorOnChainInfo; @@ -36,12 +35,7 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { let sector_on_chain = self .amt .get(sector_number) - .map_err(|e| { - e.downcast_default( - ExitCode::USR_ILLEGAL_STATE, - format!("failed to load sector {}", sector_number), - ) - })? + .with_context(|| format!("failed to load sector {}", sector_number))? .cloned() .ok_or_else(|| actor_error!(not_found; "sector not found: {}", sector_number))?; sector_infos.push(sector_on_chain); @@ -56,7 +50,7 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { Ok(self .amt .get(sector_number) - .map_err(|e| e.downcast_wrap(format!("failed to get sector {}", sector_number)))? 
+ .with_context(|| format!("failed to get sector {}", sector_number))? .cloned()) } @@ -72,9 +66,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { )); } - self.amt.set(sector_number, info).map_err(|e| { - e.downcast_wrap(format!("failed to store sector {}", sector_number)) - })?; + self.amt + .set(sector_number, info) + .with_context(|| format!("failed to store sector {}", sector_number))?; } Ok(()) diff --git a/actors/miner/src/state.rs b/actors/miner/src/state.rs index 534f4e518..2a59de5d7 100644 --- a/actors/miner/src/state.rs +++ b/actors/miner/src/state.rs @@ -620,8 +620,7 @@ impl State { not_found; "sector {} not a member of partition {}, deadline {}", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } let faulty = partition.faults.get(sector_number); @@ -660,8 +659,7 @@ impl State { not_found; "sector {} not a member of partition {}, deadline {}", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } if partition.faults.get(sector_number) { @@ -669,8 +667,7 @@ impl State { forbidden; "sector {} not a member of partition {}, deadline {}", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } if partition.terminated.get(sector_number) { @@ -678,8 +675,7 @@ impl State { not_found; "sector {} not of partition {}, deadline {} is terminated", sector_number, partition_idx, deadline_idx - ) - .into()); + )); } Ok(()) @@ -691,7 +687,7 @@ impl State { store: &BS, sectors: &BitField, ) -> Result, ActorError> { - Ok(Sectors::load(store, &self.sectors)?.load_sector(sectors)?) + Sectors::load(store, &self.sectors)?.load_sector(sectors) } pub fn load_deadlines(&self, store: &BS) -> Result { @@ -717,12 +713,12 @@ impl State { &self, store: &BS, ) -> Result { - Ok(store + store .get_cbor(&self.vesting_funds) .with_context(|| format!("failed to load vesting funds {}", self.vesting_funds))? .ok_or_else( || actor_error!(not_found; "failed to load vesting funds {:?}", self.vesting_funds), - )?) 
+ ) } /// Saves the vesting table to the store. @@ -868,8 +864,7 @@ impl State { "unlocked balance can not repay fee debt ({} < {})", unlocked_balance, self.fee_debt - ) - .into()); + )); } Ok(std::mem::take(&mut self.fee_debt)) @@ -1175,9 +1170,7 @@ impl State { make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; for sector_no in sector_nos.iter() { if sector_no as u64 > MAX_SECTOR_NUMBER { - return Err( - actor_error!(illegal_argument; "sector number greater than maximum").into() - ); + return Err(actor_error!(illegal_argument; "sector number greater than maximum")); } let info: &SectorPreCommitOnChainInfo = precommitted diff --git a/actors/runtime/src/actor_error.rs b/actors/runtime/src/actor_error.rs index ab7e89b04..8968419af 100644 --- a/actors/runtime/src/actor_error.rs +++ b/actors/runtime/src/actor_error.rs @@ -3,8 +3,6 @@ use std::{fmt::Display, num::TryFromIntError}; use fvm_shared::error::ExitCode; use thiserror::Error; -use crate::ActorDowncast; - /// The error type returned by actor method calls. 
#[derive(Error, Debug, Clone, PartialEq)] #[error("ActorError(exit_code: {exit_code:?}, msg: {msg})")] @@ -232,7 +230,12 @@ impl> ActorContext for Result { // TODO: remove once the runtime doesn't use anyhow::Result anymore impl From for ActorError { fn from(e: anyhow::Error) -> Self { - // THIS DEFAULT IS WRONG, it is just a placeholder - e.downcast_default(ExitCode::USR_ILLEGAL_ARGUMENT, "runtime error") + match e.downcast::() { + Ok(actor_err) => actor_err, + Err(other) => ActorError::unchecked( + ExitCode::USR_ILLEGAL_ARGUMENT, + format!("runtime error: {}", other), + ), + } } } diff --git a/actors/runtime/src/runtime/actor_blockstore.rs b/actors/runtime/src/runtime/actor_blockstore.rs index 3f3a0e754..e711a8798 100644 --- a/actors/runtime/src/runtime/actor_blockstore.rs +++ b/actors/runtime/src/runtime/actor_blockstore.rs @@ -19,9 +19,9 @@ impl fvm_ipld_blockstore::Blockstore for ActorBlockstore { fn get(&self, cid: &Cid) -> Result>, Self::Error> { // If this fails, the _CID_ is invalid. I.e., we have a bug. 
- fvm::ipld::get(cid).map(Some).map_err(|c| { - actor_error!(illegal_state; "get failed with {:?} on CID '{}'", c, cid).into() - }) + fvm::ipld::get(cid) + .map(Some) + .map_err(|c| actor_error!(illegal_state; "get failed with {:?} on CID '{}'", c, cid)) } fn put_keyed(&self, k: &Cid, block: &[u8]) -> Result<(), Self::Error> { @@ -29,7 +29,7 @@ impl fvm_ipld_blockstore::Blockstore for ActorBlockstore { .map_err(|e| actor_error!(serialization, e.to_string()))?; let k2 = self.put(code, &Block::new(k.codec(), block))?; if k != &k2 { - Err(actor_error!(serialization; "put block with cid {} but has cid {}", k, k2).into()) + Err(actor_error!(serialization; "put block with cid {} but has cid {}", k, k2)) } else { Ok(()) } diff --git a/actors/runtime/src/util/downcast.rs b/actors/runtime/src/util/downcast.rs deleted file mode 100644 index cf299fe2f..000000000 --- a/actors/runtime/src/util/downcast.rs +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2019-2022 ChainSafe Systems -// SPDX-License-Identifier: Apache-2.0, MIT - -use anyhow::anyhow; -use fvm_ipld_amt::Error as AmtError; -use fvm_ipld_hamt::Error as HamtError; -use fvm_shared::error::ExitCode; - -use crate::ActorError; - -/// Trait to allow multiple error types to be able to be downcasted into an `ActorError`. -pub trait ActorDowncast { - /// Downcast a dynamic std Error into an `ActorError`. If the error cannot be downcasted - /// into an ActorError automatically, use the provided `ExitCode` to generate a new error. - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError; - - /// Wrap the error with a message, without overwriting an exit code. 
- fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error; -} - -impl ActorDowncast for anyhow::Error { - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { - match downcast_util(self) { - Ok(actor_error) => actor_error.wrap(msg), - Err(other) => { - ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) - } - } - } - fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { - match downcast_util(self) { - Ok(actor_error) => anyhow!(actor_error.wrap(msg)), - Err(other) => anyhow!("{}: {}", msg.as_ref(), other), - } - } -} - -impl ActorDowncast for AmtError { - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { - match self { - // AmtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), - // todo: proper downcast - other => { - ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) - } - } - } - fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { - match self { - // AmtError::Dynamic(e) => e.downcast_wrap(msg), - // todo: proper downcast - other => anyhow!("{}: {}", msg.as_ref(), other), - } - } -} - -impl ActorDowncast for HamtError { - fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { - match self { - // HamtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), - // todo: proper downcast - other => { - ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) - } - } - } - fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { - match self { - // HamtError::Dynamic(e) => e.downcast_wrap(msg), - // todo: proper downcast - other => anyhow!("{}: {}", msg.as_ref(), other), - } - } -} - -/// Attempts to downcast a `Box` into an actor error. -/// Returns `Ok` with the actor error if it can be downcasted automatically -/// and returns `Err` with the original error if it cannot. 
-fn downcast_util(error: anyhow::Error) -> anyhow::Result { - // Check if error is ActorError, return as such - let error = match error.downcast::() { - Ok(actor_err) => return Ok(actor_err), - Err(other) => other, - }; - - // Could not be downcasted automatically to actor error, return initial dynamic error. - Err(error) -} diff --git a/actors/runtime/src/util/mod.rs b/actors/runtime/src/util/mod.rs index f47015884..d7d282804 100644 --- a/actors/runtime/src/util/mod.rs +++ b/actors/runtime/src/util/mod.rs @@ -1,14 +1,12 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -pub use self::downcast::*; pub use self::multimap::{EitherError as MultiMapEitherError, Error as MultiMapError, Multimap}; pub use self::set::Set; pub use self::set_multimap::SetMultimap; pub mod cbor; pub mod chaos; -mod downcast; mod multimap; mod set; mod set_multimap; diff --git a/actors/runtime/src/util/set.rs b/actors/runtime/src/util/set.rs index ca31b9ffc..277301c5e 100644 --- a/actors/runtime/src/util/set.rs +++ b/actors/runtime/src/util/set.rs @@ -74,7 +74,7 @@ where // Calls the for each function on the hamt with ignoring the value self.0.try_for_each(|s, _: &()| f(s)).map_err(|err| match err { fvm_ipld_hamt::EitherError::User(e) => e, - fvm_ipld_hamt::EitherError::Hamt(e) => e.into(), + fvm_ipld_hamt::EitherError::Hamt(e) => e, }) } diff --git a/test_vm/src/lib.rs b/test_vm/src/lib.rs index 21cb70c83..ceb8ea653 100644 --- a/test_vm/src/lib.rs +++ b/test_vm/src/lib.rs @@ -953,8 +953,8 @@ impl Error for TestVMError { } } -impl From for TestVMError { - fn from(h_err: fvm_ipld_hamt::Error) -> Self { +impl From> for TestVMError { + fn from(h_err: fvm_ipld_hamt::Error) -> Self { vm_err(h_err.to_string().as_str()) } } From 4f621b3e3a1a0274707da09526cdabcea648b57c Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Tue, 19 Apr 2022 18:15:15 +0200 Subject: [PATCH 07/10] fix: update dependencies --- Cargo.lock | 83 +++++++++++++++++++------------------- 
actors/init/Cargo.toml | 2 +- actors/market/Cargo.toml | 2 +- actors/miner/Cargo.toml | 2 +- actors/multisig/Cargo.toml | 2 +- actors/power/Cargo.toml | 2 +- actors/runtime/Cargo.toml | 2 +- actors/verifreg/Cargo.toml | 2 +- test_vm/Cargo.toml | 2 +- 9 files changed, 49 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 15cb65444..194f1dda1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -320,16 +320,16 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.8" +version = "3.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" +checksum = "3124f3f75ce09e22d1410043e1e24f2ecc44fad3afe4f08408f1f7663d68da2b" dependencies = [ "atty", "bitflags", "clap_derive", + "clap_lex", "indexmap", "lazy_static", - "os_str_bytes", "strsim", "termcolor", "textwrap", @@ -348,6 +348,15 @@ dependencies = [ "syn", ] +[[package]] +name = "clap_lex" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189ddd3b5d32a70b35e7686054371742a937b0d99128e76dde6340210e966669" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "concurrent-queue" version = "1.2.2" @@ -934,7 +943,6 @@ name = "fvm_ipld_amt" version = "0.4.0" dependencies = [ "ahash", - "anyhow", "cid", "fvm_ipld_blockstore", "fvm_ipld_encoding", @@ -959,7 +967,6 @@ dependencies = [ name = "fvm_ipld_blockstore" version = "0.1.0" dependencies = [ - "anyhow", "cid", ] @@ -982,7 +989,6 @@ dependencies = [ name = "fvm_ipld_encoding" version = "0.1.0" dependencies = [ - "anyhow", "cid", "cs_serde_bytes", "fvm_ipld_blockstore", @@ -995,11 +1001,8 @@ dependencies = [ [[package]] name = "fvm_ipld_hamt" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a03c6ae361a882360bc0c0f47265b294429f096baa8d9467247bbd62c6a6683c" +version = "0.5.0" dependencies = [ - "anyhow", "byteorder", "cid", "cs_serde_bytes", @@ -1062,7 +1065,6 @@ 
dependencies = [ name = "fvm_shared" version = "0.6.0" dependencies = [ - "anyhow", "bimap", "blake2b_simd", "byteorder", @@ -1112,9 +1114,9 @@ dependencies = [ [[package]] name = "gloo-timers" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d12a7f4e95cfe710f1d624fb1210b7d961a5fb05c4fd942f4feab06e61f590e" +checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" dependencies = [ "futures-channel", "futures-core", @@ -1208,9 +1210,9 @@ checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "671a26f820db17c2a2750743f1dd03bafd15b98c9f30c7c2628c024c05d73397" dependencies = [ "wasm-bindgen", ] @@ -1238,9 +1240,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.121" +version = "0.2.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efaa7b300f3b5fe8eb6bf21ce3895e1751d9665086af2d64b42f19701015ff4f" +checksum = "21a41fed9d98f27ab1c6d161da622a4fa35e8a54a8adc24bbf3ddd0ef70b0e50" [[package]] name = "libipld-core" @@ -1425,9 +1427,6 @@ name = "os_str_bytes" version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] [[package]] name = "parking" @@ -1502,18 +1501,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" dependencies = [ "unicode-xid", ] 
[[package]] name = "quote" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" +checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1" dependencies = [ "proc-macro2", ] @@ -1691,9 +1690,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "syn" -version = "1.0.90" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704df27628939572cd88d33f171cd6f896f4eaca85252c6e0a72d8d8287ee86f" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" dependencies = [ "proc-macro2", "quote", @@ -1790,9 +1789,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] @@ -1845,9 +1844,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "27370197c907c55e3f1a9fbe26f44e937fe6451368324e009cba39e139dc08ad" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -1855,9 +1854,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "53e04185bfa3a779273da532f5025e33398409573f348985af9a1cbf3774d3f4" dependencies = [ "bumpalo", "lazy_static", @@ -1870,9 +1869,9 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = "6f741de44b75e14c35df886aff5f1eb73aa114fa5d4d00dcd37b5e01259bf3b2" dependencies = [ "cfg-if", "js-sys", @@ -1882,9 +1881,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = "17cae7ff784d7e83a2fe7611cfe766ecf034111b49deb850a3dc7699c08251f5" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -1892,9 +1891,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "99ec0dc7a4756fffc231aab1b9f2f578d23cd391390ab27f952ae0c9b3ece20b" dependencies = [ "proc-macro2", "quote", @@ -1905,15 +1904,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "d554b7f530dee5964d9a9468d95c1f8b8acae4f282807e7d27d4b03099a46744" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "7b17e741662c70c8bd24ac5c5b18de314a2c26c32bf8346ee1e6f53de919c283" dependencies = [ "js-sys", "wasm-bindgen", diff --git a/actors/init/Cargo.toml b/actors/init/Cargo.toml index ec8253e93..e252dfac6 100644 --- a/actors/init/Cargo.toml +++ b/actors/init/Cargo.toml @@ -16,7 +16,7 @@ crate-type = ["cdylib", "lib"] 
[dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } fvm_shared = { version = "0.6.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } num-traits = "0.2.14" num-derive = "0.3.3" diff --git a/actors/market/Cargo.toml b/actors/market/Cargo.toml index 363b9b15a..58ca56ab2 100644 --- a/actors/market/Cargo.toml +++ b/actors/market/Cargo.toml @@ -15,7 +15,7 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" fvm_shared = { version = "0.6.0", default-features = false } fvm_ipld_bitfield = "0.5.0" num-traits = "0.2.14" diff --git a/actors/miner/Cargo.toml b/actors/miner/Cargo.toml index 4af84b0c3..bb89abe4f 100644 --- a/actors/miner/Cargo.toml +++ b/actors/miner/Cargo.toml @@ -18,7 +18,7 @@ fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features fvm_shared = { version = "0.6.0", default-features = false } fvm_ipld_bitfield = "0.5.0" fvm_ipld_amt = { version = "0.4.0", features = ["go-interop"] } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" serde = { version = "1.0.136", features = ["derive"] } cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } num-traits = "0.2.14" diff --git a/actors/multisig/Cargo.toml b/actors/multisig/Cargo.toml index 52be22713..b66e00e91 100644 --- a/actors/multisig/Cargo.toml +++ b/actors/multisig/Cargo.toml @@ -16,7 +16,7 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } fvm_shared = { version = "0.6.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } diff 
--git a/actors/power/Cargo.toml b/actors/power/Cargo.toml index 3270ee307..1764fd542 100644 --- a/actors/power/Cargo.toml +++ b/actors/power/Cargo.toml @@ -16,7 +16,7 @@ crate-type = ["cdylib", "lib"] [dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["fil-actor"] } fvm_shared = { version = "0.6.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" log = "0.4.14" diff --git a/actors/runtime/Cargo.toml b/actors/runtime/Cargo.toml index acc8418bf..dc7e45e85 100644 --- a/actors/runtime/Cargo.toml +++ b/actors/runtime/Cargo.toml @@ -8,7 +8,7 @@ edition = "2021" repository = "https://github.com/filecoin-project/builtin-actors" [dependencies] -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" fvm_ipld_amt = { version = "0.4.0", features = ["go-interop"] } fvm_shared = { version = "0.6.0", default-features = false } fvm_ipld_bitfield = "0.5.0" diff --git a/actors/verifreg/Cargo.toml b/actors/verifreg/Cargo.toml index 62e9dbd08..8680859f6 100644 --- a/actors/verifreg/Cargo.toml +++ b/actors/verifreg/Cargo.toml @@ -21,7 +21,7 @@ num-traits = "0.2.14" num-derive = "0.3.3" cid = { version = "0.8.3", default-features = false, features = ["serde-codec"] } lazy_static = "1.4.0" -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" diff --git a/test_vm/Cargo.toml b/test_vm/Cargo.toml index 59767fe42..2b06317a0 100644 --- a/test_vm/Cargo.toml +++ b/test_vm/Cargo.toml @@ -26,7 +26,7 @@ lazy_static = "1.4.0" fvm_shared = { version = "0.6.0", default-features = false } fvm_ipld_encoding = { version = "0.1.0", default-features = false } fvm_ipld_blockstore = { version = "0.1.0", default-features = false } -fvm_ipld_hamt = "0.4.0" +fvm_ipld_hamt = "0.5.0" num-traits = "0.2.14" num-derive = "0.3.3" log = "0.4.14" From 9d6c88ce89051b8d519d65b0e4710b214c92462e Mon Sep 17 00:00:00 2001 From: dignifiedquire 
Date: Tue, 19 Apr 2022 18:34:45 +0200 Subject: [PATCH 08/10] cr: use custom error for bitfield_queue --- Cargo.lock | 1 + actors/miner/Cargo.toml | 1 + actors/miner/src/bitfield_queue.rs | 68 +++++++++++++++++------------- actors/miner/src/deadline_state.rs | 8 ++-- 4 files changed, 45 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 194f1dda1..bf32b175e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -664,6 +664,7 @@ dependencies = [ "num-traits", "rand", "serde", + "thiserror", ] [[package]] diff --git a/actors/miner/Cargo.toml b/actors/miner/Cargo.toml index bb89abe4f..525e498ca 100644 --- a/actors/miner/Cargo.toml +++ b/actors/miner/Cargo.toml @@ -29,6 +29,7 @@ byteorder = "1.4.3" itertools = "0.10.3" fvm_ipld_blockstore = { version = "0.1" } fvm_ipld_encoding = "0.1.0" +thiserror = "1.0" [dev-dependencies] fil_actors_runtime = { version = "8.0.0-alpha.1", path = "../runtime", features = ["test_utils", "sector-default"] } diff --git a/actors/miner/src/bitfield_queue.rs b/actors/miner/src/bitfield_queue.rs index cc002b5e9..3b5dacc89 100644 --- a/actors/miner/src/bitfield_queue.rs +++ b/actors/miner/src/bitfield_queue.rs @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0, MIT use std::convert::TryInto; +use std::num::TryFromIntError; use cid::Cid; -use fil_actors_runtime::{ActorContext, ActorError, Array}; +use fil_actors_runtime::{ActorError, Array}; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -18,8 +19,28 @@ pub struct BitFieldQueue<'db, BS> { quant: QuantSpec, } +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("amt {0}")] + Amt(#[from] AmtError), + #[error("conversion failure {0}")] + Int(#[from] TryFromIntError), + #[error("bitfield {0}")] + Bitfield(#[from] fvm_ipld_bitfield::OutOfRangeError), +} + +impl From> for ActorError { + fn from(e: Error) -> Self { + match e { + Error::Amt(e) => e.into(), + Error::Int(e) => e.into(), + Error::Bitfield(e) => 
e.into(), + } + } +} + impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { - pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { + pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { Ok(Self { amt: Array::load(root, store)?, quant }) } @@ -28,7 +49,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { &mut self, raw_epoch: ChainEpoch, values: &BitField, - ) -> Result<(), ActorError> { + ) -> Result<(), Error> { if values.is_empty() { // nothing to do. return Ok(()); @@ -36,16 +57,9 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { let epoch: u64 = self.quant.quantize_up(raw_epoch).try_into()?; - let bitfield = self - .amt - .get(epoch) - .with_context(|| format!("failed to lookup queue epoch {}", epoch))? - .cloned() - .unwrap_or_default(); + let bitfield = self.amt.get(epoch)?.cloned().unwrap_or_default(); - self.amt - .set(epoch, &bitfield | values) - .with_context(|| format!("failed to set queue epoch {}", epoch))?; + self.amt.set(epoch, &bitfield | values)?; Ok(()) } @@ -54,7 +68,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { &mut self, epoch: ChainEpoch, values: impl IntoIterator, - ) -> Result<(), ActorError> { + ) -> Result<(), Error> { self.add_to_queue(epoch, &BitField::try_from_bits(values)?) } @@ -62,24 +76,20 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { /// shifting other bits down and removing any newly empty entries. /// /// See the docs on `BitField::cut` to better understand what it does. 
- pub fn cut(&mut self, to_cut: &BitField) -> Result<(), ActorError> { + pub fn cut(&mut self, to_cut: &BitField) -> Result<(), Error> { let mut epochs_to_remove = Vec::::new(); - self.amt - .for_each_mut(|epoch, bitfield| { - let bf = bitfield.cut(to_cut); + self.amt.for_each_mut(|epoch, bitfield| { + let bf = bitfield.cut(to_cut); - if bf.is_empty() { - epochs_to_remove.push(epoch); - } else { - **bitfield = bf; - } - }) - .context("failed to cut from bitfield queue")?; + if bf.is_empty() { + epochs_to_remove.push(epoch); + } else { + **bitfield = bf; + } + })?; - self.amt - .batch_delete(epochs_to_remove, true) - .context("failed to remove empty epochs from bitfield queue")?; + self.amt.batch_delete(epochs_to_remove, true)?; Ok(()) } @@ -87,7 +97,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { pub fn add_many_to_queue_values( &mut self, values: impl IntoIterator, - ) -> Result<(), ActorError> { + ) -> Result<(), Error> { // Pre-quantize to reduce the number of updates. let mut quantized_values: Vec<_> = values .into_iter() @@ -112,7 +122,7 @@ impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { /// Removes and returns all values with keys less than or equal to until. /// Modified return value indicates whether this structure has been changed by the call. 
- pub fn pop_until(&mut self, until: ChainEpoch) -> Result<(BitField, bool), ActorError> { + pub fn pop_until(&mut self, until: ChainEpoch) -> Result<(BitField, bool), Error> { let mut popped_values = BitField::new(); let mut popped_keys = Vec::::new(); diff --git a/actors/miner/src/deadline_state.rs b/actors/miner/src/deadline_state.rs index 744d8fc53..0ed0c0d5b 100644 --- a/actors/miner/src/deadline_state.rs +++ b/actors/miner/src/deadline_state.rs @@ -279,11 +279,11 @@ impl Deadline { return Ok(()); } - let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load expiration queue")?; + let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; + queue - .add_to_queue_values(expiration_epoch, partitions.iter().copied()) - .context("failed to mutate expiration queue")?; + .add_to_queue_values(expiration_epoch, partitions.iter().copied())?; + self.expirations_epochs = queue.amt.flush().context("failed to save expiration queue")?; Ok(()) From 0e32a83b072d5567f8d5230d6fa45c45489f3554 Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Fri, 22 Apr 2022 16:04:04 +0200 Subject: [PATCH 09/10] fixup --- Cargo.lock | 4 ---- actors/market/tests/market_actor_test.rs | 1 - 2 files changed, 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf32b175e..370e6ccc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1958,7 +1958,3 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" - -[[patch.unused]] -name = "fvm_ipld_hamt" -version = "0.5.0" diff --git a/actors/market/tests/market_actor_test.rs b/actors/market/tests/market_actor_test.rs index 7ae9ea9b8..c9e3f66ed 100644 --- a/actors/market/tests/market_actor_test.rs +++ b/actors/market/tests/market_actor_test.rs @@ -2475,7 +2475,6 @@ where dobe.for_each(epoch, |id| { assert_eq!(epoch % deal_updates_interval, (id as i64) % 
deal_updates_interval); count += 1; - Ok(()) }) .unwrap(); assert_eq!(n, count, "unexpected deal count at epoch {}", epoch); From 4642af39bd79c9d410b2083127a581acb229dc1b Mon Sep 17 00:00:00 2001 From: dignifiedquire Date: Thu, 28 Apr 2022 20:00:22 +0200 Subject: [PATCH 10/10] refactor to use apply exit codes in the actor directly --- Cargo.lock | 20 +- actors/account/src/lib.rs | 5 +- actors/init/src/lib.rs | 10 +- actors/init/src/state.rs | 16 +- actors/market/src/balance_table.rs | 16 +- actors/market/src/lib.rs | 182 +++++++++------- actors/market/src/state.rs | 97 ++++++--- actors/market/tests/market_actor_test.rs | 6 +- actors/miner/src/bitfield_queue.rs | 12 +- actors/miner/src/deadline_state.rs | 264 +++++++++++++++-------- actors/miner/src/deadlines.rs | 24 ++- actors/miner/src/expiration_queue.rs | 223 ++++++++++--------- actors/miner/src/lib.rs | 123 ++++++----- actors/miner/src/partition_state.rs | 108 ++++++---- actors/miner/src/sector_map.rs | 27 ++- actors/miner/src/sectors.rs | 19 +- actors/miner/src/state.rs | 134 +++++++----- actors/miner/tests/util.rs | 6 +- actors/multisig/src/lib.rs | 67 ++++-- actors/multisig/src/state.rs | 14 +- actors/paych/src/lib.rs | 30 ++- actors/power/src/lib.rs | 79 ++++--- actors/power/src/state.rs | 46 ++-- actors/power/tests/harness/mod.rs | 1 - actors/reward/src/lib.rs | 9 +- actors/runtime/src/actor_error.rs | 159 +++++--------- actors/runtime/src/util/chaos/mod.rs | 8 +- actors/system/src/lib.rs | 13 +- actors/verifreg/src/lib.rs | 148 ++++++++----- 29 files changed, 1112 insertions(+), 754 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 370e6ccc7..7d8e3ebfa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,9 +15,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27" +checksum = 
"08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc" [[package]] name = "arrayref" @@ -178,9 +178,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74" [[package]] name = "base64" @@ -320,9 +320,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.10" +version = "3.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3124f3f75ce09e22d1410043e1e24f2ecc44fad3afe4f08408f1f7663d68da2b" +checksum = "7c167e37342afc5f33fd87bbc870cedd020d2a6dffa05d45ccd9241fbdd146db" dependencies = [ "atty", "bitflags", @@ -1377,9 +1377,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", @@ -1437,9 +1437,9 @@ checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" diff --git a/actors/account/src/lib.rs b/actors/account/src/lib.rs index f2838ba74..64da02615 100644 --- a/actors/account/src/lib.rs +++ b/actors/account/src/lib.rs @@ -4,14 +4,15 @@ use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::address::{Address, Protocol}; +use 
fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; use fil_actors_runtime::builtin::singletons::SYSTEM_ACTOR_ADDR; -use fil_actors_runtime::cbor; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{actor_error, ActorError}; +use fil_actors_runtime::{cbor, ActorContext2}; pub use self::state::State; @@ -80,7 +81,7 @@ impl ActorCode for Actor { } Some(Method::PubkeyAddress) => { let addr = Self::pubkey_address(rt)?; - Ok(RawBytes::serialize(addr)?) + Ok(RawBytes::serialize(addr).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/init/src/lib.rs b/actors/init/src/lib.rs index e5dc7508a..e3a67874a 100644 --- a/actors/init/src/lib.rs +++ b/actors/init/src/lib.rs @@ -3,11 +3,12 @@ use cid::Cid; use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, cbor, ActorContext, ActorError, SYSTEM_ACTOR_ADDR}; +use fil_actors_runtime::{actor_error, cbor, ActorContext2, ActorError, SYSTEM_ACTOR_ADDR}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; use fvm_shared::address::Address; +use fvm_shared::error::ExitCode; use fvm_shared::{ActorID, MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; @@ -42,8 +43,7 @@ impl Actor { { let sys_ref: &Address = &SYSTEM_ACTOR_ADDR; rt.validate_immediate_caller_is(std::iter::once(sys_ref))?; - let state = State::new(rt.store(), params.network_name) - .context("failed to construct init actor state")?; + let state = State::new(rt.store(), params.network_name)?; rt.create(&state)?; @@ -85,7 +85,7 @@ impl Actor { // Store mapping of pubkey or actor address to actor ID let id_address: ActorID = rt.transaction(|s: &mut State, rt| { s.map_address_to_new_id(rt.store(), &robust_address) - .context("failed to allocate ID 
address") + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to allocate ID address") })?; // Create an empty actor @@ -121,7 +121,7 @@ impl ActorCode for Actor { } Some(Method::Exec) => { let res = Self::exec(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/init/src/state.rs b/actors/init/src/state.rs index b640e4b6b..4abb6a208 100644 --- a/actors/init/src/state.rs +++ b/actors/init/src/state.rs @@ -2,15 +2,16 @@ // SPDX-License-Identifier: Apache-2.0, MIT use cid::Cid; +use fil_actors_runtime::ActorError; use fil_actors_runtime::{ - make_empty_map, make_map_with_root_and_bitwidth, FIRST_NON_SINGLETON_ADDR, + make_empty_map, make_map_with_root_and_bitwidth, ActorContext2, FIRST_NON_SINGLETON_ADDR, }; -use fil_actors_runtime::{ActorContext, ActorError}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::Cbor; use fvm_ipld_hamt::Error as HamtError; use fvm_shared::address::{Address, Protocol}; +use fvm_shared::error::ExitCode; use fvm_shared::{ActorID, HAMT_BIT_WIDTH}; /// State is reponsible for creating @@ -25,7 +26,7 @@ impl State { pub fn new(store: &BS, network_name: String) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .context("failed to create empty map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create empty map")?; Ok(Self { address_map: empty_map, next_id: FIRST_NON_SINGLETON_ADDR, network_name }) } @@ -66,9 +67,14 @@ impl State { return Ok(Some(*addr)); } - let map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH)?; + let map = make_map_with_root_and_bitwidth(&self.address_map, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; - Ok(map.get(&addr.to_bytes())?.copied().map(Address::new_id)) + Ok(map + .get(&addr.to_bytes()) + 
.exit_code(ExitCode::USR_ILLEGAL_STATE)? + .copied() + .map(Address::new_id)) } } diff --git a/actors/market/src/balance_table.rs b/actors/market/src/balance_table.rs index 95b7a9cd0..4a7a41f1b 100644 --- a/actors/market/src/balance_table.rs +++ b/actors/market/src/balance_table.rs @@ -4,13 +4,13 @@ use cid::Cid; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_hamt::Error as HamtError; -use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::econ::TokenAmount; +use fvm_shared::{address::Address, error::ExitCode}; use num_traits::{Signed, Zero}; use fil_actors_runtime::{ - actor_error, make_empty_map, make_map_with_root_and_bitwidth, ActorError, Map, + actor_error, make_empty_map, make_map_with_root_and_bitwidth, ActorContext2, ActorError, Map, }; pub const BALANCE_TABLE_BITWIDTH: u32 = 6; @@ -47,7 +47,7 @@ where /// Adds token amount to previously initialized account. pub fn add(&mut self, key: &Address, value: &TokenAmount) -> Result<(), ActorError> { - let prev = self.get(key)?; + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; let sum = &prev + value; if sum.is_negative() { return Err(actor_error!( @@ -57,10 +57,12 @@ where )); } if sum.is_zero() && !prev.is_zero() { - self.0.delete(&key.to_bytes())?; + self.0.delete(&key.to_bytes()).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } else { - self.0.set(key.to_bytes().into(), BigIntDe(sum))?; + self.0 + .set(key.to_bytes().into(), BigIntDe(sum)) + .exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } } @@ -74,7 +76,7 @@ where req: &TokenAmount, floor: &TokenAmount, ) -> Result { - let prev = self.get(key)?; + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; let available = std::cmp::max(TokenAmount::zero(), prev - floor); let sub: TokenAmount = std::cmp::min(&available, req).clone(); @@ -87,7 +89,7 @@ where /// Subtracts value from a balance, and errors if full amount was not substracted. 
pub fn must_subtract(&mut self, key: &Address, req: &TokenAmount) -> Result<(), ActorError> { - let prev = self.get(key)?; + let prev = self.get(key).exit_code(ExitCode::USR_SERIALIZATION)?; if req > &prev { return Err(actor_error!(illegal_argument, "couldn't subtract the requested amount")); diff --git a/actors/market/src/lib.rs b/actors/market/src/lib.rs index 1a9bf23fa..4b1abce45 100644 --- a/actors/market/src/lib.rs +++ b/actors/market/src/lib.rs @@ -12,6 +12,7 @@ use fvm_shared::bigint::BigInt; use fvm_shared::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED}; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::piece::PieceInfo; use fvm_shared::reward::ThisEpochRewardReturn; use fvm_shared::sector::StoragePower; @@ -23,8 +24,9 @@ use num_traits::{FromPrimitive, Signed, Zero}; use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorContext, ActorError, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, - REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + VERIFIED_REGISTRY_ACTOR_ADDR, }; use crate::ext::verifreg::UseBytesParams; @@ -59,7 +61,8 @@ where RawBytes::default(), TokenAmount::zero(), )?; - let addrs: ext::miner::GetControlAddressesReturnParams = ret.deserialize()?; + let addrs: ext::miner::GetControlAddressesReturnParams = + ret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok((addrs.owner, addrs.worker, addrs.control_addresses)) } @@ -172,7 +175,7 @@ impl Actor { .as_ref() .unwrap() .get(&nominal) - .context("failed to get locked balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; let ex = msm .escrow_table @@ -323,12 +326,11 @@ impl Actor { 
// check proposalCids for duplication within message batch // check state PendingProposals for duplication across messages - let duplicate_in_state = msm - .pending_deals - .as_ref() - .unwrap() - .has(&pcid.to_bytes()) - .context("failed to check for existence of deal proposal")?; + let duplicate_in_state = + msm.pending_deals.as_ref().unwrap().has(&pcid.to_bytes()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to check for existence of deal proposal", + )?; let duplicate_in_message = proposal_cid_lookup.contains(&pcid); if duplicate_in_state || duplicate_in_message { info!("invalid deal {}: cannot publish duplicate deal proposal", di); @@ -344,7 +346,8 @@ impl Actor { RawBytes::serialize(UseBytesParams { address: client, deal_size: BigInt::from(deal.proposal.piece_size.0), - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ) { info!("invalid deal {}: failed to acquire datacap exitcode: {}", di, e); @@ -402,12 +405,12 @@ impl Actor { .as_mut() .unwrap() .put(pcid.to_bytes().into()) - .context("failed to set pending deal")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set pending deal")?; msm.deal_proposals .as_mut() .unwrap() .set(id, valid_deal.proposal.clone()) - .context("failed to set deal")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal")?; // We randomize the first epoch for when the deal will be processed so an attacker isn't able to // schedule too many deals for the same tick. 
@@ -418,7 +421,7 @@ impl Actor { .as_mut() .unwrap() .put(process_epoch, id) - .context("failed to set deal ops by epoch")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal ops by epoch")?; new_deal_ids.push(id); } @@ -447,8 +450,8 @@ impl Actor { let curr_epoch = rt.curr_epoch(); let st: State = rt.state()?; - let proposals = - DealArray::load(&st.proposals, rt.store()).context("failed to load deal proposals")?; + let proposals = DealArray::load(&st.proposals, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals")?; let mut weights = Vec::with_capacity(params.sectors.len()); for sector in params.sectors.iter() { @@ -499,8 +502,12 @@ impl Actor { for deal_id in params.deal_ids { // This construction could be replaced with a single "update deal state" // state method, possibly batched over all deal ids at once. - let s = - msm.deal_states.as_ref().unwrap().get(deal_id).with_context(|| { + let s = msm + .deal_states + .as_ref() + .unwrap() + .get(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to get state for deal_id ({})", deal_id) })?; if s.is_some() { @@ -516,19 +523,24 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .with_context(|| format!("failed to get deal_id ({})", deal_id))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) + })? 
.ok_or_else(|| actor_error!(not_found, "no such deal_id: {}", deal_id))?; - let propc = proposal - .cid() - .map_err(|e| ActorError::from(e).wrap("failed to calculate proposal Cid"))?; + let propc = proposal.cid().context_code( + ExitCode::USR_SERIALIZATION, + "failed to calculate proposal Cid", + )?; let has = msm .pending_deals .as_ref() .unwrap() .has(&propc.to_bytes()) - .with_context(|| format!("failed to get pending proposal ({})", propc))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get pending proposal ({})", propc) + })?; if !has { return Err(actor_error!( @@ -549,10 +561,13 @@ impl Actor { slash_epoch: EPOCH_UNDEFINED, }, ) - .with_context(|| format!("failed to set deal state {}", deal_id))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set deal state {}", deal_id) + })?; } - msm.commit_state().context("failed to flush state")?; + msm.commit_state() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush state")?; Ok(()) })?; @@ -586,7 +601,7 @@ impl Actor { .as_ref() .unwrap() .get(id) - .context("failed to get deal proposal")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal proposal")?; // The deal may have expired and been deleted before the sector is terminated. // Nothing to do, but continue execution for the other deals. if deal.is_none() { @@ -616,7 +631,7 @@ impl Actor { .as_ref() .unwrap() .get(id) - .context("failed to get deal state")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state")? // A deal with a proposal but no state is not activated, but then it should not be // part of a sector that is terminating. 
.ok_or_else(|| actor_error!(illegal_argument, "no state for deal {}", id))?; @@ -635,7 +650,9 @@ impl Actor { .as_mut() .unwrap() .set(id, state) - .with_context(|| format!("failed to set deal state ({}", id))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set deal state ({}", id) + })?; } msm.commit_state().context("failed to flush state")?; @@ -656,23 +673,27 @@ impl Actor { let st: State = rt.state()?; - let proposals = - DealArray::load(&st.proposals, rt.store()).context("failed to load deal proposals")?; + let proposals = DealArray::load(&st.proposals, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deal proposals")?; let mut commds = Vec::with_capacity(params.inputs.len()); for comm_input in params.inputs.iter() { let mut pieces: Vec = Vec::with_capacity(comm_input.deal_ids.len()); for deal_id in &comm_input.deal_ids { let deal = proposals .get(*deal_id) - .with_context(|| format!("failed to get deal_id ({})", deal_id))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) + })? .ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) })?; pieces.push(PieceInfo { cid: deal.piece_cid, size: deal.piece_size }); } - let commd = rt - .compute_unsealed_sector_cid(comm_input.sector_type, &pieces) - .context("failed to compute unsealed sector CID")?; + let commd = + rt.compute_unsealed_sector_cid(comm_input.sector_type, &pieces).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to compute unsealed sector CID", + )?; commds.push(commd); } @@ -716,7 +737,7 @@ impl Actor { .for_each(i, |deal_id| { deal_ids.push(deal_id); }) - .context("failed to set deal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal state")?; for deal_id in deal_ids { let deal = msm @@ -724,15 +745,16 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .with_context(|| format!("failed to get deal_id ({})", deal_id))? 
+ .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get deal_id ({})", deal_id) + })? .ok_or_else(|| { actor_error!(not_found, "proposal doesn't exist ({})", deal_id) })? .clone(); - let dcid = deal.cid().map_err(|e| { - ActorError::from(e) - .wrap(format!("failed to calculate cid for proposal {}", deal_id)) + let dcid = deal.cid().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to calculate cid for proposal {}", deal_id) })?; let state = msm @@ -740,7 +762,7 @@ impl Actor { .as_ref() .unwrap() .get(deal_id) - .context("failed to get deal state")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get deal state")? .cloned(); // deal has been published but not activated yet -> terminate it @@ -765,10 +787,14 @@ impl Actor { } // Delete the proposal (but not state, which doesn't exist). - let deleted = - msm.deal_proposals.as_mut().unwrap().delete(deal_id).with_context( - || format!("failed to delete deal proposal {}", deal_id), - )?; + let deleted = msm + .deal_proposals + .as_mut() + .unwrap() + .delete(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete deal proposal {}", deal_id) + })?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -782,7 +808,7 @@ impl Actor { .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to delete pending proposal {}", deal_id) })? .ok_or_else(|| { @@ -801,7 +827,9 @@ impl Actor { .as_mut() .unwrap() .delete(&dcid.to_bytes()) - .with_context(|| format!("failed to delete pending proposal {}", dcid))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete pending proposal {}", dcid) + })? .ok_or_else(|| { actor_error!( illegal_state, @@ -836,12 +864,11 @@ impl Actor { amount_slashed += slash_amount; // Delete proposal and state simultaneously. 
- let deleted = msm - .deal_states - .as_mut() - .unwrap() - .delete(deal_id) - .context("failed to delete deal state")?; + let deleted = + msm.deal_states.as_mut().unwrap().delete(deal_id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete deal state", + )?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -849,12 +876,11 @@ impl Actor { )); } - let deleted = msm - .deal_proposals - .as_mut() - .unwrap() - .delete(deal_id) - .context("failed to delete deal proposal")?; + let deleted = + msm.deal_proposals.as_mut().unwrap().delete(deal_id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete deal proposal", + )?; if deleted.is_none() { return Err(actor_error!( illegal_state, @@ -879,11 +905,10 @@ impl Actor { } state.last_updated_epoch = curr_epoch; - msm.deal_states - .as_mut() - .unwrap() - .set(deal_id, state) - .context("failed to set deal state")?; + msm.deal_states.as_mut().unwrap().set(deal_id, state).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to set deal state", + )?; if let Some(ev) = updates_needed.get_mut(&next_epoch) { ev.push(deal_id); @@ -896,7 +921,9 @@ impl Actor { .as_mut() .unwrap() .remove_all(i) - .with_context(|| format!("failed to delete deal ops for epoch {}", i))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete deal ops for epoch {}", i) + })?; } // updates_needed is already sorted by epoch. 
@@ -905,7 +932,9 @@ impl Actor { .as_mut() .unwrap() .put_many(epoch, &deals) - .with_context(|| format!("failed to reinsert deal IDs for epoch {}", epoch))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to reinsert deal IDs for epoch {}", epoch) + })?; } msm.st.last_cron = rt.curr_epoch(); @@ -921,7 +950,8 @@ impl Actor { RawBytes::serialize(ext::verifreg::RestoreBytesParams { address: d.client, deal_size: BigInt::from(d.piece_size.0), - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); if let Err(e) = res { @@ -957,7 +987,7 @@ pub fn validate_deals_for_activation( where BS: Blockstore, { - let proposals = DealArray::load(&st.proposals, store)?; + let proposals = DealArray::load(&st.proposals, store).exit_code(ExitCode::USR_SERIALIZATION)?; validate_and_compute_deal_weight(&proposals, deal_ids, miner_addr, sector_expiry, curr_epoch) } @@ -985,7 +1015,8 @@ where )); } let proposal = proposals - .get(*deal_id)? + .get(*deal_id) + .exit_code(ExitCode::USR_SERIALIZATION)? 
.ok_or_else(|| actor_error!(not_found, "no such deal {}", deal_id))?; validate_deal_can_activate(proposal, miner_addr, sector_expiry, sector_activation) @@ -1145,7 +1176,7 @@ where // Generate unsigned bytes let sv_bz = serialize_vec(&proposal.proposal, "deal proposal")?; rt.verify_signature(&proposal.client_signature, &proposal.proposal.client, &sv_bz) - .context("signature proposal invalid")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "signature proposal invalid")?; Ok(()) } @@ -1190,7 +1221,7 @@ where RawBytes::default(), 0.into(), )?; - let ret: ThisEpochRewardReturn = rwret.deserialize()?; + let ret: ThisEpochRewardReturn = rwret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(ret.this_epoch_baseline_power) } @@ -1209,7 +1240,8 @@ where RawBytes::default(), 0.into(), )?; - let ret: ext::power::CurrentTotalPowerReturnParams = rwret.deserialize()?; + let ret: ext::power::CurrentTotalPowerReturnParams = + rwret.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok((ret.raw_byte_power, ret.quality_adj_power)) } @@ -1234,15 +1266,15 @@ impl ActorCode for Actor { } Some(Method::WithdrawBalance) => { let res = Self::withdraw_balance(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::PublishStorageDeals) => { let res = Self::publish_storage_deals(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::VerifyDealsForActivation) => { let res = Self::verify_deals_for_activation(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} Some(Method::ActivateDeals) => { Self::activate_deals(rt, cbor::deserialize_params(params)?)?; @@ -1254,7 +1286,7 @@ impl ActorCode for Actor { } Some(Method::ComputeDataCommitment) => { let res = Self::compute_data_commitment(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::CronTick) => { Self::cron_tick(rt)?; diff --git a/actors/market/src/state.rs b/actors/market/src/state.rs index 780130261..cd3e80412 100644 --- a/actors/market/src/state.rs +++ b/actors/market/src/state.rs @@ -3,6 +3,7 @@ use crate::balance_table::BalanceTable; use cid::Cid; +use fil_actors_runtime::ActorContext2; use fil_actors_runtime::{ actor_error, make_empty_map, runtime::Policy, ActorContext, ActorError, Array, Set, SetMultimap, }; @@ -14,6 +15,7 @@ use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, EPOCH_UNDEFINED}; use fvm_shared::deal::DealID; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::HAMT_BIT_WIDTH; use num_traits::{Signed, Zero}; @@ -70,19 +72,27 @@ impl State { let empty_proposals_array = Array::<(), BS>::new_with_bit_width(store, PROPOSALS_AMT_BITWIDTH) .flush() - .context("Failed to create empty proposals array")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty proposals array", + )?; let empty_states_array = Array::<(), BS>::new_with_bit_width(store, STATES_AMT_BITWIDTH) .flush() - .context("Failed to create empty states array")?; - - let empty_pending_proposals_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) - .flush() - .context("Failed to create empty pending proposals map state")?; - let empty_balance_table = - BalanceTable::new(store).root().context("Failed to create empty balance table map")?; - - let empty_deal_ops_hamt = - SetMultimap::new(store).root().context("Failed to create empty multiset")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty 
states array")?; + + let empty_pending_proposals_map = + make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH).flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty pending proposals map state", + )?; + let empty_balance_table = BalanceTable::new(store).root().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty balance table map", + )?; + + let empty_deal_ops_hamt = SetMultimap::new(store) + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty multiset")?; Ok(Self { proposals: empty_proposals_array, @@ -214,15 +224,24 @@ where pub(super) fn build(&mut self) -> Result<&mut Self, ActorError> { if self.proposal_permit != Permission::Invalid { - self.deal_proposals = Some(DealArray::load(&self.st.proposals, self.store)?); + self.deal_proposals = Some( + DealArray::load(&self.st.proposals, self.store) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.state_permit != Permission::Invalid { - self.deal_states = Some(DealMetaArray::load(&self.st.states, self.store)?); + self.deal_states = Some( + DealMetaArray::load(&self.st.states, self.store) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.locked_permit != Permission::Invalid { - self.locked_table = Some(BalanceTable::from_root(self.store, &self.st.locked_table)?); + self.locked_table = Some( + BalanceTable::from_root(self.store, &self.st.locked_table) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); self.total_client_locked_collateral = Some(self.st.total_client_locked_collateral.clone()); self.total_client_storage_fee = Some(self.st.total_client_storage_fee.clone()); @@ -231,16 +250,24 @@ where } if self.escrow_permit != Permission::Invalid { - self.escrow_table = Some(BalanceTable::from_root(self.store, &self.st.escrow_table)?); + self.escrow_table = Some( + BalanceTable::from_root(self.store, &self.st.escrow_table) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.pending_permit != Permission::Invalid { - self.pending_deals = 
Some(Set::from_root(self.store, &self.st.pending_proposals)?); + self.pending_deals = Some( + Set::from_root(self.store, &self.st.pending_proposals) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } if self.dpe_permit != Permission::Invalid { - self.deals_by_epoch = - Some(SetMultimap::from_root(self.store, &self.st.deal_ops_by_epoch)?); + self.deals_by_epoch = Some( + SetMultimap::from_root(self.store, &self.st.deal_ops_by_epoch) + .exit_code(ExitCode::USR_SERIALIZATION)?, + ); } self.next_deal_id = self.st.next_id; @@ -281,19 +308,25 @@ where pub(super) fn commit_state(&mut self) -> Result<(), ActorError> { if self.proposal_permit == Permission::Write { if let Some(s) = &mut self.deal_proposals { - self.st.proposals = s.flush().context("failed to flush deal proposals")?; + self.st.proposals = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal proposals")?; } } if self.state_permit == Permission::Write { if let Some(s) = &mut self.deal_states { - self.st.states = s.flush().context("failed to flush deal states")?; + self.st.states = s + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal states")?; } } if self.locked_permit == Permission::Write { if let Some(s) = &mut self.locked_table { - self.st.locked_table = s.root().context("failed to flush locked table")?; + self.st.locked_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush locked table")?; } if let Some(s) = &mut self.total_client_locked_collateral { self.st.total_client_locked_collateral = s.clone(); @@ -308,19 +341,25 @@ where if self.escrow_permit == Permission::Write { if let Some(s) = &mut self.escrow_table { - self.st.escrow_table = s.root().context("failed to flush escrow table")?; + self.st.escrow_table = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } if self.pending_permit == Permission::Write { if let Some(s) = &mut self.pending_deals { - self.st.pending_proposals = 
s.root().context("failed to flush escrow table")?; + self.st.pending_proposals = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } if self.dpe_permit == Permission::Write { if let Some(s) = &mut self.deals_by_epoch { - self.st.deal_ops_by_epoch = s.root().context("failed to flush escrow table")?; + self.st.deal_ops_by_epoch = s + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush escrow table")?; } } @@ -490,13 +529,13 @@ where .as_ref() .unwrap() .get(&addr) - .context("failed to get locked balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; let escrow_balance = self .escrow_table .as_ref() .unwrap() .get(&addr) - .context("failed to get escrow balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance")?; Ok((prev_locked + amount_to_lock) <= escrow_balance) } @@ -514,14 +553,14 @@ where .as_ref() .unwrap() .get(addr) - .context("failed to get locked balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get locked balance")?; let escrow_balance = self .escrow_table .as_ref() .unwrap() .get(addr) - .context("failed to get escrow balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get escrow balance")?; if &prev_locked + amount > escrow_balance { return Err(actor_error!(insufficient_funds; @@ -534,7 +573,7 @@ where .as_mut() .unwrap() .add(addr, amount) - .context("failed to add locked balance")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add locked balance")?; Ok(()) } diff --git a/actors/market/tests/market_actor_test.rs b/actors/market/tests/market_actor_test.rs index c9e3f66ed..0fa11df93 100644 --- a/actors/market/tests/market_actor_test.rs +++ b/actors/market/tests/market_actor_test.rs @@ -18,12 +18,12 @@ use fil_actor_verifreg::UseBytesParams; use fil_actors_runtime::cbor::deserialize; use fil_actors_runtime::network::EPOCHS_IN_DAY; use fil_actors_runtime::runtime::{Policy, Runtime}; -use 
fil_actors_runtime::test_utils::*; use fil_actors_runtime::{ make_empty_map, ActorError, SetMultimap, BURNT_FUNDS_ACTOR_ADDR, CRON_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, VERIFIED_REGISTRY_ACTOR_ADDR, }; +use fil_actors_runtime::{test_utils::*, ActorContext2}; use fvm_ipld_amt::Amt; use fvm_ipld_encoding::{to_vec, RawBytes}; use fvm_shared::address::Address; @@ -122,12 +122,12 @@ fn simple_construction() { fn label_cbor() { let label = Label::String("i_am_random_string____i_am_random_string____".parse().unwrap()); let _ = to_vec(&label) - .map_err(|e| ActorError::from(e).wrap("failed to serialize DealProposal")) + .context_code(ExitCode::USR_SERIALIZATION, "failed to serialize DealProposal") .unwrap(); let label2 = Label::Bytes(b"i_am_random_____i_am_random_____".to_vec()); let _ = to_vec(&label2) - .map_err(|e| ActorError::from(e).wrap("failed to serialize DealProposal")) + .context_code(ExitCode::USR_SERIALIZATION, "failed to serialize DealProposal") .unwrap(); let empty_string_label = Label::String("".parse().unwrap()); diff --git a/actors/miner/src/bitfield_queue.rs b/actors/miner/src/bitfield_queue.rs index 3b5dacc89..f4c215df6 100644 --- a/actors/miner/src/bitfield_queue.rs +++ b/actors/miner/src/bitfield_queue.rs @@ -5,7 +5,7 @@ use std::convert::TryInto; use std::num::TryFromIntError; use cid::Cid; -use fil_actors_runtime::{ActorError, Array}; +use fil_actors_runtime::Array; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -29,16 +29,6 @@ pub enum Error { Bitfield(#[from] fvm_ipld_bitfield::OutOfRangeError), } -impl From> for ActorError { - fn from(e: Error) -> Self { - match e { - Error::Amt(e) => e.into(), - Error::Int(e) => e.into(), - Error::Bitfield(e) => e.into(), - } - } -} - impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result> { Ok(Self { amt: Array::load(root, 
store)?, quant }) diff --git a/actors/miner/src/deadline_state.rs b/actors/miner/src/deadline_state.rs index 0ed0c0d5b..a275f362f 100644 --- a/actors/miner/src/deadline_state.rs +++ b/actors/miner/src/deadline_state.rs @@ -7,13 +7,14 @@ use std::collections::BTreeSet; use cid::multihash::Code; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::CborStore; use fvm_shared::clock::{ChainEpoch, QuantSpec}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{PoStProof, SectorSize}; use num_traits::{Signed, Zero}; @@ -58,9 +59,12 @@ impl Deadlines { return Err(actor_error!(illegal_argument, "invalid deadline {}", deadline_idx)); } - store.get_cbor(&self.due[deadline_idx as usize])?.ok_or_else(|| { - actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx) - }) + store + .get_cbor(&self.due[deadline_idx as usize]) + .exit_code(ExitCode::USR_SERIALIZATION)? 
+ .ok_or_else(|| { + actor_error!(illegal_state, "failed to lookup deadline {}", deadline_idx) + }) } pub fn for_each( @@ -90,7 +94,8 @@ impl Deadlines { deadline.validate_state()?; - self.due[deadline_idx as usize] = store.put_cbor(deadline, Code::Blake2b256)?; + self.due[deadline_idx as usize] = + store.put_cbor(deadline, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } } @@ -179,20 +184,22 @@ impl Deadline { let empty_partitions_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_PARTITIONS_AMT_BITWIDTH) .flush() - .context("Failed to create empty states array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; let empty_deadline_expiration_array = Array::<(), BS>::new_with_bit_width(store, DEADLINE_EXPIRATIONS_AMT_BITWIDTH) .flush() - .context("Failed to create empty states array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; let empty_post_submissions_array = Array::<(), BS>::new_with_bit_width( store, DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .context("Failed to create empty states array")?; - let empty_sectors_array = Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) - .flush() - .context("Failed to construct empty sectors snapshot array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty states array")?; + let empty_sectors_array = + Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH).flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to construct empty sectors snapshot array", + )?; Ok(Self { partitions: empty_partitions_array, expirations_epochs: empty_deadline_expiration_array, @@ -212,28 +219,30 @@ impl Deadline { &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.partitions, store)?) + Ok(Array::load(&self.partitions, store).exit_code(ExitCode::USR_SERIALIZATION)?) 
} pub fn optimistic_proofs_amt<'db, BS: Blockstore>( &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.optimistic_post_submissions, store)?) + Ok(Array::load(&self.optimistic_post_submissions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn partitions_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.partitions_snapshot, store)?) + Ok(Array::load(&self.partitions_snapshot, store).exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn optimistic_proofs_snapshot_amt<'db, BS: Blockstore>( &self, store: &'db BS, ) -> Result, ActorError> { - Ok(Array::load(&self.optimistic_post_submissions_snapshot, store)?) + Ok(Array::load(&self.optimistic_post_submissions_snapshot, store) + .exit_code(ExitCode::USR_SERIALIZATION)?) } pub fn load_partition( @@ -241,11 +250,14 @@ impl Deadline { store: &BS, partition_idx: u64, ) -> Result { - let partitions = Array::::load(&self.partitions, store)?; + let partitions = Array::::load(&self.partitions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let partition = partitions .get(partition_idx) - .with_context(|| format!("failed to lookup partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup partition {}", partition_idx) + })? .ok_or_else(|| actor_error!(not_found, "no partition {}", partition_idx))?; Ok(partition.clone()) @@ -256,11 +268,14 @@ impl Deadline { store: &BS, partition_idx: u64, ) -> Result { - let partitions = Array::::load(&self.partitions_snapshot, store)?; + let partitions = Array::::load(&self.partitions_snapshot, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let partition = partitions .get(partition_idx) - .with_context(|| format!("failed to lookup partition snapshot {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup partition snapshot {}", partition_idx) + })? 
.ok_or_else(|| actor_error!(not_found, "no partition snapshot {}", partition_idx))?; Ok(partition.clone()) @@ -279,12 +294,17 @@ impl Deadline { return Ok(()); } - let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; + let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .exit_code(ExitCode::USR_SERIALIZATION)?; queue - .add_to_queue_values(expiration_epoch, partitions.iter().copied())?; + .add_to_queue_values(expiration_epoch, partitions.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.expirations_epochs = queue.amt.flush().context("failed to save expiration queue")?; + self.expirations_epochs = queue + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save expiration queue")?; Ok(()) } @@ -316,9 +336,13 @@ impl Deadline { // For each partition with an expiry, remove and collect expirations from the partition queue. for i in expired_partitions.iter() { let partition_idx = i; - let mut partition = partitions.get(partition_idx)?.cloned().ok_or_else(|| { - actor_error!(illegal_state, "missing expected partition {}", partition_idx) - })?; + let mut partition = partitions + .get(partition_idx) + .exit_code(ExitCode::USR_SERIALIZATION)? + .cloned() + .ok_or_else(|| { + actor_error!(illegal_state, "missing expected partition {}", partition_idx) + })?; let partition_expiration = partition.pop_expired_sectors(store, until, quant).with_context(|| { @@ -335,10 +359,10 @@ impl Deadline { all_faulty_power += &partition_expiration.faulty_power; all_on_time_pledge += &partition_expiration.on_time_pledge; - partitions.set(partition_idx, partition)?; + partitions.set(partition_idx, partition).exit_code(ExitCode::USR_SERIALIZATION)?; } - self.partitions = partitions.flush()?; + self.partitions = partitions.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update early expiration bitmap. 
let new_early_terminations = BitField::try_from_bits(partitions_with_early_terminations) @@ -396,15 +420,16 @@ impl Deadline { } // Get/create partition to update. - let mut partition = match partitions.get(partition_idx)? { - Some(partition) => partition.clone(), - None => { - // This case will usually happen zero times. - // It would require adding more than a full partition in one go - // to happen more than once. - Partition::new(store)? - } - }; + let mut partition = + match partitions.get(partition_idx).exit_code(ExitCode::USR_SERIALIZATION)? { + Some(partition) => partition.clone(), + None => { + // This case will usually happen zero times. + // It would require adding more than a full partition in one go + // to happen more than once. + Partition::new(store)? + } + }; // Figure out which (if any) sectors we want to add to this partition. let sector_count = partition.sectors.len(); @@ -424,7 +449,7 @@ impl Deadline { total_power += &partition_power; // Save partition back. - partitions.set(partition_idx, partition)?; + partitions.set(partition_idx, partition).exit_code(ExitCode::USR_SERIALIZATION)?; // Record deadline -> partition mapping so we can later update the deadlines. partition_deadline_updates @@ -432,15 +457,17 @@ impl Deadline { } // Save partitions back. - self.partitions = partitions.flush()?; + self.partitions = partitions.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Next, update the expiration queue. 
- let mut deadline_expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load expiration epochs")?; + let mut deadline_expirations = + BitFieldQueue::new(store, &self.expirations_epochs, quant) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration epochs")?; deadline_expirations .add_many_to_queue_values(partition_deadline_updates.iter().copied()) - .context("failed to add expirations for new deadlines")?; - self.expirations_epochs = deadline_expirations.amt.flush()?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add expirations for new deadlines")?; + self.expirations_epochs = + deadline_expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(total_power) } @@ -461,8 +488,9 @@ impl Deadline { let mut partition = match partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? - { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? { Some(partition) => partition.clone(), None => { partitions_finished.push(partition_idx); @@ -485,7 +513,9 @@ impl Deadline { // Save partition partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; if !result.below_limit(max_partitions, max_sectors) { break; @@ -498,7 +528,9 @@ impl Deadline { } // Save deadline's partitions - self.partitions = partitions.flush().context("failed to update partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to update partitions")?; // Update global early terminations bitfield. 
let no_early_terminations = self.early_terminations.is_empty(); @@ -511,12 +543,15 @@ impl Deadline { until: ChainEpoch, quant: QuantSpec, ) -> Result<(BitField, bool), ActorError> { - let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; - let (popped, modified) = - expirations.pop_until(until).context("failed to pop expiring partitions")?; + let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .exit_code(ExitCode::USR_SERIALIZATION)?; + let (popped, modified) = expirations + .pop_until(until) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to pop expiring partitions")?; if modified { - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = + expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; } Ok((popped, modified)) @@ -539,7 +574,9 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else( || actor_error!(not_found; "failed to find partition {}", partition_idx), )? @@ -561,7 +598,9 @@ impl Deadline { partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store updated partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store updated partition {}", partition_idx) + })?; if !removed.is_empty() { // Record that partition now has pending early terminations. 
@@ -578,7 +617,9 @@ impl Deadline { } // save partitions back - self.partitions = partitions.flush().context("failed to persist partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions")?; Ok(power_lost) } @@ -642,7 +683,9 @@ impl Deadline { .try_for_each::<_, ActorError>(|partition_idx, partition| { // If we're keeping the partition as-is, append it to the new partitions array. if !to_remove_set.contains(&partition_idx) { - new_partitions.set(new_partitions.count(), partition.clone())?; + new_partitions + .set(new_partitions.count(), partition.clone()) + .exit_code(ExitCode::USR_SERIALIZATION)?; return Ok(()); } @@ -675,10 +718,11 @@ impl Deadline { Ok(()) }) - .context("while removing partitions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "while removing partitions")?; - self.partitions = - new_partitions.flush().context("failed to persist new partition table")?; + self.partitions = new_partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist new partition table")?; let dead = BitField::union(&all_dead_sectors); let live = BitField::union(&all_live_sectors); @@ -692,14 +736,17 @@ impl Deadline { // Update expiration bitfields. 
let mut expiration_epochs = BitFieldQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load expiration queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration queue")?; - expiration_epochs - .cut(to_remove) - .context("failed cut removed partitions from deadline expiration queue")?; + expiration_epochs.cut(to_remove).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed cut removed partitions from deadline expiration queue", + )?; - self.expirations_epochs = - expiration_epochs.amt.flush().context("failed persist deadline expiration queue")?; + self.expirations_epochs = expiration_epochs.amt.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed persist deadline expiration queue", + )?; Ok((live, dead, removed_power)) } @@ -723,7 +770,9 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))? .clone(); @@ -748,10 +797,14 @@ impl Deadline { partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; } - self.partitions = partitions.flush().context("failed to store partitions root")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root")?; self.add_expiration_partitions( store, @@ -776,7 +829,9 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? 
+ .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", partition_idx))? .clone(); @@ -786,12 +841,16 @@ impl Deadline { partitions .set(partition_idx, partition) - .with_context(|| format!("failed to update partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", partition_idx) + })?; } // Power is not regained until the deadline end, when the recovery is confirmed. - self.partitions = partitions.flush().context("failed to store partitions root")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions root")?; Ok(()) } @@ -821,7 +880,9 @@ impl Deadline { let mut partition = partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? .ok_or_else(|| actor_error!(illegal_state; "no partition {}", partition_idx))? .clone(); @@ -851,7 +912,9 @@ impl Deadline { // Save new partition state. partitions .set(partition_idx, partition) - .with_context(|| format!("failed to update partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", partition_idx) + })?; self.faulty_power += &part_new_faulty_power; @@ -861,7 +924,9 @@ impl Deadline { // Save modified deadline state. 
if detected_any { - self.partitions = partitions.flush().context("failed to store partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions")?; } self.add_expiration_partitions( @@ -881,7 +946,7 @@ impl Deadline { DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, ) .flush() - .context("failed to clear pending proofs array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to clear pending proofs array")?; // only snapshot sectors if there's a proof that might be disputed (this is equivalent to asking if the OptimisticPoStSubmissionsSnapshot is empty) if self.optimistic_post_submissions != self.optimistic_post_submissions_snapshot { @@ -890,7 +955,10 @@ impl Deadline { self.sectors_snapshot = Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) .flush() - .context("failed to clear sectors snapshot array")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to clear sectors snapshot array", + )?; } Ok((power_delta, penalized_power)) } @@ -901,7 +969,7 @@ impl Deadline { f: impl FnMut(u64, &Partition) -> Result<(), ActorError>, ) -> Result<(), ActorError> { let parts = self.partitions_amt(store)?; - parts.try_for_each(f)?; + parts.try_for_each(f).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -933,9 +1001,12 @@ impl Deadline { let mut disputed_sectors = PartitionSectorMap::default(); let mut disputed_power = PowerPair::zero(); for part_idx in partitions.iter() { - let partition_snapshot = partitions_snapshot.get(part_idx)?.ok_or_else(|| { - actor_error!(illegal_state, "failed to find partition {}", part_idx) - })?; + let partition_snapshot = partitions_snapshot + .get(part_idx) + .exit_code(ExitCode::USR_SERIALIZATION)? 
+ .ok_or_else(|| { + actor_error!(illegal_state, "failed to find partition {}", part_idx) + })?; // Record sectors for proof verification all_sectors.push(partition_snapshot.sectors.clone()); @@ -1062,7 +1133,9 @@ impl Deadline { for post in post_partitions { let mut partition = partitions .get(post.index) - .with_context(|| format!("failed to load partition {}", post.index))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", post.index) + })? .ok_or_else(|| actor_error!(not_found; "no such partition {}", post.index))? .clone(); @@ -1105,7 +1178,9 @@ impl Deadline { // This will be rolled back if the method aborts with a failed proof. partitions .set(post.index, partition) - .with_context(|| format!("failed to update partition {}", post.index))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update partition {}", post.index) + })?; new_faulty_power_total += &new_fault_power; retracted_recovery_power_total += &retracted_recovery_power; @@ -1124,7 +1199,9 @@ impl Deadline { self.faulty_power -= &recovered_power_total; self.faulty_power += &new_faulty_power_total; - self.partitions = partitions.flush().context("failed to persist partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions")?; // Collect all sectors, faults, and recoveries for proof verification. let all_sector_numbers = BitField::union(&all_sectors); @@ -1157,8 +1234,9 @@ impl Deadline { // TODO: Can we do this with out cloning? 
WindowedPoSt { partitions: partitions.clone(), proofs: proofs.to_vec() }, ) - .context("failed to store proof")?; - let root = proof_arr.flush().context("failed to save proofs")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store proof")?; + let root = + proof_arr.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save proofs")?; self.optimistic_post_submissions = root; Ok(()) } @@ -1178,10 +1256,13 @@ impl Deadline { // This will not affect concurrent attempts to refute other proofs. let post = proof_arr .delete(idx) - .with_context(|| format!("failed to retrieve proof {}", idx))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to retrieve proof {}", idx) + })? .ok_or_else(|| actor_error!(illegal_argument, "proof {} not found", idx))?; - let root = proof_arr.flush().context("failed to save proofs")?; + let root = + proof_arr.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save proofs")?; self.optimistic_post_submissions_snapshot = root; Ok((post.partitions, post.proofs)) } @@ -1212,8 +1293,9 @@ impl Deadline { for (partition_idx, sector_numbers) in partition_sectors.iter() { let mut partition = match partitions .get(partition_idx) - .with_context(|| format!("failed to load partition {}", partition_idx))? - { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {}", partition_idx) + })? 
{ Some(partition) => partition.clone(), None => { // We failed to find the partition, it could have moved @@ -1245,11 +1327,15 @@ impl Deadline { rescheduled_partitions.push(partition_idx); partitions .set(partition_idx, partition) - .with_context(|| format!("failed to store partition {}", partition_idx))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store partition {}", partition_idx) + })?; } if !rescheduled_partitions.is_empty() { - self.partitions = partitions.flush().context("failed to save partitions")?; + self.partitions = partitions + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save partitions")?; self.add_expiration_partitions(store, expiration, &rescheduled_partitions, quant) .context("failed to reschedule partition expirations")?; diff --git a/actors/miner/src/deadlines.rs b/actors/miner/src/deadlines.rs index 01d042be1..625873d8b 100644 --- a/actors/miner/src/deadlines.rs +++ b/actors/miner/src/deadlines.rs @@ -2,10 +2,11 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext2, ActorError, Array}; use fvm_ipld_blockstore::Blockstore; use fvm_shared::clock::{ChainEpoch, QuantSpec}; +use fvm_shared::error::ExitCode; use fvm_shared::sector::SectorNumber; use super::{DeadlineInfo, Deadlines, Partition}; @@ -40,18 +41,21 @@ impl Deadlines { for i in 0..self.due.len() { let deadline_idx = i as u64; let deadline = self.load_deadline(policy, store, deadline_idx)?; - let partitions = Array::::load(&deadline.partitions, store)?; + let partitions = Array::::load(&deadline.partitions, store) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut partition_idx = None; - partitions.for_each_while(|i, partition| { - if partition.sectors.get(sector_number) { - partition_idx = Some(i); - false - } else { - true - } - })?; + partitions + .for_each_while(|i, partition| { + if 
partition.sectors.get(sector_number) { + partition_idx = Some(i); + false + } else { + true + } + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; if let Some(partition_idx) = partition_idx { return Ok((deadline_idx, partition_idx)); diff --git a/actors/miner/src/expiration_queue.rs b/actors/miner/src/expiration_queue.rs index c86c4f635..620f4bee2 100644 --- a/actors/miner/src/expiration_queue.rs +++ b/actors/miner/src/expiration_queue.rs @@ -6,7 +6,7 @@ use std::convert::TryInto; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_amt::{Error as AmtError, ValueMut}; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -14,6 +14,7 @@ use fvm_ipld_encoding::tuple::*; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{SectorNumber, SectorSize}; use num_traits::{Signed, Zero}; @@ -192,7 +193,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let mut total_sectors = Vec::::new(); for group in group_new_sectors_by_declared_expiration(sector_size, sectors, self.quant) { - let sector_numbers = BitField::try_from_bits(group.sectors)?; + let sector_numbers = + BitField::try_from_bits(group.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; self.add( group.epoch, @@ -272,7 +274,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { } else { // Remove sectors from on-time expiry and active power. 
let sectors_bitfield = - BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; group.expiration_set.on_time_sectors -= §ors_bitfield; group.expiration_set.on_time_pledge -= &group.sector_epoch_set.pledge; group.expiration_set.active_power -= &group.sector_epoch_set.power; @@ -289,7 +292,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { if !sectors_total.is_empty() { // Add sectors to new expiration as early-terminating and faulty. - let early_sectors = BitField::try_from_bits(sectors_total)?; + let early_sectors = + BitField::try_from_bits(sectors_total).exit_code(ExitCode::USR_SERIALIZATION)?; self.add( new_expiration, &BitField::new(), @@ -314,35 +318,37 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let mut mutated_expiration_sets = Vec::<(ChainEpoch, ExpirationSet)>::new(); - self.amt.try_for_each(|e, expiration_set| { - let epoch: ChainEpoch = - e.try_into().map_err(|e| actor_error!(illegal_state, "{}", e))?; - - if epoch <= self.quant.quantize_up(fault_expiration) { - let mut expiration_set = expiration_set.clone(); - - // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. - // Pledge is still on-time. - expiration_set.faulty_power += &expiration_set.active_power; - expiration_set.active_power = PowerPair::zero(); - mutated_expiration_sets.push((epoch, expiration_set)); - } else { - rescheduled_epochs.push(e); - // sanity check to make sure we're not trying to re-schedule already faulty sectors. - if !expiration_set.early_sectors.is_empty() { - // TODO: correct exit code? 
- return Err(actor_error!( - illegal_state, - "attempted to re-schedule early expirations to an earlier epoch" - )); + self.amt + .try_for_each(|e, expiration_set| { + let epoch: ChainEpoch = + e.try_into().map_err(|e| actor_error!(illegal_state, "{}", e))?; + + if epoch <= self.quant.quantize_up(fault_expiration) { + let mut expiration_set = expiration_set.clone(); + + // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. + // Pledge is still on-time. + expiration_set.faulty_power += &expiration_set.active_power; + expiration_set.active_power = PowerPair::zero(); + mutated_expiration_sets.push((epoch, expiration_set)); + } else { + rescheduled_epochs.push(e); + // sanity check to make sure we're not trying to re-schedule already faulty sectors. + if !expiration_set.early_sectors.is_empty() { + // TODO: correct exit code? + return Err(actor_error!( + illegal_state, + "attempted to re-schedule early expirations to an earlier epoch" + )); + } + rescheduled_sectors |= &expiration_set.on_time_sectors; + rescheduled_power += &expiration_set.active_power; + rescheduled_power += &expiration_set.faulty_power; } - rescheduled_sectors |= &expiration_set.on_time_sectors; - rescheduled_power += &expiration_set.active_power; - rescheduled_power += &expiration_set.faulty_power; - } - Ok(()) - })?; + Ok(()) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; for (epoch, expiration_set) in mutated_expiration_sets { let res = expiration_set.validate_state(); @@ -366,7 +372,7 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { )?; // Trim the rescheduled epochs from the queue. 
- self.amt.batch_delete(rescheduled_epochs, true)?; + self.amt.batch_delete(rescheduled_epochs, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -624,22 +630,24 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let mut on_time_pledge = TokenAmount::zero(); let mut popped_keys = Vec::::new(); - self.amt.for_each_while(|i, this_value| { - if i as ChainEpoch > until { - return false; - } + self.amt + .for_each_while(|i, this_value| { + if i as ChainEpoch > until { + return false; + } - popped_keys.push(i); - on_time_sectors |= &this_value.on_time_sectors; - early_sectors |= &this_value.early_sectors; - active_power += &this_value.active_power; - faulty_power += &this_value.faulty_power; - on_time_pledge += &this_value.on_time_pledge; + popped_keys.push(i); + on_time_sectors |= &this_value.on_time_sectors; + early_sectors |= &this_value.early_sectors; + active_power += &this_value.active_power; + faulty_power += &this_value.faulty_power; + on_time_pledge += &this_value.on_time_pledge; - true - })?; + true + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.amt.batch_delete(popped_keys, true)?; + self.amt.batch_delete(popped_keys, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(ExpirationSet { on_time_sectors, @@ -682,17 +690,19 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let epoch = self.quant.quantize_up(raw_epoch); let mut expiration_set = self .amt - .get(epoch.try_into()?) - .with_context(|| format!("failed to lookup queue epoch {}", epoch))? + .get(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup queue epoch {}", epoch) + })? .ok_or_else(|| { actor_error!(illegal_state, "missing expected expiration set at epoch {}", epoch) })? 
.clone(); expiration_set .remove(on_time_sectors, early_sectors, pledge, active_power, faulty_power) - .with_context(|| { - format!("failed to remove expiration values for queue epoch {}", epoch) - })?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to remove expiration values for queue epoch {}", epoch) + })?; self.must_update_or_delete(epoch, expiration_set)?; Ok(()) @@ -711,7 +721,8 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { let groups = self.find_sectors_by_expiration(sector_size, sectors)?; for group in groups { let sectors_bitfield = - BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)?; self.remove( group.sector_epoch_set.epoch, §ors_bitfield, @@ -727,7 +738,12 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { removed_pledge += &group.sector_epoch_set.pledge; } - Ok((BitField::try_from_bits(removed_sector_numbers)?, removed_power, removed_pledge)) + Ok(( + BitField::try_from_bits(removed_sector_numbers) + .exit_code(ExitCode::USR_SERIALIZATION)?, + removed_power, + removed_pledge, + )) } /// Traverses the entire queue with a callback function that may mutate entries. @@ -742,19 +758,22 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { ) -> Result<(), ActorError> { let mut epochs_emptied = Vec::::new(); - self.amt.try_for_each_while_mut::<_, ActorError>(|e, expiration_set| { - let keep_going = f(e.try_into()?, expiration_set)?; - - if expiration_set.is_empty() { - // Mark expiration set as unchanged, it will be removed after the iteration. 
- expiration_set.mark_unchanged(); - epochs_emptied.push(e); - } + self.amt + .try_for_each_while_mut::<_, ActorError>(|e, expiration_set| { + let keep_going = + f(e.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set)?; + + if expiration_set.is_empty() { + // Mark expiration set as unchanged, it will be removed after the iteration. + expiration_set.mark_unchanged(); + epochs_emptied.push(e); + } - Ok(keep_going) - })?; + Ok(keep_going) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; - self.amt.batch_delete(epochs_emptied, true)?; + self.amt.batch_delete(epochs_emptied, true).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -762,8 +781,10 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { fn may_get(&self, key: ChainEpoch) -> Result { Ok(self .amt - .get(key.try_into()?) - .with_context(|| format!("failed to lookup queue epoch {}", key))? + .get(key.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup queue epoch {}", key) + })? .cloned() .unwrap_or_default()) } @@ -774,8 +795,10 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { expiration_set: ExpirationSet, ) -> Result<(), ActorError> { self.amt - .set(epoch.try_into()?, expiration_set) - .with_context(|| format!("failed to set queue epoch {}", epoch)) + .set(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set queue epoch {}", epoch) + }) } /// Since this might delete the node, it's not safe for use inside an iteration. @@ -786,12 +809,16 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { ) -> Result<(), ActorError> { if expiration_set.is_empty() { self.amt - .delete(epoch.try_into()?) - .with_context(|| format!("failed to delete queue epoch {}", epoch))?; + .delete(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?) 
+ .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete queue epoch {}", epoch) + })?; } else { self.amt - .set(epoch.try_into()?, expiration_set) - .with_context(|| format!("failed to set queue epoch {}", epoch))?; + .set(epoch.try_into().exit_code(ExitCode::USR_SERIALIZATION)?, expiration_set) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set queue epoch {}", epoch) + })?; } Ok(()) @@ -838,34 +865,36 @@ impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { // If sectors remain, traverse next in epoch order. Remaining sectors should be // rescheduled to expire soon, so this traversal should exit early. if !all_remaining.is_empty() { - self.amt.try_for_each_while::<_, ActorError>(|epoch, es| { - let epoch = epoch as ChainEpoch; - // If this set's epoch is one of our declared epochs, we've already processed it - // in the loop above, so skip processing here. Sectors rescheduled to this epoch - // would have been included in the earlier processing. - if declared_expirations.contains_key(&epoch) { - return Ok(true); - } + self.amt + .try_for_each_while::<_, ActorError>(|epoch, es| { + let epoch = epoch as ChainEpoch; + // If this set's epoch is one of our declared epochs, we've already processed it + // in the loop above, so skip processing here. Sectors rescheduled to this epoch + // would have been included in the earlier processing. + if declared_expirations.contains_key(&epoch) { + return Ok(true); + } - // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption - // of grouping is that it only returns sectors with active power. ExpirationQueue should not - // provide operations that allow this to happen. 
- check_no_early_sectors(&all_remaining, es)?; - - let group = group_expiration_set( - sector_size, - §ors_by_number, - &mut all_remaining, - es.clone(), - epoch, - ); - - if !group.sector_epoch_set.sectors.is_empty() { - expiration_groups.push(group); - } + // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption + // of grouping is that it only returns sectors with active power. ExpirationQueue should not + // provide operations that allow this to happen. + check_no_early_sectors(&all_remaining, es)?; + + let group = group_expiration_set( + sector_size, + §ors_by_number, + &mut all_remaining, + es.clone(), + epoch, + ); + + if !group.sector_epoch_set.sectors.is_empty() { + expiration_groups.push(group); + } - Ok(!all_remaining.is_empty()) - })?; + Ok(!all_remaining.is_empty()) + }) + .exit_code(ExitCode::USR_SERIALIZATION)?; } if !all_remaining.is_empty() { diff --git a/actors/miner/src/lib.rs b/actors/miner/src/lib.rs index ecb65f908..3ff93c2e1 100644 --- a/actors/miner/src/lib.rs +++ b/actors/miner/src/lib.rs @@ -17,8 +17,8 @@ pub use deadlines::*; pub use expiration_queue::*; use fil_actors_runtime::runtime::{ActorCode, Policy, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorContext, ActorError, BURNT_FUNDS_ACTOR_ADDR, INIT_ACTOR_ADDR, - REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, + actor_error, cbor, ActorContext, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, STORAGE_MARKET_ACTOR_ADDR, STORAGE_POWER_ACTOR_ADDR, }; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use fvm_ipld_blockstore::Blockstore; @@ -179,8 +179,10 @@ impl Actor { params.multi_addresses, params.window_post_proof_type, )?; - let info_cid = - rt.store().put_cbor(&info, Blake2b256).context("failed to construct illegal state")?; + let info_cid = rt + .store() + .put_cbor(&info, Blake2b256) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct 
illegal state")?; let st = State::new(policy, rt.store(), info_cid, period_start, deadline_idx) .context("failed to construct state")?; @@ -522,8 +524,8 @@ impl Actor { return Err(actor_error!(illegal_argument, "post commit randomness mismatched")); } - let sectors = - Sectors::load(rt.store(), &state.sectors).context("failed to load sectors")?; + let sectors = Sectors::load(rt.store(), &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; let mut deadlines = state.load_deadlines(rt.store()).context("failed to load deadlines")?; @@ -760,7 +762,7 @@ impl Actor { proof: params.aggregate_proof, infos: svis, }) - .context("aggregate seal verify failed")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "aggregate seal verify failed")?; let rew = request_current_epoch_block_reward(rt)?; let pwr = request_current_total_power(rt)?; @@ -822,8 +824,8 @@ impl Actor { )?; let sector_store = rt.store().clone(); - let mut sectors = - Sectors::load(§or_store, &state.sectors).context("failed to load sectors array")?; + let mut sectors = Sectors::load(§or_store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = TokenAmount::zero(); @@ -937,7 +939,8 @@ impl Actor { RawBytes::serialize(ext::market::ActivateDealsParams { deal_ids: update.deals.clone(), sector_expiry: sector_info.expiration, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); @@ -1057,7 +1060,7 @@ impl Actor { new_unsealed_cid: with_details.unsealed_cid, proof: with_details.update.replica_proof.clone(), } - ).with_context(|| { + ).with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to verify replica proof for sector {}", with_details.sector_info.sector_number) })?; @@ -1140,7 +1143,7 @@ impl Actor { let mut partition = partitions .get(with_details.update.partition) - .with_context(|| format!("failed to load deadline {} partition {}", 
with_details.update.deadline, with_details.update.partition))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || format!("failed to load deadline {} partition {}", with_details.update.deadline, with_details.update.partition))? .cloned() .ok_or_else(|| actor_error!(not_found, "no such deadline {} partition {}", dl_idx, with_details.update.partition))?; @@ -1158,7 +1161,7 @@ impl Actor { partitions .set(with_details.update.partition, partition) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to save deadline {} partition {}", with_details.update.deadline, with_details.update.partition) })?; @@ -1166,7 +1169,7 @@ impl Actor { new_sectors.push(new_sector_info); } - deadline.partitions = partitions.flush().with_context(|| { + deadline.partitions = partitions.flush().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to save partitions for deadline {}", dl_idx) })?; @@ -1196,7 +1199,7 @@ impl Actor { // Overwrite sector infos. sectors.store(new_sectors).context("failed to update sector infos")?; - state.sectors = sectors.amt.flush().context("failed to save sectors")?; + state.sectors = sectors.amt.flush().context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors")?; state.save_deadlines(rt.store(), deadlines).context("failed to save deadlines")?; BitField::try_from_bits(succeeded).map_err(|_|{ @@ -1299,10 +1302,13 @@ impl Actor { // Load sectors for the dispute. let sectors = Sectors::load(rt.store(), &dl_current.sectors_snapshot) - .context("failed to load sectors array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let sector_infos = sectors .load_for_proof(&dispute_info.all_sector_nos, &dispute_info.ignored_sector_nos) - .context("failed to load sectors to dispute window post")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load sectors to dispute window post", + )?; // Check proof, we fail if validation succeeds. 
if verify_windowed_post(rt, target_deadline.challenge, §or_infos, proofs)? { @@ -1674,7 +1680,9 @@ impl Actor { let st: State = rt.state()?; let precommit = st .get_precommitted_sector(rt.store(), sector_number) - .with_context(|| format!("failed to load pre-committed sector {}", sector_number))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load pre-committed sector {}", sector_number) + })? .ok_or_else(|| actor_error!(not_found, "no pre-commited sector {}", sector_number))?; let max_proof_size = precommit.info.seal_proof.proof_size().map_err(|e| { @@ -1730,7 +1738,7 @@ impl Actor { rt.send( *STORAGE_POWER_ACTOR_ADDR, ext::power::SUBMIT_POREP_FOR_BULK_VERIFY_METHOD, - RawBytes::serialize(&svi)?, + RawBytes::serialize(&svi).exit_code(ExitCode::USR_ILLEGAL_STATE)?, BigInt::zero(), )?; @@ -1900,7 +1908,7 @@ impl Actor { } let mut sectors = Sectors::load(rt.store(), &state.sectors) - .context("failed to load sectors array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut power_delta = PowerPair::zero(); let mut pledge_delta = TokenAmount::zero(); @@ -1926,7 +1934,9 @@ impl Actor { let mut partition = partitions .get(decl.partition) - .with_context(|| format!("failed to load partition {:?}", key))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load partition {:?}", key) + })? .cloned() .ok_or_else(|| actor_error!(not_found, "no such partition {:?}", key))?; @@ -2011,7 +2021,9 @@ impl Actor { partitions .set(decl.partition, partition) - .with_context(|| format!("failed to save partition {:?}", key))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save partition {:?}", key) + })?; // Record the new partition expiration epoch for setting outside this loop // over declarations. 
@@ -2026,9 +2038,10 @@ impl Actor { } } - deadline.partitions = partitions.flush().with_context(|| { - format!("failed to save partitions for deadline {}", deadline_idx) - })?; + deadline.partitions = + partitions.flush().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to save partitions for deadline {}", deadline_idx) + })?; // Record partitions in deadline expiration queue for epoch in epochs_to_reschedule { @@ -2048,7 +2061,10 @@ impl Actor { .with_context(|| format!("failed to save deadline {}", deadline_idx))?; } - state.sectors = sectors.amt.flush().context("failed to save sectors")?; + state.sectors = sectors + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sectors")?; state.save_deadlines(store, deadlines).context("failed to save deadlines")?; Ok((power_delta, pledge_delta)) @@ -2146,7 +2162,8 @@ impl Actor { // We're only reading the sectors, so there's no need to save this back. // However, we still want to avoid re-loading this array per-partition. - let sectors = Sectors::load(store, &state.sectors).context("failed to load sectors")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; for (deadline_idx, partition_sectors) in to_process.iter() { // If the deadline is the current or next deadline to prove, don't allow terminating sectors. 
@@ -2275,8 +2292,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = - Sectors::load(store, &state.sectors).context("failed to load sectors array")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut new_fault_power_total = PowerPair::zero(); let curr_epoch = rt.curr_epoch(); @@ -2416,8 +2433,8 @@ impl Actor { let mut deadlines = state.load_deadlines(store).map_err(|e| e.wrap("failed to load deadlines"))?; - let sectors = - Sectors::load(store, &state.sectors).context("failed to load sectors array")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let curr_epoch = rt.curr_epoch(); for (deadline_idx, partition_map) in to_process.iter() { let policy = rt.policy(); @@ -2738,7 +2755,7 @@ impl Actor { let fault = rt .verify_consensus_fault(¶ms.header1, ¶ms.header2, ¶ms.header_extra) - .context("fault not verified")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "fault not verified")? 
.ok_or_else(|| actor_error!(illegal_argument, "No consensus fault found"))?; if fault.target != rt.message().receiver() { return Err(actor_error!( @@ -2795,7 +2812,7 @@ impl Actor { rt.curr_epoch(), &rt.current_balance(), ) - .context("failed to pay fees")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to pay fees")?; let mut burn_amount = &penalty_from_vesting + &penalty_from_balance; pledge_delta -= penalty_from_vesting; @@ -3017,8 +3034,8 @@ where } let info = get_miner_info(rt.store(), state)?; - let sectors = - Sectors::load(store, &state.sectors).context("failed to load sectors array")?; + let sectors = Sectors::load(store, &state.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors array")?; let mut total_initial_pledge = TokenAmount::zero(); let mut deals_to_terminate = @@ -3030,7 +3047,7 @@ where for (epoch, sector_numbers) in result.iter() { let sectors = sectors .load_sector(sector_numbers) - .map_err(|e| e.wrap("failed to load sector infos"))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector infos")?; penalty += termination_penalty( info.sector_size, @@ -3417,7 +3434,8 @@ where RawBytes::serialize(ext::power::UpdateClaimedPowerParams { raw_byte_delta: delta.raw, quality_adjusted_delta: delta.qa, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ) .map_err(|e| e.wrap(format!("failed to update power with {:?}", delta_clone)))?; @@ -3443,7 +3461,8 @@ where RawBytes::serialize(ext::market::OnMinerSectorsTerminateParamsRef { epoch, deal_ids: chunk, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; } @@ -3584,10 +3603,12 @@ where ext::market::COMPUTE_DATA_COMMITMENT_METHOD, RawBytes::serialize(ext::market::ComputeDataCommitmentParamsRef { inputs: data_commitment_inputs, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )? 
- .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; if data_commitment_inputs.len() != ret.commds.len() { return Err(actor_error!(illegal_state, "number of data commitments computed {} does not match number of data commitment inputs {}", @@ -3627,11 +3648,12 @@ where let serialized = rt.send( *STORAGE_MARKET_ACTOR_ADDR, ext::market::VERIFY_DEALS_FOR_ACTIVATION_METHOD, - RawBytes::serialize(ext::market::VerifyDealsForActivationParamsRef { sectors })?, + RawBytes::serialize(ext::market::VerifyDealsForActivationParamsRef { sectors }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; - Ok(serialized.deserialize()?) + Ok(serialized.deserialize().exit_code(ExitCode::USR_ILLEGAL_STATE)?) } /// Requests the current epoch target block reward from the reward actor. @@ -3771,7 +3793,7 @@ where rt.send( *STORAGE_POWER_ACTOR_ADDR, ext::power::UPDATE_PLEDGE_TOTAL_METHOD, - RawBytes::serialize(BigIntSer(pledge_delta))?, + RawBytes::serialize(BigIntSer(pledge_delta)).exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), )?; } @@ -3786,7 +3808,7 @@ fn assign_proving_period_offset( current_epoch: ChainEpoch, blake2b: impl FnOnce(&[u8]) -> [u8; 32], ) -> Result { - let mut my_addr = addr.marshal_cbor()?; + let mut my_addr = addr.marshal_cbor().exit_code(ExitCode::USR_ILLEGAL_STATE)?; my_addr .write_i64::(current_epoch) .map_err(|err| actor_error!(serialization, "{}", err))?; @@ -4065,7 +4087,8 @@ where RawBytes::serialize(ext::market::ActivateDealsParams { deal_ids: pre_commit.info.deal_ids.clone(), sector_expiry: pre_commit.info.expiration, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::zero(), ); @@ -4170,7 +4193,7 @@ where state .delete_precommitted_sectors(store, &new_sector_numbers) - .context("failed to delete precommited sectors")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to delete precommited sectors")?; state .assign_sectors_to_deadlines( @@ -4234,7 +4257,7 @@ impl ActorCode for Actor { 
} Some(Method::ControlAddresses) => { let res = Self::control_addresses(rt)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::ChangeWorkerAddress) => { Self::change_worker_address(rt, cbor::deserialize_params(params)?)?; @@ -4262,7 +4285,7 @@ impl ActorCode for Actor { } Some(Method::TerminateSectors) => { let ret = Self::terminate_sectors(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(ret)?) + Ok(RawBytes::serialize(ret).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::DeclareFaults) => { Self::declare_faults(rt, cbor::deserialize_params(params)?)?; @@ -4290,7 +4313,7 @@ impl ActorCode for Actor { } Some(Method::WithdrawBalance) => { let res = Self::withdraw_balance(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::ConfirmSectorProofsValid) => { Self::confirm_sector_proofs_valid(rt, cbor::deserialize_params(params)?)?; @@ -4334,7 +4357,7 @@ impl ActorCode for Actor { } Some(Method::ProveReplicaUpdates) => { let res = Self::prove_replica_updates(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} None => Err(actor_error!(unhandled_message, "Invalid method")), } diff --git a/actors/miner/src/partition_state.rs b/actors/miner/src/partition_state.rs index 9233f5e77..3c2819688 100644 --- a/actors/miner/src/partition_state.rs +++ b/actors/miner/src/partition_state.rs @@ -6,13 +6,14 @@ use std::ops::{self, Neg}; use cid::Cid; use fil_actors_runtime::runtime::Policy; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext, ActorContext2, ActorError, Array}; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec, NO_QUANTIZATION}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{SectorSize, StoragePower}; use num_traits::{Signed, Zero}; @@ -66,12 +67,14 @@ impl Partition { pub fn new(store: &BS) -> Result { let empty_expiration_array = Array::::new_with_bit_width(store, PARTITION_EXPIRATION_AMT_BITWIDTH) - .flush()?; + .flush() + .exit_code(ExitCode::USR_SERIALIZATION)?; let empty_early_termination_array = Array::::new_with_bit_width( store, PARTITION_EARLY_TERMINATION_ARRAY_AMT_BITWIDTH, ) - .flush()?; + .flush() + .exit_code(ExitCode::USR_SERIALIZATION)?; Ok(Self { sectors: BitField::new(), @@ -116,14 +119,16 @@ impl Partition { quant: QuantSpec, ) -> Result { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (sector_numbers, power, _) = expirations .add_active_sectors(sectors, sector_size) .context("failed to record new sector expirations")?; - self.expirations_epochs = - expirations.amt.flush().context("failed to store sector expirations")?; + self.expirations_epochs = expirations + .amt + .flush() + 
.context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store sector expirations")?; if self.sectors.contains_any(§or_numbers) { return Err(actor_error!(illegal_argument, "not all added sectors are new")); @@ -158,7 +163,7 @@ impl Partition { ) -> Result<(PowerPair, PowerPair), ActorError> { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load partition queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; // Reschedule faults let new_faulty_power = queue @@ -166,7 +171,7 @@ impl Partition { .context("failed to add faults to partition queue")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update partition metadata self.faults |= sector_numbers; @@ -216,8 +221,10 @@ impl Partition { validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; - let sector_numbers = - sector_numbers.validate().context("failed to intersect sectors with recoveries")?; + let sector_numbers = sector_numbers.validate().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to intersect sectors with recoveries", + )?; // Split declarations into declarations of new faults, and retraction of declared recoveries. 
let retracted_recoveries = &self.recoveries & sector_numbers; @@ -270,7 +277,7 @@ impl Partition { // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load partition queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; // Reschedule recovered let power = queue @@ -278,7 +285,7 @@ impl Partition { .context("failed to reschedule faults in partition queue")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Update partition metadata self.faults -= &self.recoveries; @@ -312,7 +319,9 @@ impl Partition { validate_partition_contains_sectors(self, sector_numbers) .map_err(|e| actor_error!(illegal_argument; "failed fault declaration: {}", e))?; - let sector_numbers = sector_numbers.validate().context("failed to validate recoveries")?; + let sector_numbers = sector_numbers + .validate() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to validate recoveries")?; // Ignore sectors not faulty or already declared recovered let mut recoveries = sector_numbers & &self.faults; @@ -382,9 +391,9 @@ impl Partition { let sector_infos = sectors.load_sector(&active)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; expirations.reschedule_expirations(new_expiration, §or_infos, sector_size)?; - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // check invariants self.validate_state()?; @@ -406,14 +415,16 @@ impl Partition { quant: QuantSpec, ) -> Result<(PowerPair, TokenAmount), ActorError> { let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load 
sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (old_sector_numbers, new_sector_numbers, power_delta, pledge_delta) = expirations .replace_sectors(old_sectors, new_sectors, sector_size) .context("failed to replace sector expirations")?; - self.expirations_epochs = - expirations.amt.flush().context("failed to save sector expirations")?; + self.expirations_epochs = expirations + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sector expirations")?; // Check the sectors being removed are active (alive, not faulty). let active = self.active_sectors(); @@ -449,17 +460,20 @@ impl Partition { sectors: &BitField, ) -> Result<(), ActorError> { let mut early_termination_queue = - BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) - .context("failed to load early termination queue")?; + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load early termination queue", + )?; - early_termination_queue - .add_to_queue(epoch, sectors) - .context("failed to add to early termination queue")?; + early_termination_queue.add_to_queue(epoch, sectors).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to add to early termination queue", + )?; self.early_terminated = early_termination_queue .amt .flush() - .context("failed to save early termination queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save early termination queue")?; Ok(()) } @@ -489,13 +503,15 @@ impl Partition { let sector_infos = sectors.load_sector(sector_numbers)?; let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load sector expirations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sector expirations")?; let (mut removed, removed_recovering) = expirations .remove_sectors(policy, §or_infos, &self.faults, &self.recoveries, sector_size) 
.context("failed to remove sector expirations")?; - self.expirations_epochs = - expirations.amt.flush().context("failed to save sector expirations")?; + self.expirations_epochs = expirations + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save sector expirations")?; let removed_sectors = &removed.on_time_sectors | &removed.early_sectors; @@ -545,12 +561,12 @@ impl Partition { } let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load expiration queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load expiration queue")?; let popped = expirations .pop_until(until) .with_context(|| format!("failed to pop expiration queue until {}", until))?; - self.expirations_epochs = expirations.amt.flush()?; + self.expirations_epochs = expirations.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; let expired_sectors = &popped.on_time_sectors | &popped.early_sectors; @@ -584,7 +600,7 @@ impl Partition { // Record the epoch of any sectors expiring early, for termination fee calculation later. self.record_early_termination(store, until, &popped.early_sectors) - .context("failed to record early terminations")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to record early terminations")?; // check invariants self.validate_state()?; @@ -604,14 +620,14 @@ impl Partition { // Collapse tail of queue into the last entry, and mark all power faulty. // Load expiration queue let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) - .context("failed to load partition queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load partition queue")?; queue .reschedule_all_as_faults(fault_expiration) .context("failed to reschedule all as faults")?; // Save expiration queue - self.expirations_epochs = queue.amt.flush()?; + self.expirations_epochs = queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; // Compute faulty power for penalization. 
New faulty power is the total power minus already faulty. let new_faulty_power = &self.live_power - &self.faulty_power; @@ -644,7 +660,8 @@ impl Partition { ) -> Result<(TerminationResult, /* has more */ bool), ActorError> { // Load early terminations. let mut early_terminated_queue = - BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION)?; + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut processed = Vec::::new(); let mut remaining: Option<(BitField, ChainEpoch)> = None; @@ -654,7 +671,7 @@ impl Partition { early_terminated_queue .amt .try_for_each_while::<_, ActorError>(|i, sectors| { - let epoch: ChainEpoch = i.try_into()?; + let epoch: ChainEpoch = i.try_into().exit_code(ExitCode::USR_SERIALIZATION)?; let count = sectors.len(); let limit = max_sectors - result.sectors_processed; @@ -677,26 +694,29 @@ impl Partition { let keep_going = result.sectors_processed < max_sectors; Ok(keep_going) }) - .context("failed to walk early terminations queue")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to walk early terminations queue")?; // Update early terminations - early_terminated_queue - .amt - .batch_delete(processed, true) - .context("failed to remove entries from early terminations queue")?; + early_terminated_queue.amt.batch_delete(processed, true).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to remove entries from early terminations queue", + )?; if let Some((remaining_sectors, remaining_epoch)) = remaining.take() { early_terminated_queue .amt .set(remaining_epoch as u64, remaining_sectors) - .context("failed to update remaining entry early terminations queue")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to update remaining entry early terminations queue", + )?; } // Save early terminations. 
- self.early_terminated = early_terminated_queue - .amt - .flush() - .context("failed to store early terminations queue")?; + self.early_terminated = early_terminated_queue.amt.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to store early terminations queue", + )?; // check invariants self.validate_state()?; diff --git a/actors/miner/src/sector_map.rs b/actors/miner/src/sector_map.rs index 8d197b6cf..9e467b865 100644 --- a/actors/miner/src/sector_map.rs +++ b/actors/miner/src/sector_map.rs @@ -4,9 +4,10 @@ use std::collections::BTreeMap; use fvm_ipld_bitfield::{BitField, UnvalidatedBitField, Validate}; +use fvm_shared::error::ExitCode; use serde::{Deserialize, Serialize}; -use fil_actors_runtime::{actor_error, runtime::Policy, ActorContext, ActorError}; +use fil_actors_runtime::{actor_error, runtime::Policy, ActorContext, ActorContext2, ActorError}; /// Maps deadlines to partition maps. #[derive(Default)] @@ -87,7 +88,9 @@ impl DeadlineSectorMap { policy, deadline_idx, partition_idx, - BitField::try_from_bits(sector_numbers.iter().copied())?.into(), + BitField::try_from_bits(sector_numbers.iter().copied()) + .exit_code(ExitCode::USR_SERIALIZATION)? + .into(), ) } @@ -113,7 +116,10 @@ impl PartitionSectorMap { partition_idx: u64, sector_numbers: Vec, ) -> Result<(), ActorError> { - self.add(partition_idx, BitField::try_from_bits(sector_numbers)?.into()) + self.add( + partition_idx, + BitField::try_from_bits(sector_numbers).exit_code(ExitCode::USR_SERIALIZATION)?.into(), + ) } /// Records the given sector bitfield at the given partition index, merging /// it with any existing bitfields if necessary. 
@@ -124,11 +130,14 @@ impl PartitionSectorMap { ) -> Result<(), ActorError> { match self.0.get_mut(&partition_idx) { Some(old_sector_numbers) => { - let old = old_sector_numbers - .validate_mut() - .context("failed to validate sector bitfield")?; - let new = - sector_numbers.validate().context("failed to validate new sector bitfield")?; + let old = old_sector_numbers.validate_mut().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to validate sector bitfield", + )?; + let new = sector_numbers.validate().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to validate new sector bitfield", + )?; *old |= new; } None => { @@ -141,7 +150,7 @@ impl PartitionSectorMap { /// Counts the number of partitions & sectors within the map. pub fn count(&mut self) -> Result<(/* partitions */ u64, /* sectors */ u64), ActorError> { let sectors = self.0.iter_mut().try_fold(0_u64, |sectors, (partition_idx, bf)| { - let validated = bf.validate().with_context(|| { + let validated = bf.validate().with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to parse bitmap for partition {}", partition_idx) })?; sectors.checked_add(validated.len() as u64).ok_or_else(|| { diff --git a/actors/miner/src/sectors.rs b/actors/miner/src/sectors.rs index d7032a244..8e892e6af 100644 --- a/actors/miner/src/sectors.rs +++ b/actors/miner/src/sectors.rs @@ -4,11 +4,14 @@ use std::collections::BTreeSet; use cid::Cid; -use fil_actors_runtime::{actor_error, ActorContext, ActorError, Array}; +use fil_actors_runtime::{actor_error, ActorContext2, ActorError, Array}; use fvm_ipld_amt::Error as AmtError; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; -use fvm_shared::sector::{SectorNumber, MAX_SECTOR_NUMBER}; +use fvm_shared::{ + error::ExitCode, + sector::{SectorNumber, MAX_SECTOR_NUMBER}, +}; use super::SectorOnChainInfo; @@ -35,7 +38,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { let sector_on_chain = self .amt .get(sector_number) - .with_context(|| format!("failed to 
load sector {}", sector_number))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load sector {}", sector_number) + })? .cloned() .ok_or_else(|| actor_error!(not_found; "sector not found: {}", sector_number))?; sector_infos.push(sector_on_chain); @@ -50,7 +55,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { Ok(self .amt .get(sector_number) - .with_context(|| format!("failed to get sector {}", sector_number))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get sector {}", sector_number) + })? .cloned()) } @@ -68,7 +75,9 @@ impl<'db, BS: Blockstore> Sectors<'db, BS> { self.amt .set(sector_number, info) - .with_context(|| format!("failed to store sector {}", sector_number))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store sector {}", sector_number) + })?; } Ok(()) diff --git a/actors/miner/src/state.rs b/actors/miner/src/state.rs index 2a59de5d7..9a7c45c7a 100644 --- a/actors/miner/src/state.rs +++ b/actors/miner/src/state.rs @@ -9,7 +9,7 @@ use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ actor_error, make_empty_map, make_map_with_root_and_bitwidth, u64_key, ActorContext, - ActorError, Array, + ActorContext2, ActorError, Array, }; use fvm_ipld_bitfield::BitField; use fvm_ipld_blockstore::Blockstore; @@ -20,6 +20,7 @@ use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser; use fvm_shared::clock::{ChainEpoch, QuantSpec, EPOCH_UNDEFINED}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{RegisteredPoStProof, SectorNumber, SectorSize, MAX_SECTOR_NUMBER}; use fvm_shared::HAMT_BIT_WIDTH; use num_traits::{Signed, Zero}; @@ -130,33 +131,36 @@ impl State { ) -> Result { let empty_precommit_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .context("failed to construct empty precommit map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty precommit map")?; 
let empty_precommits_cleanup_array = Array::::new_with_bit_width(store, PRECOMMIT_EXPIRY_AMT_BITWIDTH) .flush() - .context("failed to construct empty precommits array")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct empty precommits array", + )?; let empty_sectors_array = Array::::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) .flush() - .context("failed to construct sectors array")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct sectors array")?; let empty_bitfield = store .put_cbor(&BitField::new(), Code::Blake2b256) - .context("failed to construct empty bitfield")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct empty bitfield")?; let deadline = Deadline::new(store)?; let empty_deadline = store .put_cbor(&deadline, Code::Blake2b256) - .context("failed to construct illegal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; let empty_deadlines = store .put_cbor(&Deadlines::new(policy, empty_deadline), Code::Blake2b256) - .context("failed to construct illegal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; let empty_vesting_funds_cid = store .put_cbor(&VestingFunds::new(), Code::Blake2b256) - .context("failed to construct illegal state")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct illegal state")?; Ok(Self { info: info_cid, @@ -185,7 +189,7 @@ impl State { match store.get_cbor(&self.info) { Ok(Some(info)) => Ok(info), Ok(None) => Err(actor_error!(not_found, "failed to get miner info")), - Err(e) => Err(ActorError::from(e).wrap("failed to get miner info")), + Err(e) => Err(actor_error!(illegal_state, "failed to get miner info: {:?}", e)), } } @@ -194,7 +198,7 @@ impl State { store: &BS, info: &MinerInfo, ) -> Result<(), ActorError> { - let cid = store.put_cbor(&info, Code::Blake2b256)?; + let cid = store.put_cbor(&info, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; self.info = 
cid; Ok(()) } @@ -238,7 +242,7 @@ impl State { ) -> Result<(), ActorError> { let prior_allocation = store .get_cbor(&self.allocated_sectors) - .context("failed to load allocated sectors bitfield")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load allocated sectors bitfield")? .ok_or_else(|| actor_error!(illegal_state, "allocated sectors bitfield not found"))?; if policy != CollisionPolicy::AllowCollisions { @@ -254,8 +258,9 @@ impl State { } } let new_allocation = &prior_allocation | sector_numbers; - self.allocated_sectors = - store.put_cbor(&new_allocation, Code::Blake2b256).with_context(|| { + self.allocated_sectors = store + .put_cbor(&new_allocation, Code::Blake2b256) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!( "failed to store allocated sectors bitfield after adding {:?}", sector_numbers, @@ -272,12 +277,15 @@ impl State { precommits: Vec, ) -> Result<(), ActorError> { let mut precommitted = - make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; + make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_SERIALIZATION)?; for precommit in precommits.into_iter() { let sector_no = precommit.info.sector_number; let modified = precommitted .set_if_absent(u64_key(precommit.info.sector_number), precommit) - .with_context(|| format!("failed to store precommitment for {:?}", sector_no,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store precommitment for {:?}", sector_no,) + })?; if !modified { return Err(actor_error!( @@ -288,7 +296,7 @@ impl State { } } - self.pre_committed_sectors = precommitted.flush()?; + self.pre_committed_sectors = precommitted.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -312,14 +320,16 @@ impl State { &self.pre_committed_sectors, store, HAMT_BIT_WIDTH, - )?; + ) + .exit_code(ExitCode::USR_SERIALIZATION)?; let mut result = Vec::with_capacity(sector_numbers.len()); for §or_number 
in sector_numbers { let info = match precommitted .get(&u64_key(sector_number)) - .with_context(|| format!("failed to load precommitment for {}", sector_number))? - { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load precommitment for {}", sector_number) + })? { Some(info) => info.clone(), None => continue, }; @@ -354,7 +364,7 @@ impl State { store: &BS, sector_num: SectorNumber, ) -> Result { - let sectors = Sectors::load(store, &self.sectors)?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(sectors.get(sector_num)?.is_some()) } @@ -363,11 +373,15 @@ impl State { store: &BS, new_sectors: Vec, ) -> Result<(), ActorError> { - let mut sectors = Sectors::load(store, &self.sectors).context("failed to load sectors")?; + let mut sectors = Sectors::load(store, &self.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load sectors")?; sectors.store(new_sectors)?; - self.sectors = sectors.amt.flush().context("failed to persist sectors")?; + self.sectors = sectors + .amt + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to persist sectors")?; Ok(()) } @@ -377,7 +391,7 @@ impl State { store: &BS, sector_num: SectorNumber, ) -> Result, ActorError> { - let sectors = Sectors::load(store, &self.sectors)?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; sectors.get(sector_num) } @@ -386,13 +400,17 @@ impl State { store: &BS, sector_nos: &BitField, ) -> Result<(), ActorError> { - let mut sectors = Sectors::load(store, &self.sectors)?; + let mut sectors = + Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; for sector_num in sector_nos.iter() { - sectors.amt.delete(sector_num).context("could not delete sector number")?; + sectors + .amt + .delete(sector_num) + .context_code(ExitCode::USR_ILLEGAL_STATE, "could not delete sector number")?; } - self.sectors = sectors.amt.flush()?; + self.sectors = 
sectors.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -400,8 +418,8 @@ impl State { where F: FnMut(&SectorOnChainInfo) -> Result<(), ActorError>, { - let sectors = Sectors::load(store, &self.sectors)?; - sectors.amt.try_for_each(|_, v| f(v))?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; + sectors.amt.try_for_each(|_, v| f(v)).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -434,7 +452,7 @@ impl State { mut deadline_sectors: DeadlineSectorMap, ) -> Result, ActorError> { let mut deadlines = self.load_deadlines(store)?; - let sectors = Sectors::load(store, &self.sectors)?; + let sectors = Sectors::load(store, &self.sectors).exit_code(ExitCode::USR_SERIALIZATION)?; let mut all_replaced = Vec::new(); for (deadline_idx, partition_sectors) in deadline_sectors.iter() { @@ -687,13 +705,15 @@ impl State { store: &BS, sectors: &BitField, ) -> Result, ActorError> { - Sectors::load(store, &self.sectors)?.load_sector(sectors) + Sectors::load(store, &self.sectors) + .exit_code(ExitCode::USR_SERIALIZATION)? + .load_sector(sectors) } pub fn load_deadlines(&self, store: &BS) -> Result { store .get_cbor::(&self.deadlines) - .context("failed to load deadlines")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines")? .ok_or_else( || actor_error!(illegal_state; "failed to load deadlines {}", self.deadlines), ) @@ -704,7 +724,8 @@ impl State { store: &BS, deadlines: Deadlines, ) -> Result<(), ActorError> { - self.deadlines = store.put_cbor(&deadlines, Code::Blake2b256)?; + self.deadlines = + store.put_cbor(&deadlines, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -715,7 +736,9 @@ impl State { ) -> Result { store .get_cbor(&self.vesting_funds) - .with_context(|| format!("failed to load vesting funds {}", self.vesting_funds))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load vesting funds {}", self.vesting_funds) + })? 
.ok_or_else( || actor_error!(not_found; "failed to load vesting funds {:?}", self.vesting_funds), ) @@ -727,7 +750,8 @@ impl State { store: &BS, funds: &VestingFunds, ) -> Result<(), ActorError> { - self.vesting_funds = store.put_cbor(funds, Code::Blake2b256)?; + self.vesting_funds = + store.put_cbor(funds, Code::Blake2b256).exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -1013,10 +1037,16 @@ impl State { let quant = self.quant_spec_every_deadline(policy); let mut queue = super::BitFieldQueue::new(store, &self.pre_committed_sectors_cleanup, quant) - .context("failed to load pre-commit clean up queue")?; - - queue.add_many_to_queue_values(cleanup_events.into_iter())?; - self.pre_committed_sectors_cleanup = queue.amt.flush()?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load pre-commit clean up queue", + )?; + + queue + .add_many_to_queue_values(cleanup_events.into_iter()) + .exit_code(ExitCode::USR_SERIALIZATION)?; + self.pre_committed_sectors_cleanup = + queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; Ok(()) } @@ -1033,12 +1063,15 @@ impl State { store, &self.pre_committed_sectors_cleanup, self.quant_spec_every_deadline(policy), - )?; + ) + .exit_code(ExitCode::USR_SERIALIZATION)?; - let (sectors, modified) = cleanup_queue.pop_until(current_epoch)?; + let (sectors, modified) = + cleanup_queue.pop_until(current_epoch).exit_code(ExitCode::USR_SERIALIZATION)?; if modified { - self.pre_committed_sectors_cleanup = cleanup_queue.amt.flush()?; + self.pre_committed_sectors_cleanup = + cleanup_queue.amt.flush().exit_code(ExitCode::USR_SERIALIZATION)?; } let mut precommits_to_delete = Vec::new(); @@ -1046,7 +1079,10 @@ impl State { for i in sectors.iter() { let sector_number = i as SectorNumber; - let sector = match self.get_precommitted_sector(store, sector_number)? { + let sector = match self + .get_precommitted_sector(store, sector_number) + .exit_code(ExitCode::USR_SERIALIZATION)? 
+ { Some(sector) => sector, // already committed/deleted None => continue, @@ -1061,7 +1097,8 @@ impl State { // Actually delete it. if !precommits_to_delete.is_empty() { - self.delete_precommitted_sectors(store, &precommits_to_delete)?; + self.delete_precommitted_sectors(store, &precommits_to_delete) + .exit_code(ExitCode::USR_SERIALIZATION)?; } self.pre_commit_deposits -= &deposit_to_burn; @@ -1167,15 +1204,16 @@ impl State { ) -> Result, ActorError> { let mut precommits = Vec::new(); let precommitted = - make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH)?; + make_map_with_root_and_bitwidth(&self.pre_committed_sectors, store, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_SERIALIZATION)?; for sector_no in sector_nos.iter() { if sector_no as u64 > MAX_SECTOR_NUMBER { return Err(actor_error!(illegal_argument; "sector number greater than maximum")); } - let info: &SectorPreCommitOnChainInfo = - precommitted - .get(&u64_key(sector_no as u64))? - .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_no))?; + let info: &SectorPreCommitOnChainInfo = precommitted + .get(&u64_key(sector_no as u64)) + .exit_code(ExitCode::USR_SERIALIZATION)? 
+ .ok_or_else(|| actor_error!(not_found, "sector {} not found", sector_no))?; precommits.push(info.clone()); } Ok(precommits) diff --git a/actors/miner/tests/util.rs b/actors/miner/tests/util.rs index a1befac76..3629dc7cc 100644 --- a/actors/miner/tests/util.rs +++ b/actors/miner/tests/util.rs @@ -1187,7 +1187,7 @@ impl ActorHarness { let live = part.live_sectors(); let to_prove = &live & §or_nos; if to_prove.is_empty() { - return Ok(()); + return; } let mut to_skip = &live - &to_prove; @@ -1204,7 +1204,6 @@ impl ActorHarness { if skipped_proven.get(i) { skipped_proven_sector_infos.push(sector.clone()); } - Ok(()) }) .unwrap(); let new_faulty_power = @@ -1218,7 +1217,6 @@ impl ActorHarness { if new_proven.get(i) { new_proven_infos.push(sector.clone()); } - Ok(()) }) .unwrap(); let new_proven_power = self.power_pair_for_sectors(&new_proven_infos); @@ -1230,8 +1228,6 @@ impl ActorHarness { index: part_idx, skipped: UnvalidatedBitField::Validated(to_skip), }); - - Ok(()) }) .unwrap(); diff --git a/actors/multisig/src/lib.rs b/actors/multisig/src/lib.rs index 2cca97075..6ab70338d 100644 --- a/actors/multisig/src/lib.rs +++ b/actors/multisig/src/lib.rs @@ -7,7 +7,7 @@ use fil_actors_runtime::cbor::serialize_vec; use fil_actors_runtime::runtime::{ActorCode, Primitives, Runtime}; use fil_actors_runtime::{ actor_error, cbor, make_empty_map, make_map_with_root, resolve_to_id_addr, ActorContext, - ActorError, Map, INIT_ACTOR_ADDR, + ActorContext2, ActorError, Map, INIT_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -99,7 +99,7 @@ impl Actor { let empty_root = make_empty_map::<_, ()>(rt.store(), HAMT_BIT_WIDTH) .flush() - .context("Failed to create empty map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty map")?; let mut st: State = State { signers: resolved_signers, @@ -146,7 +146,7 @@ impl Actor { } let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) - .context("failed to load pending 
transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let t_id = st.next_tx_id; st.next_tx_id.0 += 1; @@ -159,9 +159,15 @@ impl Actor { approved: Vec::new(), }; - ptx.set(t_id.key(), txn.clone()).context("failed to put transaction for propose")?; + ptx.set(t_id.key(), txn.clone()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to put transaction for propose", + )?; - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok((t_id, txn)) })?; @@ -187,7 +193,7 @@ impl Actor { } let ptx = make_map_with_root(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let txn = get_transaction(rt, &ptx, params.id, params.proposal_hash)?; @@ -222,11 +228,13 @@ impl Actor { } let mut ptx = make_map_with_root::<_, Transaction>(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; let (_, tx) = ptx .delete(¶ms.id.key()) - .with_context(|| format!("failed to pop transaction {:?} for cancel", params.id,))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to pop transaction {:?} for cancel", params.id,) + })? 
.ok_or_else(|| { actor_error!(not_found, "no such transaction {:?} to cancel", params.id) })?; @@ -236,15 +244,19 @@ impl Actor { return Err(actor_error!(forbidden; "Cannot cancel another signers transaction")); } - let calculated_hash = compute_proposal_hash(&tx, rt).with_context(|| { - format!("failed to compute proposal hash for (tx: {:?})", params.id) - })?; + let calculated_hash = compute_proposal_hash(&tx, rt) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to compute proposal hash for (tx: {:?})", params.id) + })?; if !params.proposal_hash.is_empty() && params.proposal_hash != calculated_hash { return Err(actor_error!(illegal_state, "hash does not match proposal params")); } - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok(()) }) @@ -448,15 +460,20 @@ impl Actor { let st = rt.transaction(|st: &mut State, rt| { let mut ptx = make_map_with_root(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; // update approved on the transaction txn.approved.push(rt.message().caller()); ptx.set(tx_id.key(), txn.clone()) - .with_context(|| format!("failed to put transaction {} for approval", tx_id.0,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to put transaction {} for approval", tx_id.0,) + })?; - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; // Go implementation holds reference to state after transaction so this must be cloned // to match to handle possible exit code inconsistency @@ -497,11 +514,17 @@ where rt.transaction(|st: &mut State, rt| { let mut ptx = make_map_with_root::<_, 
Transaction>(&st.pending_txs, rt.store()) - .context("failed to load pending transactions")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load pending transactions")?; - ptx.delete(&txn_id.key()).context("failed to delete transaction for cleanup")?; + ptx.delete(&txn_id.key()).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to delete transaction for cleanup", + )?; - st.pending_txs = ptx.flush().context("failed to flush pending transactions")?; + st.pending_txs = ptx.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush pending transactions", + )?; Ok(()) })?; } @@ -521,7 +544,9 @@ where { let txn = ptx .get(&txn_id.key()) - .with_context(|| format!("failed to load transaction {:?} for approval", txn_id,))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load transaction {:?} for approval", txn_id,) + })? .ok_or_else(|| actor_error!(not_found, "no such transaction {:?} for approval", txn_id))?; if !proposal_hash.is_empty() { @@ -573,11 +598,11 @@ impl ActorCode for Actor { } Some(Method::Propose) => { let res = Self::propose(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::Approve) => { let res = Self::approve(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} Some(Method::Cancel) => { Self::cancel(rt, cbor::deserialize_params(params)?)?; diff --git a/actors/multisig/src/state.rs b/actors/multisig/src/state.rs index 738bab2f4..5dbaa5854 100644 --- a/actors/multisig/src/state.rs +++ b/actors/multisig/src/state.rs @@ -3,6 +3,7 @@ use cid::Cid; use fil_actors_runtime::actor_error; +use fil_actors_runtime::ActorContext2; use fil_actors_runtime::ActorError; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -11,6 +12,7 @@ use fvm_shared::address::Address; use fvm_shared::bigint::{bigint_ser, Integer}; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use indexmap::IndexMap; use num_traits::Zero; @@ -77,7 +79,8 @@ impl State { store: &BS, addr: &Address, ) -> Result<(), ActorError> { - let mut txns = make_map_with_root(&self.pending_txs, store)?; + let mut txns = + make_map_with_root(&self.pending_txs, store).exit_code(ExitCode::USR_ILLEGAL_STATE)?; // Identify transactions that need updating let mut txn_ids_to_purge = IndexMap::new(); @@ -87,20 +90,21 @@ impl State { txn_ids_to_purge.insert(tx_id.0.clone(), txn.clone()); } } - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; // Update or remove those transactions. 
for (tx_id, mut txn) in txn_ids_to_purge { txn.approved.retain(|approver| approver != addr); if !txn.approved.is_empty() { - txns.set(tx_id.into(), txn)?; + txns.set(tx_id.into(), txn).exit_code(ExitCode::USR_ILLEGAL_STATE)?; } else { - txns.delete(&tx_id)?; + txns.delete(&tx_id).exit_code(ExitCode::USR_ILLEGAL_STATE)?; } } - self.pending_txs = txns.flush()?; + self.pending_txs = txns.flush().exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(()) } diff --git a/actors/paych/src/lib.rs b/actors/paych/src/lib.rs index acf5f6c73..1d931d5f1 100644 --- a/actors/paych/src/lib.rs +++ b/actors/paych/src/lib.rs @@ -2,7 +2,9 @@ // SPDX-License-Identifier: Apache-2.0, MIT use fil_actors_runtime::runtime::{ActorCode, Runtime}; -use fil_actors_runtime::{actor_error, cbor, resolve_to_id_addr, ActorContext, ActorError, Array}; +use fil_actors_runtime::{ + actor_error, cbor, resolve_to_id_addr, ActorContext, ActorContext2, ActorError, Array, +}; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::actor::builtin::Type; @@ -58,7 +60,7 @@ impl Actor { let empty_arr_cid = Array::<(), _>::new_with_bit_width(rt.store(), LANE_STATES_AMT_BITWIDTH) .flush() - .context("failed to create empty AMT")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create empty AMT")?; rt.create(&State::new(from, to, empty_arr_cid))?; Ok(()) @@ -129,7 +131,8 @@ impl Actor { })?; // Validate signature - rt.verify_signature(sig, &signer, &sv_bz).context("voucher signature invalid")?; + rt.verify_signature(sig, &signer, &sv_bz) + .context_code(ExitCode::USR_ILLEGAL_STATE, "voucher signature invalid")?; let pch_addr = rt.message().receiver(); let svpch_id_addr = rt.resolve_address(&sv.channel_addr).ok_or_else(|| { @@ -169,15 +172,15 @@ impl Actor { rt.send( extra.actor, extra.method, - RawBytes::serialize(&extra.data)?, + RawBytes::serialize(&extra.data).exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::from(0u8), ) .map_err(|e| e.wrap("spend voucher verification 
failed"))?; } rt.transaction(|st: &mut State, rt| { - let mut l_states = - Array::load(&st.lane_states, rt.store()).context("failed to load lane states")?; + let mut l_states = Array::load(&st.lane_states, rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load lane states")?; // Find the voucher lane, create and insert it in sorted order if necessary. let lane_id = sv.lane; @@ -219,7 +222,9 @@ impl Actor { other_ls.nonce = merge.nonce; l_states .set(merge.lane, other_ls) - .with_context(|| format!("failed to store lane {}", merge.lane,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store lane {}", merge.lane,) + })?; } // 2. To prevent double counting, remove already redeemed amounts (from @@ -258,9 +263,13 @@ impl Actor { l_states .set(lane_id, lane_state) - .with_context(|| format!("failed to store lane {}", lane_id,))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store lane {}", lane_id,) + })?; - st.lane_states = l_states.flush().context("failed to save lanes")?; + st.lane_states = l_states + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to save lanes")?; Ok(()) }) } @@ -321,7 +330,8 @@ where return Err(actor_error!(illegal_argument; "maximum lane ID is 2^63-1")); } - ls.get(id).with_context(|| format!("failed to load lane {}", id)) + ls.get(id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || format!("failed to load lane {}", id)) } impl ActorCode for Actor { diff --git a/actors/power/src/lib.rs b/actors/power/src/lib.rs index ee24d0e81..df16bb138 100644 --- a/actors/power/src/lib.rs +++ b/actors/power/src/lib.rs @@ -7,8 +7,8 @@ use std::convert::TryInto; use ext::init; use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, make_map_with_root_and_bitwidth, ActorContext, ActorError, Multimap, - CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, 
make_map_with_root_and_bitwidth, ActorContext, ActorContext2, ActorError, + Multimap, CRON_ACTOR_ADDR, INIT_ACTOR_ADDR, REWARD_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; @@ -96,7 +96,8 @@ impl Actor { peer_id: params.peer, multi_addresses: params.multiaddrs, control_addresses: Default::default(), - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let miner_actor_code_cid = rt.get_code_cid_for_type(Type::Miner); let ext::init::ExecReturn { id_address, robust_address } = rt @@ -106,16 +107,18 @@ impl Actor { RawBytes::serialize(init::ExecParams { code_cid: miner_actor_code_cid, constructor_params, - })?, + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?, value, )? - .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let window_post_proof_type = params.window_post_proof_type; rt.transaction(|st: &mut State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; set_claim( &mut claims, &id_address, @@ -138,7 +141,9 @@ impl Actor { ) })?; - st.claims = claims.flush().context("failed to flush claims")?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) })?; Ok(CreateMinerReturn { id_address, robust_address }) @@ -160,7 +165,7 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; st.add_to_claim( rt.policy(), @@ -176,7 +181,9 @@ impl Actor { ) })?; - st.claims = claims.flush().context("failed to flush claims")?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) }) } @@ -209,12 +216,14 @@ impl Actor { 
CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) - .context("failed to load cron events")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events")?; st.append_cron_event(&mut events, params.event_epoch, miner_event) .context("failed to enroll cron event")?; - st.cron_event_queue = events.root().context("failed to flush cron events")?; + st.cron_event_queue = events + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush cron events")?; Ok(()) })?; Ok(()) @@ -235,7 +244,8 @@ impl Actor { TokenAmount::zero(), ) .map_err(|e| e.wrap("failed to check epoch baseline power"))? - .deserialize()?; + .deserialize() + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; if let Err(e) = Self::process_batch_proof_verifies(rt, &rewret) { error!("unexpected error processing batch proof verifies: {}. Skipping all verification for epoch {}", e, rt.curr_epoch()); @@ -257,7 +267,7 @@ impl Actor { rt.send( *REWARD_ACTOR_ADDR, ext::reward::UPDATE_NETWORK_KPI, - this_epoch_raw_byte_power?, + this_epoch_raw_byte_power.exit_code(ExitCode::USR_ILLEGAL_STATE)?, TokenAmount::from(0_u32), ) .map_err(|e| e.wrap("failed to update network KPI with reward actor"))?; @@ -305,15 +315,17 @@ impl Actor { HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH, ) - .context("failed to load proof batching set")? + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load proof batching set")? 
} else { debug!("ProofValidationBatch created"); Multimap::new(rt.store(), HAMT_BIT_WIDTH, PROOF_VALIDATION_BATCH_AMT_BITWIDTH) }; let miner_addr = rt.message().caller(); - let arr = mmap.get::(&miner_addr.to_bytes()).with_context(|| { - format!("failed to get seal verify infos at addr {}", miner_addr) - })?; + let arr = mmap + .get::(&miner_addr.to_bytes()) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get seal verify infos at addr {}", miner_addr) + })?; if let Some(arr) = arr { if arr.count() >= MAX_MINER_PROVE_COMMITS_PER_EPOCH { return Err(ActorError::unchecked( @@ -327,9 +339,11 @@ impl Actor { } mmap.add(miner_addr.to_bytes().into(), seal_info) - .context("failed to insert proof into set")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to insert proof into set")?; - let mmrc = mmap.root().context("failed to flush proofs batch map")?; + let mmrc = mmap + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush proofs batch map")?; rt.charge_gas("OnSubmitVerifySeal", GAS_ON_SUBMIT_VERIFY_SEAL); st.proof_validation_batch = Some(mmrc); @@ -514,11 +528,11 @@ impl Actor { CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH, ) - .context("failed to load cron events")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load cron events")?; let claims = make_map_with_root_and_bitwidth::<_, Claim>(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; for epoch in st.first_cron_epoch..=rt_epoch { let epoch_events = load_cron_events(&events, epoch) .with_context(|| format!("failed to load cron events at {}", epoch))?; @@ -530,7 +544,7 @@ impl Actor { for evt in epoch_events.into_iter() { let miner_has_claim = claims .contains_key(&evt.miner_addr.to_bytes()) - .context("failed to look up claim")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim")?; if !miner_has_claim { debug!("skipping cron event for 
unknown miner: {}", evt.miner_addr); continue; @@ -540,11 +554,15 @@ impl Actor { events .remove_all(&epoch_key(epoch)) - .with_context(|| format!("failed to clear cron events at {}", epoch))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to clear cron events at {}", epoch) + })?; } st.first_cron_epoch = rt_epoch + 1; - st.cron_event_queue = events.root().context("failed to flush events")?; + st.cron_event_queue = events + .root() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush events")?; Ok(()) })?; @@ -555,7 +573,8 @@ impl Actor { event_payload: event.callback_payload.bytes().to_owned(), reward_smoothed: rewret.this_epoch_reward_smoothed.clone(), quality_adj_power_smoothed: st.this_epoch_qa_power_smoothed.clone(), - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let res = rt.send( event.miner_addr, ext::miner::ON_DEFERRED_CRON_EVENT_METHOD, @@ -576,7 +595,7 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut claims = make_map_with_root_and_bitwidth(&st.claims, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; // Remove power and leave miner frozen for miner_addr in failed_miner_crons { @@ -591,7 +610,9 @@ impl Actor { st.miner_count -= 1 } - st.claims = claims.flush().context("failed to flush claims")?; + st.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims")?; Ok(()) })?; } @@ -616,7 +637,7 @@ impl ActorCode for Actor { } Some(Method::CreateMiner) => { let res = Self::create_miner(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} Some(Method::UpdateClaimedPower) => { Self::update_claimed_power(rt, cbor::deserialize_params(params)?)?; @@ -641,7 +662,7 @@ impl ActorCode for Actor { } Some(Method::CurrentTotalPower) => { let res = Self::current_total_power(rt)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } None => Err(actor_error!(unhandled_message; "Invalid method")), } diff --git a/actors/power/src/state.rs b/actors/power/src/state.rs index a7a566cac..a22bbbd61 100644 --- a/actors/power/src/state.rs +++ b/actors/power/src/state.rs @@ -7,7 +7,7 @@ use cid::Cid; use fil_actors_runtime::runtime::Policy; use fil_actors_runtime::{ actor_error, make_empty_map, make_map_with_root, make_map_with_root_and_bitwidth, ActorContext, - ActorError, Map, Multimap, + ActorContext2, ActorError, Map, Multimap, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; @@ -17,6 +17,7 @@ use fvm_shared::address::Address; use fvm_shared::bigint::{bigint_ser, BigInt}; use fvm_shared::clock::ChainEpoch; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::{RegisteredPoStProof, StoragePower}; use fvm_shared::smooth::{AlphaBetaFilter, FilterEstimate, DEFAULT_ALPHA, DEFAULT_BETA}; use fvm_shared::HAMT_BIT_WIDTH; @@ -76,11 +77,11 @@ impl State { pub fn new(store: &BS) -> Result { let empty_map = make_empty_map::<_, ()>(store, HAMT_BIT_WIDTH) .flush() - .context("Failed to create empty map")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create empty map")?; let empty_mmap = Multimap::new(store, CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH) .root() - .context("Failed to get empty multimap cid")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to get empty multimap cid")?; Ok(State { cron_event_queue: empty_mmap, @@ -104,10 +105,13 @@ impl State { s: &BS, miner: &Address, ) -> Result { - let claims = make_map_with_root_and_bitwidth(&self.claims, s, HAMT_BIT_WIDTH)?; + let claims = 
make_map_with_root_and_bitwidth(&self.claims, s, HAMT_BIT_WIDTH) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; let claim = get_claim(&claims, miner)? - .ok_or_else(|| actor_error!(not_found, "no claim for actor: {}", miner))?; + .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no claim for actor: {}", miner) + })?; let miner_nominal_power = &claim.raw_byte_power; let miner_min_power = consensus_miner_min_power(policy, claim.window_post_proof_type) @@ -130,7 +134,7 @@ impl State { s: &BS, miner: &Address, ) -> Result, ActorError> { - let claims = make_map_with_root(&self.claims, s)?; + let claims = make_map_with_root(&self.claims, s).exit_code(ExitCode::USR_ILLEGAL_STATE)?; get_claim(&claims, miner).map(|s| s.cloned()) } @@ -222,7 +226,9 @@ impl State { events .add(epoch_key(epoch), event) - .with_context(|| format!("failed to store cron event at epoch {}", epoch))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to store cron event at epoch {}", epoch) + })?; Ok(()) } @@ -268,10 +274,13 @@ impl State { where BS: Blockstore, { - let claims = - make_map_with_root::<_, Claim>(&self.claims, store).context("failed to load claims")?; + let claims = make_map_with_root::<_, Claim>(&self.claims, store) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; - if !claims.contains_key(&miner_addr.to_bytes()).context("failed to look up claim")? { + if !claims + .contains_key(&miner_addr.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to look up claim")? 
+ { return Err(actor_error!( forbidden, "unknown miner {} forbidden to interact with power actor", @@ -288,7 +297,7 @@ impl State { ) -> Result, ActorError> { let claims = make_map_with_root_and_bitwidth::<_, Claim>(&self.claims, store, HAMT_BIT_WIDTH) - .context("failed to load claims")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims")?; let claim = get_claim(&claims, miner)?; Ok(claim.cloned()) @@ -313,7 +322,9 @@ impl State { claims .delete(&miner.to_bytes()) - .with_context(|| format!("failed to delete claim for address {}", miner))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete claim for address {}", miner) + })? .ok_or_else(|| { actor_error!(illegal_state, "failed to delete claim for address: doesn't exist") })?; @@ -329,7 +340,8 @@ pub(super) fn load_cron_events( mmap.for_each(&epoch_key(epoch), |_, v: &CronEvent| { events.push(v.clone()); - })?; + }) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; Ok(events) } @@ -339,7 +351,9 @@ fn get_claim<'m, BS: Blockstore>( claims: &'m Map, a: &Address, ) -> Result, ActorError> { - claims.get(&a.to_bytes()).with_context(|| format!("failed to get claim for address {}", a)) + claims.get(&a.to_bytes()).with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get claim for address {}", a) + }) } pub fn set_claim( @@ -364,7 +378,9 @@ pub fn set_claim( claims .set(a.to_bytes().into(), claim) - .with_context(|| format!("failed to set claim for address {}", a))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set claim for address {}", a) + })?; Ok(()) } diff --git a/actors/power/tests/harness/mod.rs b/actors/power/tests/harness/mod.rs index a91180e03..eedbeb64e 100644 --- a/actors/power/tests/harness/mod.rs +++ b/actors/power/tests/harness/mod.rs @@ -245,7 +245,6 @@ impl Harness { events_map .for_each::<_, CronEvent>(&epoch_key(epoch), |_, v| { events.push(v.to_owned()); - Ok(()) }) .unwrap(); diff --git 
a/actors/reward/src/lib.rs b/actors/reward/src/lib.rs index a46dcc242..ac5397ecc 100644 --- a/actors/reward/src/lib.rs +++ b/actors/reward/src/lib.rs @@ -3,14 +3,15 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ - actor_error, cbor, ActorError, BURNT_FUNDS_ACTOR_ADDR, EXPECTED_LEADERS_PER_EPOCH, - STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + actor_error, cbor, ActorContext2, ActorError, BURNT_FUNDS_ACTOR_ADDR, + EXPECTED_LEADERS_PER_EPOCH, STORAGE_POWER_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_shared::bigint::bigint_ser::BigIntDe; use fvm_shared::bigint::{Integer, Sign}; use fvm_shared::econ::TokenAmount; +use fvm_shared::error::ExitCode; use fvm_shared::sector::StoragePower; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR, METHOD_SEND}; use log::{error, warn}; @@ -159,7 +160,7 @@ impl Actor { let res = rt.send( miner_addr, ext::miner::APPLY_REWARDS_METHOD, - RawBytes::serialize(&reward_params)?, + RawBytes::serialize(&reward_params).exit_code(ExitCode::USR_ILLEGAL_STATE)?, total_reward.clone(), ); if let Err(e) = res { @@ -251,7 +252,7 @@ impl ActorCode for Actor { } Some(Method::ThisEpochReward) => { let res = Self::this_epoch_reward(rt)?; - Ok(RawBytes::serialize(&res)?) + Ok(RawBytes::serialize(&res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::UpdateNetworkKPI) => { let param: Option = cbor::deserialize_params(params)?; diff --git a/actors/runtime/src/actor_error.rs b/actors/runtime/src/actor_error.rs index 8968419af..e2d66a3d1 100644 --- a/actors/runtime/src/actor_error.rs +++ b/actors/runtime/src/actor_error.rs @@ -1,4 +1,4 @@ -use std::{fmt::Display, num::TryFromIntError}; +use std::fmt::Display; use fvm_shared::error::ExitCode; use thiserror::Error; @@ -66,94 +66,6 @@ impl ActorError { } } -/// Converts a raw encoding error into an ErrSerialization. 
-impl From for ActorError { - fn from(e: fvm_ipld_encoding::Error) -> Self { - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl From> for ActorError { - fn from(e: fvm_ipld_amt::Error) -> Self { - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl From> for ActorError { - fn from(e: fvm_ipld_hamt::Error) -> Self { - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl From> for ActorError { - fn from(e: fvm_ipld_encoding::CborStoreError) -> Self { - Self { exit_code: ExitCode::USR_ILLEGAL_STATE, msg: e.to_string() } - } -} - -impl From for ActorError { - fn from(e: fvm_ipld_bitfield::Error) -> Self { - // TODO: correct code? - Self { exit_code: ExitCode::USR_ILLEGAL_STATE, msg: e.to_string() } - } -} - -impl From for ActorError { - fn from(e: TryFromIntError) -> Self { - // TODO: correct code? - Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl From for ActorError { - fn from(e: fvm_ipld_bitfield::OutOfRangeError) -> Self { - // TODO: correct code? 
- Self { exit_code: ExitCode::USR_SERIALIZATION, msg: e.to_string() } - } -} - -impl From> for ActorError { - fn from(e: crate::util::MultiMapError) -> Self { - match e { - crate::util::MultiMapError::Amt(e) => e.into(), - crate::util::MultiMapError::Hamt(e) => e.into(), - } - } -} - -impl, E: std::error::Error> From> - for ActorError -{ - fn from(e: crate::util::MultiMapEitherError) -> Self { - match e { - crate::util::MultiMapEitherError::User(e) => e.into(), - crate::util::MultiMapEitherError::MultiMap(e) => e.into(), - } - } -} - -impl, E: std::error::Error> From> - for ActorError -{ - fn from(e: fvm_ipld_amt::EitherError) -> Self { - match e { - fvm_ipld_amt::EitherError::User(e) => e.into(), - fvm_ipld_amt::EitherError::Amt(e) => e.into(), - } - } -} - -impl, E: std::error::Error> From> - for ActorError -{ - fn from(e: fvm_ipld_hamt::EitherError) -> Self { - match e { - fvm_ipld_hamt::EitherError::User(e) => e.into(), - fvm_ipld_hamt::EitherError::Hamt(e) => e.into(), - } - } -} - /// Converts an actor deletion error into an actor error with the appropriate exit code. This /// facilitates propagation. 
#[cfg(feature = "fil-actor")] @@ -202,28 +114,48 @@ pub trait ActorContext { F: FnOnce() -> C; } -impl> ActorContext for Result { - fn context(self, context: C) -> Result +pub trait ActorContext2: Sized { + fn exit_code(self, code: ExitCode) -> Result; + + fn context_code(self, code: ExitCode, context: C) -> Result where C: Display + Send + Sync + 'static, { - self.map_err(|err| { - let mut err: ActorError = err.into(); - err.msg = format!("{}: {}", context, err.msg); - err - }) + self.with_context_code(code, || context) } - fn with_context(self, f: F) -> Result + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C; +} + +// hack to allow anyhow::Error + std::error::Error, can be dropped once Runtime is fixed +impl ActorContext2 for Result { + fn exit_code(self, code: ExitCode) -> Result { + self.map_err(|err| ActorError { exit_code: code, msg: err.to_string() }) + } + + fn with_context_code(self, code: ExitCode, f: F) -> Result where C: Display + Send + Sync + 'static, F: FnOnce() -> C, { - self.map_err(|err| { - let mut err: ActorError = err.into(); - err.msg = format!("{}: {}", f(), err.msg); - err - }) + self.map_err(|err| ActorError { exit_code: code, msg: format!("{}: {}", f(), err) }) + } +} + +impl ActorContext2 for Option { + fn exit_code(self, code: ExitCode) -> Result { + self.ok_or_else(|| ActorError { exit_code: code, msg: "None".to_string() }) + } + + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.ok_or_else(|| ActorError { exit_code: code, msg: format!("{}", f()) }) } } @@ -239,3 +171,26 @@ impl From for ActorError { } } } + +impl ActorContext for Result { + fn context(self, context: C) -> Result + where + C: Display + Send + Sync + 'static, + { + self.map_err(|mut err| { + err.msg = format!("{}: {}", context, err.msg); + err + }) + } + + fn with_context(self, f: F) -> Result + where + C: 
Display + Send + Sync + 'static, + F: FnOnce() -> C, + { + self.map_err(|mut err| { + err.msg = format!("{}: {}", f(), err.msg); + err + }) + } +} diff --git a/actors/runtime/src/util/chaos/mod.rs b/actors/runtime/src/util/chaos/mod.rs index 9e563e802..8bf61624b 100644 --- a/actors/runtime/src/util/chaos/mod.rs +++ b/actors/runtime/src/util/chaos/mod.rs @@ -14,7 +14,7 @@ pub use state::*; pub use types::*; use crate::runtime::{ActorCode, Runtime}; -use crate::{actor_error, cbor, ActorError}; +use crate::{actor_error, cbor, ActorContext2, ActorError}; mod state; mod types; @@ -219,12 +219,12 @@ impl ActorCode for Actor { } Some(Method::ResolveAddress) => { let res = Self::resolve_address(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::Send) => { let res: SendReturn = Self::send(rt, cbor::deserialize_params(params)?)?; - Ok(RawBytes::serialize(res)?) + Ok(RawBytes::serialize(res).exit_code(ExitCode::USR_ILLEGAL_STATE)?) } Some(Method::DeleteActor) => { @@ -244,7 +244,7 @@ impl ActorCode for Actor { Some(Method::InspectRuntime) => { let inspect = Self::inspect_runtime(rt)?; - Ok(RawBytes::serialize(inspect)?) + Ok(RawBytes::serialize(inspect).exit_code(ExitCode::USR_ILLEGAL_STATE)?) 
} None => Err(actor_error!(unhandled_message; "Invalid method")), diff --git a/actors/system/src/lib.rs b/actors/system/src/lib.rs index 93b9befd3..d01c8b896 100644 --- a/actors/system/src/lib.rs +++ b/actors/system/src/lib.rs @@ -1,12 +1,13 @@ // Copyright 2019-2022 ChainSafe Systems // SPDX-License-Identifier: Apache-2.0, MIT -use anyhow::anyhow; + use cid::{multihash, Cid}; -use fil_actors_runtime::ActorContext; +use fil_actors_runtime::ActorContext2; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::tuple::*; use fvm_ipld_encoding::CborStore; use fvm_ipld_encoding::{Cbor, RawBytes}; +use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::FromPrimitive; @@ -35,10 +36,11 @@ pub struct State { impl Cbor for State {} impl State { - pub fn new(store: &BS) -> anyhow::Result { + pub fn new(store: &BS) -> Result { let c = store .put_cbor(&Vec::<(String, Cid)>::new(), multihash::Code::Blake2b256) - .map_err(|e| anyhow!("failed to put system state to store: {}", e))?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to put system state to store")?; + Ok(Self { builtin_actors: c }) } @@ -65,7 +67,8 @@ impl Actor { { rt.validate_immediate_caller_is(std::iter::once(&*SYSTEM_ACTOR_ADDR))?; - let state = State::new(rt.store()).context("failed to construct state")?; + let state = State::new(rt.store()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to construct state")?; rt.create(&state)?; Ok(()) diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index 1704c7272..168b25941 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -4,13 +4,14 @@ use fil_actors_runtime::runtime::{ActorCode, Runtime}; use fil_actors_runtime::{ actor_error, cbor, make_map_with_root_and_bitwidth, resolve_to_id_addr, ActorContext, - ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, SYSTEM_ACTOR_ADDR, + ActorContext2, ActorError, Map, STORAGE_MARKET_ACTOR_ADDR, 
SYSTEM_ACTOR_ADDR, }; use fvm_ipld_blockstore::Blockstore; use fvm_ipld_encoding::RawBytes; use fvm_ipld_hamt::BytesKey; use fvm_shared::address::Address; use fvm_shared::bigint::bigint_ser::BigIntDe; +use fvm_shared::error::ExitCode; use fvm_shared::{MethodNum, HAMT_BIT_WIDTH, METHOD_CONSTRUCTOR}; use num_derive::FromPrimitive; use num_traits::{FromPrimitive, Signed, Zero}; @@ -54,7 +55,8 @@ impl Actor { .resolve_address(&root_key) .ok_or_else(|| actor_error!(illegal_argument, "root should be an ID address"))?; - let st = State::new(rt.store(), id_addr).context("Failed to create verifreg state")?; + let st = State::new(rt.store(), id_addr) + .context_code(ExitCode::USR_ILLEGAL_STATE, "Failed to create verifreg state")?; rt.create(&st)?; Ok(()) @@ -87,18 +89,20 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let verified_clients = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verified_clients, rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let found = verified_clients .contains_key(&verifier.to_bytes()) - .with_context(|| format!("failed to get client state for {}", verifier))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get client state for {}", verifier) + })?; if found { return Err(actor_error!( illegal_argument, @@ -109,9 +113,11 @@ impl Actor { verifiers .set(verifier.to_bytes().into(), BigIntDe(params.allowance.clone())) - .context("failed to add verifier")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to add verifier")?; - st.verifiers = verifiers.flush().context("failed to flush verifiers")?; + st.verifiers = verifiers + .flush() + 
.context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; Ok(()) })?; @@ -136,15 +142,18 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; verifiers .delete(&verifier.to_bytes()) - .context("failed to remove verifier")? - .ok_or_else(|| { - actor_error!(illegal_argument, "failed to remove verifier: not found") - })?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to remove verifier")? + .context_code( + ExitCode::USR_ILLEGAL_ARGUMENT, + "failed to remove verifier: not found", + )?; - st.verifiers = verifiers.flush().context("failed to flush verifiers")?; + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; Ok(()) })?; @@ -183,22 +192,27 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verifiers = make_map_with_root_and_bitwidth(&st.verifiers, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // Validate caller is one of the verifiers. let verifier = rt.message().caller(); let BigIntDe(verifier_cap) = verifiers .get(&verifier.to_bytes()) - .with_context(|| format!("failed to get Verifier {}", verifier))? - .ok_or_else(|| actor_error!(not_found, format!("no such Verifier {}", verifier)))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get Verifier {}", verifier) + })? 
+ .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no such Verifier {}", verifier) + })?; // Validate client to be added isn't a verifier - let found = - verifiers.contains_key(&client.to_bytes()).context("failed to get verifier")?; + let found = verifiers + .contains_key(&client.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; if found { return Err(actor_error!( @@ -221,11 +235,15 @@ impl Actor { verifiers .set(verifier.to_bytes().into(), BigIntDe(new_verifier_cap)) - .with_context(|| format!("Failed to update new verifier cap for {}", verifier))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to update new verifier cap for {}", verifier) + })?; let client_cap = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("Failed to get verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to get verified client {}", client) + })?; // if verified client exists, add allowance to existing cap // otherwise, create new client with allownace @@ -237,13 +255,16 @@ impl Actor { verified_clients .set(client.to_bytes().into(), BigIntDe(client_cap.clone())) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("Failed to add verified client {} with cap {}", client, client_cap,) })?; - st.verifiers = verifiers.flush().context("failed to flush verifiers")?; - st.verified_clients = - verified_clients.flush().context("failed to flush verified clients")?; + st.verifiers = verifiers + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verifiers")?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; Ok(()) })?; @@ -275,12 +296,16 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load 
verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("failed to get verified client {}", &client))? - .ok_or_else(|| actor_error!(not_found, "no such verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", client) + })? + .with_context_code(ExitCode::USR_NOT_FOUND, || { + format!("no such verified client {}", client) + })?; if vc_cap.is_negative() { return Err(actor_error!( illegal_state, @@ -306,22 +331,23 @@ impl Actor { // Will be restored later if the deal did not get activated with a ProvenSector. verified_clients .delete(&client.to_bytes()) - .with_context(|| format!("Failed to delete verified client {}", client))? - .ok_or_else(|| { - actor_error!( - illegal_state, - "Failed to delete verified client {}: not found", - client - ) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to delete verified client {}", client) + })? 
+ .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to delete verified client {}: not found", client) })?; } else { verified_clients .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) - .with_context(|| format!("Failed to update verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to update verified client {}", client) + })?; } - st.verified_clients = - verified_clients.flush().context("failed to flush verified clients")?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; Ok(()) })?; @@ -356,18 +382,19 @@ impl Actor { rt.transaction(|st: &mut State, rt| { let mut verified_clients = make_map_with_root_and_bitwidth(&st.verified_clients, rt.store(), HAMT_BIT_WIDTH) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; let verifiers = make_map_with_root_and_bitwidth::<_, BigIntDe>( &st.verifiers, rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verifiers")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verifiers")?; // validate we are NOT attempting to do this for a verifier - let found = - verifiers.contains_key(&client.to_bytes()).context("failed to get verifier")?; + let found = verifiers + .contains_key(&client.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; if found { return Err(actor_error!( @@ -380,7 +407,9 @@ impl Actor { // Get existing cap let BigIntDe(vc_cap) = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("failed to get verified client {}", &client))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", &client) + })? 
.cloned() .unwrap_or_default(); @@ -388,10 +417,13 @@ impl Actor { let new_vc_cap = vc_cap + ¶ms.deal_size; verified_clients .set(client.to_bytes().into(), BigIntDe(new_vc_cap)) - .with_context(|| format!("Failed to put verified client {}", client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("Failed to put verified client {}", client) + })?; - st.verified_clients = - verified_clients.flush().context("failed to flush verified clients")?; + st.verified_clients = verified_clients + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush verified clients")?; Ok(()) })?; @@ -449,7 +481,7 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // check that `client` is currently a verified client if !is_verifier(rt, st, client)? { @@ -459,7 +491,9 @@ impl Actor { // get existing cap allocated to client let BigIntDe(previous_data_cap) = verified_clients .get(&client.to_bytes()) - .with_context(|| format!("failed to get verified client {}", &client))? + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get verified client {}", &client) + })? 
.cloned() .unwrap_or_default(); @@ -479,7 +513,10 @@ impl Actor { rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load datacap removal proposal ids")?; + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load datacap removal proposal ids", + )?; let verifier_1_id = use_proposal_id(&mut proposal_ids, verifier_1, client)?; let verifier_2_id = use_proposal_id(&mut proposal_ids, verifier_2, client)?; @@ -504,13 +541,15 @@ impl Actor { // no DataCap remaining, delete verified client verified_clients .delete(&client.to_bytes()) - .with_context(|| format!("failed to delete verified client {}", &client))?; + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete verified client {}", &client) + })?; removed_data_cap_amount = previous_data_cap; } else { // update DataCap amount after removal verified_clients .set(BytesKey::from(client.to_bytes()), BigIntDe(new_data_cap)) - .with_context(|| { + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { format!("failed to update datacap for verified client {}", &client) })?; removed_data_cap_amount = params.data_cap_amount_to_remove.clone(); @@ -550,11 +589,12 @@ where rt.store(), HAMT_BIT_WIDTH, ) - .context("failed to load verified clients")?; + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load verified clients")?; // check that the `address` is currently a verified client - let found = - verified_clients.contains_key(&address.to_bytes()).context("failed to get verifier")?; + let found = verified_clients + .contains_key(&address.to_bytes()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get verifier")?; Ok(found) }