2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

12 changes: 12 additions & 0 deletions polkadot/node/core/pvf/common/src/executor_interface.rs
@@ -219,6 +219,10 @@ impl sp_externalities::Externalities for ValidationExternalities {
panic!("storage: unsupported feature for parachain validation")
}

fn storage_with_status(&mut self, _: &[u8]) -> sp_externalities::StateLoad<Option<Vec<u8>>> {
panic!("storage_with_status: unsupported feature for parachain validation")
}

fn storage_hash(&mut self, _: &[u8]) -> Option<Vec<u8>> {
panic!("storage_hash: unsupported feature for parachain validation")
}
@@ -231,6 +235,14 @@ impl sp_externalities::Externalities for ValidationExternalities {
panic!("child_storage: unsupported feature for parachain validation")
}

fn child_storage_with_status(
&mut self,
_: &ChildInfo,
_: &[u8],
) -> sp_externalities::StateLoad<Option<Vec<u8>>> {
panic!("child_storage_with_status: unsupported feature for parachain validation")
}

fn kill_child_storage(
&mut self,
_child_info: &ChildInfo,
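The definition of `sp_externalities::StateLoad` itself is not part of the hunks shown here. Judging only from how the new call sites construct and consume it (`result.data`, `result.is_cold`, and the `StateLoad { is_cold, data }` literal in `record_stats_state.rs` further down), it is presumably a small wrapper along these lines; an inferred sketch, not the actual definition from the PR:

/// Inferred shape of the wrapper returned by the new `*_with_status` methods
/// (assumption based on the call sites in this diff, not the real definition):
/// the loaded payload plus a flag saying whether the backend had to perform a
/// cold read or the value was already warm.
pub struct StateLoad<T> {
	/// `true` when this access was the first (cold) load of the key.
	pub is_cold: bool,
	/// The value produced by the read.
	pub data: T,
}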
1 change: 1 addition & 0 deletions substrate/client/db/Cargo.toml
@@ -40,6 +40,7 @@ sp-arithmetic = { workspace = true, default-features = true }
sp-blockchain = { workspace = true, default-features = true }
sp-core = { workspace = true, default-features = true }
sp-database = { workspace = true, default-features = true }
sp-externalities = { workspace = true, default-features = true }
sp-runtime = { workspace = true, default-features = true }
sp-state-machine = { workspace = true, default-features = true }
sp-trie = { workspace = true, default-features = true }
23 changes: 23 additions & 0 deletions substrate/client/db/src/bench.rs
@@ -353,6 +353,15 @@ impl<Hasher: Hash> StateBackend<Hasher> for BenchmarkingState<Hasher> {
self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key)
}

fn storage_with_status(
&self,
key: &[u8],
) -> Result<sp_externalities::StateLoad<Option<Vec<u8>>>, Self::Error> {
// Only tally this read on a cold load?
self.add_read_key(None, key);
self.state.borrow().as_ref().ok_or_else(state_err)?.storage_with_status(key)
}

fn storage_hash(&self, key: &[u8]) -> Result<Option<Hasher::Output>, Self::Error> {
self.add_read_key(None, key);
self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key)
@@ -371,6 +380,20 @@ impl<Hasher: Hash> StateBackend<Hasher> for BenchmarkingState<Hasher> {
.child_storage(child_info, key)
}

fn child_storage_with_status(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<sp_externalities::StateLoad<Option<Vec<u8>>>, Self::Error> {
// Only tally this read on a cold load?
self.add_read_key(Some(child_info.storage_key()), key);
self.state
.borrow()
.as_ref()
.ok_or_else(state_err)?
.child_storage_with_status(child_info, key)
}

fn child_storage_hash(
&self,
child_info: &ChildInfo,
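The two `// Only tally this read on a cold load?` comments above leave open whether `BenchmarkingState` should keep recording every access or only the cold ones. A minimal sketch of the cold-only variant, reusing the helpers already visible in this hunk (an illustration of the alternative being asked about, not what the PR currently does):

fn storage_with_status(
	&self,
	key: &[u8],
) -> Result<sp_externalities::StateLoad<Option<Vec<u8>>>, Self::Error> {
	let result =
		self.state.borrow().as_ref().ok_or_else(state_err)?.storage_with_status(key)?;
	// Record the key as read only when the access was cold, so repeated (warm)
	// reads of the same key do not inflate the tracked read count.
	if result.is_cold {
		self.add_read_key(None, key);
	}
	Ok(result)
}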
13 changes: 13 additions & 0 deletions substrate/client/db/src/lib.rs
@@ -78,6 +78,7 @@ use sp_core::{
storage::{well_known_keys, ChildInfo},
};
use sp_database::Transaction;
use sp_externalities::StateLoad;
use sp_runtime::{
generic::BlockId,
traits::{
@@ -199,6 +200,10 @@ impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
self.state.storage(key)
}

fn storage_with_status(&self, key: &[u8]) -> Result<StateLoad<Option<Vec<u8>>>, Self::Error> {
self.state.storage_with_status(key)
}

fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
self.state.storage_hash(key)
}
@@ -211,6 +216,14 @@ impl<B: BlockT> StateBackend<HashingFor<B>> for RefTrackingState<B> {
self.state.child_storage(child_info, key)
}

fn child_storage_with_status(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<StateLoad<Option<Vec<u8>>>, Self::Error> {
self.state.child_storage_with_status(child_info, key)
}

fn child_storage_hash(
&self,
child_info: &ChildInfo,
21 changes: 21 additions & 0 deletions substrate/client/db/src/record_stats_state.rs
@@ -20,6 +20,7 @@

use crate::stats::StateUsageStats;
use sp_core::storage::ChildInfo;
use sp_externalities::StateLoad;
use sp_runtime::{
traits::{Block as BlockT, HashingFor},
StateVersion,
@@ -119,6 +120,12 @@ impl<S: StateBackend<HashingFor<B>>, B: BlockT> StateBackend<HashingFor<B>>
Ok(value)
}

fn storage_with_status(&self, key: &[u8]) -> Result<StateLoad<Option<Vec<u8>>>, Self::Error> {
let result = self.state.storage_with_status(key)?;
self.usage.tally_key_read(key, result.data.as_ref(), false);
Ok(result)
}

fn storage_hash(&self, key: &[u8]) -> Result<Option<B::Hash>, Self::Error> {
self.state.storage_hash(key)
}
@@ -137,6 +144,20 @@ impl<S: StateBackend<HashingFor<B>>, B: BlockT> StateBackend<HashingFor<B>>
Ok(value)
}

fn child_storage_with_status(
&self,
child_info: &ChildInfo,
key: &[u8],
) -> Result<StateLoad<Option<Vec<u8>>>, Self::Error> {
let key_tuple = (child_info.storage_key().to_vec(), key.to_vec());
let result = self.state.child_storage_with_status(child_info, &key_tuple.1)?;

// just pass it through the usage counter
let value = self.usage.tally_child_key_read(&key_tuple, result.data, false);

Ok(StateLoad { is_cold: result.is_cold, data: value })
}

fn child_storage_hash(
&self,
child_info: &ChildInfo,
84 changes: 72 additions & 12 deletions substrate/frame/revive/src/benchmarking.rs
@@ -1407,10 +1407,12 @@ mod benchmarks {

// n: new byte size
// o: old byte size
// c: is_cold (0 = hot, 1 = cold)
#[benchmark(skip_meta, pov_mode = Measured)]
fn seal_set_storage(
n: Linear<0, { limits::STORAGE_BYTES }>,
o: Linear<0, { limits::STORAGE_BYTES }>,
c: Linear<0, 1>,
) -> Result<(), BenchmarkError> {
let max_key_len = limits::STORAGE_KEY_BYTES;
let key = Key::try_from_var(vec![0u8; max_key_len as usize])
@@ -1423,6 +1425,13 @@ mod benchmarks {
info.write(&key, Some(vec![42u8; o as usize]), None, false)
.map_err(|_| "Failed to write to storage during setup.")?;

// Whitelist key if c=0 (hot)
if c == 0 {
let mut full_key = info.child_trie_info().prefixed_storage_key().into_inner();
full_key.extend_from_slice(&key.hash());
frame_benchmarking::benchmarking::add_to_whitelist(full_key.into());
}

let result;
#[block]
{
@@ -1437,12 +1446,15 @@
}

assert_ok!(result);
assert_eq!(info.read(&key).unwrap(), value);
assert_eq!(info.read(&key).data.unwrap(), value);
Ok(())
}

#[benchmark(skip_meta, pov_mode = Measured)]
fn clear_storage(n: Linear<0, { limits::STORAGE_BYTES }>) -> Result<(), BenchmarkError> {
fn clear_storage(
n: Linear<0, { limits::STORAGE_BYTES }>,
c: Linear<0, 1>,
) -> Result<(), BenchmarkError> {
let max_key_len = limits::STORAGE_KEY_BYTES;
let key = Key::try_from_var(vec![0u8; max_key_len as usize])
.map_err(|_| "Key has wrong length")?;
@@ -1455,6 +1467,15 @@
.abi_encode();

let mut call_setup = CallSetup::<T>::default();

// Whitelist key if c=0 (hot)
if c == 0 {
let info = call_setup.contract().info()?;
let mut full_key = info.child_trie_info().prefixed_storage_key().into_inner();
full_key.extend_from_slice(&key.hash());
frame_benchmarking::benchmarking::add_to_whitelist(full_key.into());
}

let (mut ext, _) = call_setup.ext();
ext.set_storage(&key, Some(vec![42u8; max_key_len as usize]), false)
.map_err(|_| "Failed to write to storage during setup.")?;
@@ -1465,17 +1486,21 @@
result = run_builtin_precompile(
&mut ext,
H160(BenchmarkStorage::<T>::MATCHER.base_address()).as_fixed_bytes(),
input_bytes,
input_bytes.clone(),
);
}
assert_ok!(result);
assert!(ext.get_storage(&key).is_none());
assert!(ext.get_storage(&key).data.is_none());

Ok(())
}

// c: is_cold (0 = hot, 1 = cold)
#[benchmark(skip_meta, pov_mode = Measured)]
fn seal_get_storage(n: Linear<0, { limits::STORAGE_BYTES }>) -> Result<(), BenchmarkError> {
fn seal_get_storage(
n: Linear<0, { limits::STORAGE_BYTES }>,
c: Linear<0, 1>,
) -> Result<(), BenchmarkError> {
let max_key_len = limits::STORAGE_KEY_BYTES;
let key = Key::try_from_var(vec![0u8; max_key_len as usize])
.map_err(|_| "Key has wrong length")?;
@@ -1486,6 +1511,15 @@
.map_err(|_| "Failed to write to storage during setup.")?;

let out_ptr = max_key_len + 4;

// Whitelist key if c=0 (hot)
if c == 0 {
let mut full_key = info.child_trie_info().prefixed_storage_key().into_inner();
full_key.extend_from_slice(&key.hash());
frame_benchmarking::benchmarking::add_to_whitelist(full_key.into());
let _ = info.read(&key);
}

let result;
#[block]
{
@@ -1500,12 +1534,16 @@
}

assert_ok!(result);
assert_eq!(&info.read(&key).unwrap(), &memory[out_ptr as usize..]);
assert_eq!(&info.read(&key).data.unwrap(), &memory[out_ptr as usize..]);
Ok(())
}

// c: is_cold (0 = hot, 1 = cold)
#[benchmark(skip_meta, pov_mode = Measured)]
fn contains_storage(n: Linear<0, { limits::STORAGE_BYTES }>) -> Result<(), BenchmarkError> {
fn contains_storage(
n: Linear<0, { limits::STORAGE_BYTES }>,
c: Linear<0, 1>,
) -> Result<(), BenchmarkError> {
let max_key_len = limits::STORAGE_KEY_BYTES;
let key = Key::try_from_var(vec![0u8; max_key_len as usize])
.map_err(|_| "Key has wrong length")?;
@@ -1517,6 +1555,15 @@
.abi_encode();

let mut call_setup = CallSetup::<T>::default();

// Whitelist key if c=0 (hot)
if c == 0 {
let info = call_setup.contract().info()?;
let mut full_key = info.child_trie_info().prefixed_storage_key().into_inner();
full_key.extend_from_slice(&key.hash());
frame_benchmarking::benchmarking::add_to_whitelist(full_key.into());
}

let (mut ext, _) = call_setup.ext();
ext.set_storage(&key, Some(vec![42u8; max_key_len as usize]), false)
.map_err(|_| "Failed to write to storage during setup.")?;
@@ -1527,17 +1574,21 @@
result = run_builtin_precompile(
&mut ext,
H160(BenchmarkStorage::<T>::MATCHER.base_address()).as_fixed_bytes(),
input_bytes,
input_bytes.clone(),
);
}
assert_ok!(result);
assert!(ext.get_storage(&key).is_some());
assert!(ext.get_storage(&key).data.is_some());

Ok(())
}

// c: is_cold (0 = hot, 1 = cold)
#[benchmark(skip_meta, pov_mode = Measured)]
fn take_storage(n: Linear<0, { limits::STORAGE_BYTES }>) -> Result<(), BenchmarkError> {
fn take_storage(
n: Linear<0, { limits::STORAGE_BYTES }>,
c: Linear<0, 1>,
) -> Result<(), BenchmarkError> {
let max_key_len = limits::STORAGE_KEY_BYTES;
let key = Key::try_from_var(vec![3u8; max_key_len as usize])
.map_err(|_| "Key has wrong length")?;
@@ -1550,6 +1601,15 @@
.abi_encode();

let mut call_setup = CallSetup::<T>::default();

// Whitelist key if c=0 (hot)
if c == 0 {
let info = call_setup.contract().info()?;
let mut full_key = info.child_trie_info().prefixed_storage_key().into_inner();
full_key.extend_from_slice(&key.hash());
frame_benchmarking::benchmarking::add_to_whitelist(full_key.into());
}

let (mut ext, _) = call_setup.ext();
ext.set_storage(&key, Some(vec![42u8; max_key_len as usize]), false)
.map_err(|_| "Failed to write to storage during setup.")?;
@@ -1560,11 +1620,11 @@
result = run_builtin_precompile(
&mut ext,
H160(BenchmarkStorage::<T>::MATCHER.base_address()).as_fixed_bytes(),
input_bytes,
input_bytes.clone(),
);
}
assert_ok!(result);
assert!(ext.get_storage(&key).is_none());
assert!(ext.get_storage(&key).data.is_none());

Ok(())
}
12 changes: 6 additions & 6 deletions substrate/frame/revive/src/exec.rs
@@ -527,9 +527,9 @@ pub trait PrecompileExt: sealing::Sealed {

/// Returns the storage entry of the executing account by the given `key`.
///
/// Returns `None` if the `key` wasn't previously set by `set_storage` or
/// was deleted.
fn get_storage(&mut self, key: &Key) -> Option<Vec<u8>>;
/// Returns a `StateLoad` containing the value (or `None` if the key wasn't previously set
/// or was deleted) and whether it was a cold or hot load.
fn get_storage(&mut self, key: &Key) -> sp_io::StateLoad<Option<Vec<u8>>>;

/// Returns `Some(len)` (in bytes) if a storage item exists at `key`.
///
@@ -544,7 +544,7 @@
key: &Key,
value: Option<Vec<u8>>,
take_old: bool,
) -> Result<WriteOutcome, DispatchError>;
) -> Result<sp_io::StateLoad<WriteOutcome>, DispatchError>;

/// Charges `diff` from the meter.
fn charge_storage(&mut self, diff: &Diff) -> DispatchResult;
@@ -2461,7 +2461,7 @@ where
frame.frame_meter.eth_gas_left().unwrap_or_default().saturated_into::<u64>()
}

fn get_storage(&mut self, key: &Key) -> Option<Vec<u8>> {
fn get_storage(&mut self, key: &Key) -> sp_io::StateLoad<Option<Vec<u8>>> {
assert!(self.has_contract_info());
self.top_frame_mut().contract_info().read(key)
}
@@ -2476,7 +2476,7 @@
key: &Key,
value: Option<Vec<u8>>,
take_old: bool,
) -> Result<WriteOutcome, DispatchError> {
) -> Result<sp_io::StateLoad<WriteOutcome>, DispatchError> {
assert!(self.has_contract_info());
let frame = self.top_frame_mut();
frame.contract_info.get(&frame.account_id).write(
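The diff stops at the trait and backend plumbing and does not show how callers consume the new cold/hot flag. The `c: Linear<0, 1>` benchmark component suggests separate cold and hot weights, so a caller of `PrecompileExt::get_storage` could look roughly like the sketch below. The function name and the `cold_weight`/`hot_weight` parameters are hypothetical stand-ins for whatever the generated weight functions end up being; `Key` and the `StateLoad`-returning `get_storage` come from the trait above, while `Weight` is assumed to be the usual FRAME weight type.

/// Hypothetical caller-side helper (not part of this PR): read a storage entry
/// through the extension and pick the weight to charge based on whether the
/// access was cold or hot.
fn read_with_access_weight<E: PrecompileExt>(
	ext: &mut E,
	key: &Key,
	cold_weight: Weight,
	hot_weight: Weight,
) -> (Option<Vec<u8>>, Weight) {
	let loaded = ext.get_storage(key);
	let weight = if loaded.is_cold { cold_weight } else { hot_weight };
	(loaded.data, weight)
}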