Skip to content

Commit 1e2026e

Browse files
morph-dev authored and Omar Carey committed
feat: create fn for history content migration (ethereum#1684)
1 parent 420fc01 commit 1e2026e

File tree

6 files changed

+188
-0
lines changed

6 files changed

+188
-0
lines changed

Cargo.lock

Lines changed: 1 addition & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

crates/storage/src/versioned/id_indexed_v1/store.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -553,7 +553,9 @@ impl<TContentKey: OverlayContentKey> IdIndexedV1Store<TContentKey> {
553553
const fn extra_disk_usage_per_content_bytes(content_type: &ContentType) -> u64 {
554554
match content_type {
555555
ContentType::History => 750,
556+
ContentType::HistoryEternal => 750,
556557
ContentType::State => 500,
558+
ContentType::HistoryEphemeral => panic!("HistoryEphemeral is not supported"),
557559
}
558560
}
559561

crates/storage/src/versioned/mod.rs

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,8 +18,17 @@ pub use utils::create_store;
1818
#[derive(Clone, Debug, Display, Eq, PartialEq, AsRefStr)]
1919
#[strum(serialize_all = "snake_case")]
2020
pub enum ContentType {
21+
/// Corresponds to the history network content.
22+
///
23+
/// This type is deprecated and `HistoryEternal` or should be used instead.
24+
/// See https://github.com/ethereum/trin/issues/1666".
2125
History,
26+
/// Corresponds to the state network content.
2227
State,
28+
/// Corresponds to the non-ephemeral history network content.
29+
HistoryEternal,
30+
/// Corresponds to the ephemeral history network content.
31+
HistoryEphemeral,
2332
}
2433

2534
/// The version of the store. There should be exactly one implementation of the

crates/subnetworks/history/Cargo.toml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ ethereum_ssz.workspace = true
1919
ethportal-api.workspace = true
2020
parking_lot.workspace = true
2121
portalnet.workspace = true
22+
rusqlite.workspace = true
2223
serde_json.workspace = true
2324
tokio.workspace = true
2425
tracing.workspace = true

crates/subnetworks/history/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ mod jsonrpc;
66
pub mod network;
77
mod ping_extensions;
88
mod storage;
9+
mod storage_migration;
910
pub mod validation;
1011

1112
use std::sync::Arc;
Lines changed: 174 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
use ethportal_api::{
2+
types::{
3+
content_value::{
4+
history::HistoryContentValue as OldHistoryContentValue,
5+
history_new::HistoryContentValue as NewHistoryContentValue,
6+
},
7+
execution::{
8+
header_with_proof::BlockHeaderProof as OldBlockHeaderProof,
9+
header_with_proof_new::{
10+
BlockHeaderProof, BlockProofHistoricalHashesAccumulator, BlockProofHistoricalRoots,
11+
BlockProofHistoricalSummaries, ExecutionBlockProofCapella, HeaderWithProof,
12+
},
13+
},
14+
network::Subnetwork,
15+
},
16+
ContentValue, HistoryContentKey, OverlayContentKey, RawContentValue,
17+
};
18+
use rusqlite::{named_params, types::Type};
19+
use tracing::{debug, error, info};
20+
use trin_storage::{
21+
error::ContentStoreError,
22+
versioned::{create_store, ContentType, IdIndexedV1Store, IdIndexedV1StoreConfig},
23+
PortalStorageConfig,
24+
};
25+
26+
/// SQL statements used by the legacy-history migration.
mod sql {
    /// Checks whether the legacy `ii1_history` table is present.
    pub const TABLE_EXISTS: &str =
        "SELECT name FROM sqlite_master WHERE type='table' AND name='ii1_history';";

    /// Deletes up to `:limit` rows (ordered by content id) from the legacy
    /// table and returns the removed key/value pairs so they can be migrated.
    pub const BATCH_DELETE: &str = "
        DELETE FROM ii1_history
        WHERE rowid IN (
            SELECT rowid
            FROM ii1_history
            ORDER BY content_id
            LIMIT :limit
        )
        RETURNING content_key, content_value;
    ";

    /// Removes the legacy table once migration is complete.
    pub const DROP_TABLE: &str = "DROP TABLE ii1_history;";
}

/// Number of rows moved per delete-and-reinsert round trip.
const BATCH_DELETE_LIMIT: usize = 100;
46+
#[allow(unused)]
47+
pub fn maybe_migrate(config: &PortalStorageConfig) -> Result<(), ContentStoreError> {
48+
let conn = config.sql_connection_pool.get()?;
49+
50+
if !conn.prepare(sql::TABLE_EXISTS)?.exists(())? {
51+
info!("Legacy history table doesn't exist!");
52+
return Ok(());
53+
}
54+
55+
info!("Legacy history table exists. Starting migration.");
56+
57+
let config = IdIndexedV1StoreConfig::new(
58+
ContentType::HistoryEternal,
59+
Subnetwork::History,
60+
config.clone(),
61+
);
62+
63+
let mut store: IdIndexedV1Store<HistoryContentKey> = create_store(
64+
ContentType::HistoryEternal,
65+
config.clone(),
66+
config.sql_connection_pool,
67+
)?;
68+
69+
let mut batch_delete_query = conn.prepare(sql::BATCH_DELETE)?;
70+
71+
loop {
72+
let deleted = batch_delete_query
73+
.query_map(named_params! { ":limit": BATCH_DELETE_LIMIT }, |row| {
74+
let key_bytes: Vec<u8> = row.get("content_key")?;
75+
let value_bytes: Vec<u8> = row.get("content_value")?;
76+
let value = RawContentValue::from(value_bytes);
77+
HistoryContentKey::try_from_bytes(key_bytes)
78+
.map(|key| (key, value))
79+
.map_err(|e| rusqlite::Error::FromSqlConversionFailure(0, Type::Blob, e.into()))
80+
})?
81+
.collect::<Result<Vec<(HistoryContentKey, RawContentValue)>, rusqlite::Error>>()?;
82+
83+
if deleted.is_empty() {
84+
break;
85+
}
86+
87+
for (content_key, old_content_value) in deleted {
88+
match convert_content_value(&content_key, old_content_value) {
89+
Ok(Some(new_content_value)) => {
90+
store.insert(&content_key, new_content_value)?;
91+
}
92+
Ok(None) => {
93+
debug!(
94+
key=%content_key.to_bytes(),
95+
"Not migrating content item",
96+
)
97+
}
98+
Err(err) => {
99+
error!(
100+
key=%content_key.to_bytes(),
101+
err=%err,
102+
"Error converting content item",
103+
);
104+
}
105+
}
106+
}
107+
}
108+
109+
conn.execute_batch(sql::DROP_TABLE)?;
110+
111+
info!("Migration finished!");
112+
113+
Ok(())
114+
}
115+
116+
fn convert_content_value(
117+
content_key: &HistoryContentKey,
118+
raw_content_value: RawContentValue,
119+
) -> Result<Option<RawContentValue>, ContentStoreError> {
120+
let old_content_value = OldHistoryContentValue::decode(content_key, &raw_content_value)?;
121+
match old_content_value {
122+
OldHistoryContentValue::BlockHeaderWithProof(old_header_with_proof) => {
123+
let proof = match old_header_with_proof.proof {
124+
OldBlockHeaderProof::None(_) => return Ok(None),
125+
OldBlockHeaderProof::PreMergeAccumulatorProof(pre_merge_accumulator_proof) => {
126+
let proof = BlockProofHistoricalHashesAccumulator::new(
127+
pre_merge_accumulator_proof.proof.to_vec(),
128+
)
129+
.map_err(|err| ContentStoreError::InvalidData {
130+
message: format!("Invalid HistoricalHashes proof: {err:?}"),
131+
})?;
132+
BlockHeaderProof::HistoricalHashes(proof)
133+
}
134+
OldBlockHeaderProof::HistoricalRootsBlockProof(historical_roots_proof) => {
135+
// Note: there is inconsistency between field names because old types are not
136+
// in sync with most recent spec.
137+
let proof = BlockProofHistoricalRoots {
138+
beacon_block_proof: historical_roots_proof.historical_roots_proof,
139+
beacon_block_root: historical_roots_proof.beacon_block_root,
140+
execution_block_proof: historical_roots_proof.beacon_block_proof,
141+
slot: historical_roots_proof.slot,
142+
};
143+
BlockHeaderProof::HistoricalRoots(proof)
144+
}
145+
OldBlockHeaderProof::HistoricalSummariesBlockProof(historical_summaries_proof) => {
146+
// Note: there is inconsistency between field names because old types are not
147+
// in sync with most recent spec.
148+
let execution_block_proof = ExecutionBlockProofCapella::new(
149+
historical_summaries_proof.beacon_block_proof.to_vec(),
150+
)
151+
.map_err(|err| ContentStoreError::InvalidData {
152+
message: format!("Invalid ExecutionBlockProofCapella proof: {err:?}"),
153+
})?;
154+
let proof = BlockProofHistoricalSummaries {
155+
beacon_block_proof: historical_summaries_proof.historical_summaries_proof,
156+
beacon_block_root: historical_summaries_proof.beacon_block_root,
157+
execution_block_proof,
158+
slot: historical_summaries_proof.slot,
159+
};
160+
BlockHeaderProof::HistoricalSummaries(proof)
161+
}
162+
};
163+
let new_content_value = NewHistoryContentValue::BlockHeaderWithProof(HeaderWithProof {
164+
header: old_header_with_proof.header,
165+
proof,
166+
});
167+
Ok(Some(new_content_value.encode()))
168+
}
169+
OldHistoryContentValue::BlockBody(_) | OldHistoryContentValue::Receipts(_) => {
170+
// TODO: consider whether to filter post-merge bodies and receipts
171+
Ok(Some(raw_content_value))
172+
}
173+
}
174+
}

0 commit comments

Comments
 (0)