use ethportal_api::{
    types::{
        content_value::{
            history::HistoryContentValue as OldHistoryContentValue,
            history_new::HistoryContentValue as NewHistoryContentValue,
        },
        execution::{
            header_with_proof::BlockHeaderProof as OldBlockHeaderProof,
            header_with_proof_new::{
                BlockHeaderProof, BlockProofHistoricalHashesAccumulator, BlockProofHistoricalRoots,
                BlockProofHistoricalSummaries, ExecutionBlockProofCapella, HeaderWithProof,
            },
        },
        network::Subnetwork,
    },
    ContentValue, HistoryContentKey, OverlayContentKey, RawContentValue,
};
use rusqlite::{named_params, types::Type};
use tracing::{debug, error, info};
use trin_storage::{
    error::ContentStoreError,
    versioned::{create_store, ContentType, IdIndexedV1Store, IdIndexedV1StoreConfig},
    PortalStorageConfig,
};

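/// SQL statements used to detect, drain, and drop the legacy `ii1_history` table.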
mod sql {
    pub const TABLE_EXISTS: &str =
        "SELECT name FROM sqlite_master WHERE type='table' AND name='ii1_history';";

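    /// Deletes up to `:limit` rows from the legacy table and returns their raw key/value
    /// pairs, so each batch is read and removed in a single statement.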
    pub const BATCH_DELETE: &str = "
        DELETE FROM ii1_history
        WHERE rowid IN (
            SELECT rowid
            FROM ii1_history
            ORDER BY content_id
            LIMIT :limit
        )
        RETURNING content_key, content_value;
    ";

    pub const DROP_TABLE: &str = "DROP TABLE ii1_history;";
}

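/// Maximum number of rows drained from the legacy table in a single batch, which keeps
/// the size of each collected batch (and its memory footprint) small.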
const BATCH_DELETE_LIMIT: usize = 100;

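/// Migrates every item from the legacy history table into the versioned
/// `IdIndexedV1Store`, converting header proofs to the new format, then drops the
/// legacy table. Does nothing if the legacy table is absent.
///
/// A minimal usage sketch, assuming a `PortalStorageConfig` value named `config` is
/// already in scope:
///
/// ```ignore
/// maybe_migrate(&config)?;
/// ```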
#[allow(unused)]
pub fn maybe_migrate(config: &PortalStorageConfig) -> Result<(), ContentStoreError> {
    let conn = config.sql_connection_pool.get()?;

    if !conn.prepare(sql::TABLE_EXISTS)?.exists(())? {
        info!("Legacy history table doesn't exist!");
        return Ok(());
    }

    info!("Legacy history table exists. Starting migration.");

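    // Build the config for the new store. Note: this shadows the `config` argument.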
    let config = IdIndexedV1StoreConfig::new(
        ContentType::HistoryEternal,
        Subnetwork::History,
        config.clone(),
    );

    let mut store: IdIndexedV1Store<HistoryContentKey> = create_store(
        ContentType::HistoryEternal,
        config.clone(),
        config.sql_connection_pool,
    )?;

    let mut batch_delete_query = conn.prepare(sql::BATCH_DELETE)?;

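    // Drain the legacy table in batches: each pass removes up to BATCH_DELETE_LIMIT rows
    // and re-inserts the converted content into the new store, until nothing remains.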
    loop {
        let deleted = batch_delete_query
            .query_map(named_params! { ":limit": BATCH_DELETE_LIMIT }, |row| {
                let key_bytes: Vec<u8> = row.get("content_key")?;
                let value_bytes: Vec<u8> = row.get("content_value")?;
                let value = RawContentValue::from(value_bytes);
                HistoryContentKey::try_from_bytes(key_bytes)
                    .map(|key| (key, value))
                    .map_err(|e| rusqlite::Error::FromSqlConversionFailure(0, Type::Blob, e.into()))
            })?
            .collect::<Result<Vec<(HistoryContentKey, RawContentValue)>, rusqlite::Error>>()?;

        if deleted.is_empty() {
            break;
        }

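        // Convert each drained item before inserting; failures are logged and skipped so
        // one bad row doesn't abort the whole migration.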
        for (content_key, old_content_value) in deleted {
            match convert_content_value(&content_key, old_content_value) {
                Ok(Some(new_content_value)) => {
                    store.insert(&content_key, new_content_value)?;
                }
                Ok(None) => {
                    debug!(
                        key=%content_key.to_bytes(),
                        "Not migrating content item",
                    )
                }
                Err(err) => {
                    error!(
                        key=%content_key.to_bytes(),
                        err=%err,
                        "Error converting content item",
                    );
                }
            }
        }
    }

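    // The legacy table has been fully drained, so it is safe to drop.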
    conn.execute_batch(sql::DROP_TABLE)?;

    info!("Migration finished!");

    Ok(())
}

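/// Decodes a legacy content value and re-encodes it in the new format. Returns
/// `Ok(None)` for header values that carry no proof, signalling to the caller that the
/// item should be dropped rather than migrated.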
fn convert_content_value(
    content_key: &HistoryContentKey,
    raw_content_value: RawContentValue,
) -> Result<Option<RawContentValue>, ContentStoreError> {
    let old_content_value = OldHistoryContentValue::decode(content_key, &raw_content_value)?;
    match old_content_value {
        OldHistoryContentValue::BlockHeaderWithProof(old_header_with_proof) => {
            let proof = match old_header_with_proof.proof {
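                // Headers without a proof are not migrated; the caller logs and drops them.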
                OldBlockHeaderProof::None(_) => return Ok(None),
                OldBlockHeaderProof::PreMergeAccumulatorProof(pre_merge_accumulator_proof) => {
                    let proof = BlockProofHistoricalHashesAccumulator::new(
                        pre_merge_accumulator_proof.proof.to_vec(),
                    )
                    .map_err(|err| ContentStoreError::InvalidData {
                        message: format!("Invalid HistoricalHashes proof: {err:?}"),
                    })?;
                    BlockHeaderProof::HistoricalHashes(proof)
                }
                OldBlockHeaderProof::HistoricalRootsBlockProof(historical_roots_proof) => {
                    // Note: the field names are crossed because the old types are out of
                    // sync with the most recent spec: the old `historical_roots_proof` is
                    // the new `beacon_block_proof`, and the old `beacon_block_proof` is
                    // the new `execution_block_proof`.
                    let proof = BlockProofHistoricalRoots {
                        beacon_block_proof: historical_roots_proof.historical_roots_proof,
                        beacon_block_root: historical_roots_proof.beacon_block_root,
                        execution_block_proof: historical_roots_proof.beacon_block_proof,
                        slot: historical_roots_proof.slot,
                    };
                    BlockHeaderProof::HistoricalRoots(proof)
                }
                OldBlockHeaderProof::HistoricalSummariesBlockProof(historical_summaries_proof) => {
                    // Note: the field names are crossed here as well: the old
                    // `historical_summaries_proof` is the new `beacon_block_proof`, and
                    // the old `beacon_block_proof` is the new `execution_block_proof`.
                    let execution_block_proof = ExecutionBlockProofCapella::new(
                        historical_summaries_proof.beacon_block_proof.to_vec(),
                    )
                    .map_err(|err| ContentStoreError::InvalidData {
                        message: format!("Invalid ExecutionBlockProofCapella proof: {err:?}"),
                    })?;
                    let proof = BlockProofHistoricalSummaries {
                        beacon_block_proof: historical_summaries_proof.historical_summaries_proof,
                        beacon_block_root: historical_summaries_proof.beacon_block_root,
                        execution_block_proof,
                        slot: historical_summaries_proof.slot,
                    };
                    BlockHeaderProof::HistoricalSummaries(proof)
                }
            };
            let new_content_value = NewHistoryContentValue::BlockHeaderWithProof(HeaderWithProof {
                header: old_header_with_proof.header,
                proof,
            });
            Ok(Some(new_content_value.encode()))
        }
        OldHistoryContentValue::BlockBody(_) | OldHistoryContentValue::Receipts(_) => {
            // TODO: consider whether to filter post-merge bodies and receipts
            Ok(Some(raw_content_value))
        }
    }
}