diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs
index 84b3dcbcc7d..abcb4184a65 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/mod.rs
@@ -25,6 +25,7 @@ mod v0_to_v5;
 mod v10_to_v11;
 mod v11_to_v12;
 mod v12_to_v13;
+mod v13_to_v15;
 mod v5_to_v7;
 mod v7;
 mod v7_to_v8;
@@ -156,13 +157,22 @@ pub async fn open_and_upgrade_db(
     }
 
     if old_version < 12 {
-        v11_to_v12::schema_add(name).await?;
+        v11_to_v12::schema_bump(name).await?;
     }
 
     if old_version < 13 {
         v12_to_v13::schema_add(name).await?;
     }
 
+    if old_version < 14 {
+        v13_to_v15::schema_add(name).await?;
+    }
+
+    if old_version < 15 {
+        v13_to_v15::data_migrate(name, serializer).await?;
+        v13_to_v15::schema_delete(name).await?;
+    }
+
     // If you add more migrations here, you'll need to update
     // `tests::EXPECTED_SCHEMA_VERSION`.
 
@@ -190,13 +200,12 @@ type OldVersion = u32;
 /// * `name` - name of the indexeddb database to be upgraded.
 /// * `version` - version we are upgrading to.
 /// * `f` - closure which will be called if the database is below the version
-///   given. It will be called with three arguments `(db, txn, oldver)`, where:
+///   given. It will be called with two arguments `(db, oldver)`, where:
 ///   * `db` - the [`IdbDatabase`]
-///   * `txn` - the database transaction: a [`IdbTransaction`]
 ///   * `oldver` - the version number before the upgrade.
 async fn do_schema_upgrade<F>(name: &str, version: u32, f: F) -> Result<(), DomException>
 where
-    F: Fn(&IdbDatabase, IdbTransaction<'_>, OldVersion) -> Result<(), JsValue> + 'static,
+    F: Fn(&IdbDatabase, OldVersion) -> Result<(), JsValue> + 'static,
 {
     info!("IndexeddbCryptoStore upgrade schema -> v{version} starting");
     let mut db_req: OpenDbRequest = IdbDatabase::open_u32(name, version)?;
@@ -208,7 +217,7 @@ where
         let old_version = evt.old_version() as u32;
 
         // Run the upgrade code we were supplied
-        f(evt.db(), evt.transaction(), old_version)
+        f(evt.db(), old_version)
     }));
 
     let db = db_req.await?;
@@ -267,7 +276,7 @@ mod tests {
     wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
 
     /// The schema version we expect after we open the store.
-    const EXPECTED_SCHEMA_VERSION: u32 = 13;
+    const EXPECTED_SCHEMA_VERSION: u32 = 15;
 
     /// Adjust this to test do a more comprehensive perf test
    const NUM_RECORDS_FOR_PERF: usize = 2_000;
@@ -488,25 +497,22 @@ mod tests {
     /// Test migrating `inbound_group_sessions` data from store v5 to latest,
     /// on a store with encryption disabled.
     #[async_test]
-    async fn test_v8_v10_v12_migration_unencrypted() {
-        test_v8_v10_v12_migration_with_cipher("test_v8_migration_unencrypted", None).await
+    async fn test_v8_v15_migration_unencrypted() {
+        test_v8_v15_migration_with_cipher("test_v8_migration_unencrypted", None).await
     }
 
     /// Test migrating `inbound_group_sessions` data from store v5 to store v8,
     /// on a store with encryption enabled.
     #[async_test]
-    async fn test_v8_v10_v12_migration_encrypted() {
+    async fn test_v8_v15_migration_encrypted() {
         let cipher = StoreCipher::new().unwrap();
-        test_v8_v10_v12_migration_with_cipher(
-            "test_v8_migration_encrypted",
-            Some(Arc::new(cipher)),
-        )
-        .await;
+        test_v8_v15_migration_with_cipher("test_v8_migration_encrypted", Some(Arc::new(cipher)))
+            .await;
     }
 
     /// Helper function for `test_v8_v10_v12_migration_{un,}encrypted`: test
     /// migrating `inbound_group_sessions` data from store v5 to store v12.
-    async fn test_v8_v10_v12_migration_with_cipher(
+    async fn test_v8_v15_migration_with_cipher(
         db_prefix: &str,
         store_cipher: Option<Arc<StoreCipher>>,
     ) {
@@ -549,65 +555,43 @@ mod tests {
         assert!(fetched_backed_up_session.backed_up());
         assert!(!fetched_not_backed_up_session.backed_up());
 
-        // For v10: they have the backed_up_to property and it is indexed
-        assert_matches_v10_schema(&db_name, &store, &fetched_backed_up_session).await;
-
-        // For v12: they have the session_id, sender_key and sender_data_type properties
+        // For v15: they have the backed_up_to property and it is indexed and
+        // they have the session_id, sender_key and sender_data_type properties
         // and they are indexed
-        assert_matches_v12_schema(&db_name, &store, &fetched_backed_up_session).await;
-    }
-
-    async fn assert_matches_v10_schema(
-        db_name: &str,
-        store: &IndexeddbCryptoStore,
-        fetched_backed_up_session: &InboundGroupSession,
-    ) {
-        let db = IdbDatabase::open(&db_name).unwrap().await.unwrap();
-        assert!(db.version() >= 10.0);
-        let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap();
-        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
-        let key = store.serializer.encode_key(
-            keys::INBOUND_GROUP_SESSIONS_V3,
-            (fetched_backed_up_session.room_id(), fetched_backed_up_session.session_id()),
-        );
-        let idb_object: InboundGroupSessionIndexedDbObject =
-            serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap())
-                .unwrap();
-
-        assert_eq!(idb_object.backed_up_to, -1);
-        assert!(raw_store.index_names().find(|idx| idx == "backed_up_to").is_some());
-
-        db.close();
+        assert_matches_v15_schema(&db_name, &store, &fetched_backed_up_session).await;
     }
 
-    async fn assert_matches_v12_schema(
+    async fn assert_matches_v15_schema(
         db_name: &str,
         store: &IndexeddbCryptoStore,
         session: &InboundGroupSession,
     ) {
         let db = IdbDatabase::open(&db_name).unwrap().await.unwrap();
-        assert!(db.version() >= 12.0);
-        let transaction = db.transaction_on_one("inbound_group_sessions3").unwrap();
-        let raw_store = transaction.object_store("inbound_group_sessions3").unwrap();
+        assert!(db.version() >= 15.0);
+        let transaction = db.transaction_on_one(keys::INBOUND_GROUP_SESSIONS_V4).unwrap();
+        let raw_store = transaction.object_store(keys::INBOUND_GROUP_SESSIONS_V4).unwrap();
         let key = store
             .serializer
-            .encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (session.room_id(), session.session_id()));
+            .encode_key(keys::INBOUND_GROUP_SESSIONS_V4, (session.room_id(), session.session_id()));
         let idb_object: InboundGroupSessionIndexedDbObject =
             serde_wasm_bindgen::from_value(raw_store.get(&key).unwrap().await.unwrap().unwrap())
                 .unwrap();
 
+        assert_eq!(idb_object.backed_up_to, -1);
+        assert!(raw_store.index_names().find(|idx| idx == "backed_up_to").is_some());
+
         assert_eq!(
             idb_object.session_id,
             Some(
                 store
                     .serializer
-                    .encode_key_as_string(keys::INBOUND_GROUP_SESSIONS_V3, session.session_id())
+                    .encode_key_as_string(keys::INBOUND_GROUP_SESSIONS_V4, session.session_id())
             )
         );
         assert_eq!(
             idb_object.sender_key,
             Some(store.serializer.encode_key_as_string(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 session.sender_key().to_base64()
             ))
         );
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/old_keys.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/old_keys.rs
index 1a82a14549e..c0826de2125 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/old_keys.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/old_keys.rs
@@ -24,5 +24,7 @@ pub const INBOUND_GROUP_SESSIONS_V1: &str = "inbound_group_sessions";
 /// Also lacked the `backed_up_to` property+index.
 pub const INBOUND_GROUP_SESSIONS_V2: &str = "inbound_group_sessions2";
 
+pub const INBOUND_GROUP_SESSIONS_V3: &str = "inbound_group_sessions3";
+
 /// An old name for [`BACKUP_VERSION_V1`].
 pub const BACKUP_KEY_V1: &str = "backup_key_v1";
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs
index 75b1dc69865..0800b7e14ea 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v0_to_v5.rs
@@ -26,7 +26,7 @@ use crate::crypto_store::{
 
 /// Perform schema migrations as needed, up to schema version 5.
 pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 5, |db, _, old_version| {
+    do_schema_upgrade(name, 5, |db, old_version| {
         // An old_version of 1 could either mean actually the first version of the
         // schema, or a completely empty schema that has been created with a
         // call to `IdbDatabase::open` with no explicit "version". So, to determine
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs
index 2e6c1bb2297..1dae5783f98 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v10_to_v11.rs
@@ -59,5 +59,5 @@ pub(crate) async fn data_migrate(
 pub(crate) async fn schema_bump(name: &str) -> crate::crypto_store::Result<(), DomException> {
     // Just bump the version number to 11 to demonstrate that we have run the data
     // changes from data_migrate.
-    do_schema_upgrade(name, 11, |_, _, _| Ok(())).await
+    do_schema_upgrade(name, 11, |_, _| Ok(())).await
 }
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs
index 4da1bcedccd..cc6534564af 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v11_to_v12.rs
@@ -12,23 +12,11 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use indexed_db_futures::IdbKeyPath;
 use web_sys::DomException;
 
-use crate::crypto_store::{keys, migrations::do_schema_upgrade, Result};
+use crate::crypto_store::{migrations::do_schema_upgrade, Result};
 
-/// Perform the schema upgrade v11 to v12, adding an index on
-/// `(curve_key, sender_data_type, session_id)` to `inbound_group_sessions3`.
-pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 12, |_, transaction, _| {
-        let object_store = transaction.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
-
-        object_store.create_index(
-            keys::INBOUND_GROUP_SESSIONS_SENDER_KEY_INDEX,
-            &IdbKeyPath::str_sequence(&["sender_key", "sender_data_type", "session_id"]),
-        )?;
-
-        Ok(())
-    })
-    .await
+/// Perform the schema upgrade v11 to v12, just bumping the schema version.
+pub(crate) async fn schema_bump(name: &str) -> Result<(), DomException> {
+    do_schema_upgrade(name, 12, |_, _| Ok(())).await
 }
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs
index 638c7835b15..7173b2c1e3d 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v12_to_v13.rs
@@ -21,7 +21,7 @@ use crate::crypto_store::{keys, migrations::do_schema_upgrade, Result};
 /// Perform the schema upgrade v12 to v13, adding the
 /// `received_room_key_bundles` store.
 pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 13, |db, _, _| {
+    do_schema_upgrade(name, 13, |db, _| {
         db.create_object_store(keys::RECEIVED_ROOM_KEY_BUNDLES)?;
         Ok(())
     })
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v13_to_v15.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v13_to_v15.rs
new file mode 100644
index 00000000000..461194e701c
--- /dev/null
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v13_to_v15.rs
@@ -0,0 +1,112 @@
+// Copyright 2024 The Matrix.org Foundation C.I.C.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use indexed_db_futures::{IdbKeyPath, IdbQuerySource};
+use tracing::{debug, info};
+use web_sys::{DomException, IdbTransactionMode};
+
+use crate::{
+    crypto_store::{
+        deserialize_inbound_group_session, keys,
+        migrations::{do_schema_upgrade, old_keys, v8_to_v10, MigrationDb},
+        serialize_inbound_group_session, Result,
+    },
+    serializer::IndexeddbSerializer,
+};
+
+/// Perform the schema upgrade v13 to v14.
+///
+/// This creates an identical object store to `inbound_group_sessions3`, but
+/// adds an index on `(curve_key, sender_data_type, session_id)`.
+pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> {
+    do_schema_upgrade(name, 14, |db, _| {
+        let object_store = db.create_object_store(keys::INBOUND_GROUP_SESSIONS_V4)?;
+        v8_to_v10::index_add(&object_store)?;
+        object_store.create_index(
+            keys::INBOUND_GROUP_SESSIONS_SENDER_KEY_INDEX,
+            &IdbKeyPath::str_sequence(&["sender_key", "sender_data_type", "session_id"]),
+        )?;
+        Ok(())
+    })
+    .await
+}
+
+/// Migrate data from `inbound_group_sessions3` into `inbound_group_sessions4`.
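+///
+/// Each session is deserialized from `inbound_group_sessions3`, re-serialized,
+/// and written into `inbound_group_sessions4` (which carries the extra
+/// `sender_key`/`sender_data_type`/`session_id` index); the old rows are
+/// deleted as we go.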
+pub(crate) async fn data_migrate(name: &str, serializer: &IndexeddbSerializer) -> Result<()> {
+    let db = MigrationDb::new(name, 15).await?;
+
+    let txn = db.transaction_on_multi_with_mode(
+        &[old_keys::INBOUND_GROUP_SESSIONS_V3, keys::INBOUND_GROUP_SESSIONS_V4],
+        IdbTransactionMode::Readwrite,
+    )?;
+
+    let inbound_group_sessions3 = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V3)?;
+    let inbound_group_sessions4 = txn.object_store(keys::INBOUND_GROUP_SESSIONS_V4)?;
+
+    let row_count = inbound_group_sessions3.count()?.await?;
+    info!(row_count, "Copying inbound_group_session records");
+
+    // Iterate through all rows
+    if let Some(cursor) = inbound_group_sessions3.open_cursor()?.await? {
+        let mut idx = 0;
+        loop {
+            idx += 1;
+
+            if idx % 100 == 0 {
+                debug!("Migrating session {idx} of {row_count}");
+            }
+
+            // Deserialize the session from the old store
+            let session = deserialize_inbound_group_session(cursor.value(), serializer)?;
+
+            // Calculate its key in the new table
+            let new_key = serializer.encode_key(
+                keys::INBOUND_GROUP_SESSIONS_V4,
+                (&session.room_id, session.session_id()),
+            );
+
+            // Serialize the session in the new format
+            let new_session = serialize_inbound_group_session(&session, serializer).await?;
+
+            // Write it to the new store
+            inbound_group_sessions4.add_key_val(&new_key, &new_session)?;
+
+            // We are done with the original data, so delete it now.
+            cursor.delete()?;
+
+            // Continue to the next record, or stop if we're done
+            if !cursor.continue_cursor()?.await? {
+                debug!("Migrated {idx} sessions.");
+                break;
+            }
+        }
+    }
+
+    // We have finished with the old store. Clear it, since it is faster to
+    // clear+delete than just delete. See https://www.artificialworlds.net/blog/2024/02/02/deleting-an-indexed-db-store-can-be-incredibly-slow-on-firefox/
+    // for more details.
+    inbound_group_sessions3.clear()?.await?;
+
+    txn.await.into_result()?;
+    Ok(())
+}
+
+/// Perform the schema upgrade v14 to v15, deleting `inbound_group_sessions3`.
+pub(crate) async fn schema_delete(name: &str) -> Result<(), DomException> {
+    do_schema_upgrade(name, 15, |db, _| {
+        db.delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V3)?;
+        Ok(())
+    })
+    .await
+}
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs
index dd54665f6a2..cf24ea37f0b 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v5_to_v7.rs
@@ -36,7 +36,7 @@ use crate::{
 
 /// Perform the schema upgrade v5 to v6, creating `inbound_group_sessions2`.
 pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 6, |db, _, _| {
+    do_schema_upgrade(name, 6, |db, _| {
         let object_store = db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?;
 
         add_nonunique_index(
@@ -109,7 +109,7 @@ pub(crate) async fn data_migrate(name: &str, serializer: &IndexeddbSerializer) -
 
 /// Perform the schema upgrade v6 to v7, deleting `inbound_group_sessions`.
 pub(crate) async fn schema_delete(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 7, |db, _, _| {
+    do_schema_upgrade(name, 7, |db, _| {
         db.delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V1)?;
         Ok(())
     })
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs
index 8df6a3d50bc..71d6d2692d2 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v7_to_v8.rs
@@ -116,7 +116,7 @@ pub(crate) async fn data_migrate(name: &str, serializer: &IndexeddbSerializer) -
 
 /// Perform the schema upgrade v7 to v8, Just bumping the schema version.
 pub(crate) async fn schema_bump(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 8, |_, _, _| {
+    do_schema_upgrade(name, 8, |_, _| {
         // Just bump the version number to 8 to demonstrate that we have run the data
         // changes from prepare_data_for_v8.
         Ok(())
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs
index 61a8f58da3e..7fef5458916 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/migrations/v8_to_v10.rs
@@ -15,7 +15,7 @@
 //! Migration code that moves from inbound_group_sessions2 to
 //! inbound_group_sessions3, shrinking the values stored in each record.
 
-use indexed_db_futures::IdbQuerySource;
+use indexed_db_futures::{prelude::IdbObjectStore, IdbQuerySource};
 use matrix_sdk_crypto::olm::InboundGroupSession;
 use tracing::{debug, info};
 use web_sys::{DomException, IdbTransactionMode};
@@ -33,26 +33,26 @@ use crate::{
     IndexeddbCryptoStoreError,
 };
 
+pub(crate) fn index_add(object_store: &IdbObjectStore<'_>) -> Result<(), DomException> {
+    add_nonunique_index(object_store, keys::INBOUND_GROUP_SESSIONS_BACKUP_INDEX, "needs_backup")?;
+
+    // See https://github.com/element-hq/element-web/issues/26892#issuecomment-1906336076
+    // for the plan concerning this property and index. At time of writing, it is
+    // unused, and needs_backup is still used.
+    add_nonunique_index(
+        object_store,
+        keys::INBOUND_GROUP_SESSIONS_BACKED_UP_TO_INDEX,
+        "backed_up_to",
+    )?;
+
+    Ok(())
+}
+
 /// Perform the schema upgrade v8 to v9, creating `inbound_group_sessions3`.
 pub(crate) async fn schema_add(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 9, |db, _, _| {
-        let object_store = db.create_object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
-
-        add_nonunique_index(
-            &object_store,
-            keys::INBOUND_GROUP_SESSIONS_BACKUP_INDEX,
-            "needs_backup",
-        )?;
-
-        // See https://github.com/element-hq/element-web/issues/26892#issuecomment-1906336076
-        // for the plan concerning this property and index. At time of writing, it is
-        // unused, and needs_backup is still used.
-        add_nonunique_index(
-            &object_store,
-            keys::INBOUND_GROUP_SESSIONS_BACKED_UP_TO_INDEX,
-            "backed_up_to",
-        )?;
-
+    do_schema_upgrade(name, 9, |db, _| {
+        let object_store = db.create_object_store(old_keys::INBOUND_GROUP_SESSIONS_V3)?;
+        index_add(&object_store)?;
         Ok(())
     })
     .await
@@ -63,12 +63,12 @@ pub(crate) async fn data_migrate(name: &str, serializer: &IndexeddbSerializer) -
     let db = MigrationDb::new(name, 10).await?;
 
     let txn = db.transaction_on_multi_with_mode(
-        &[old_keys::INBOUND_GROUP_SESSIONS_V2, keys::INBOUND_GROUP_SESSIONS_V3],
+        &[old_keys::INBOUND_GROUP_SESSIONS_V2, old_keys::INBOUND_GROUP_SESSIONS_V3],
         IdbTransactionMode::Readwrite,
     )?;
 
     let inbound_group_sessions2 = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?;
-    let inbound_group_sessions3 = txn.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
+    let inbound_group_sessions3 = txn.object_store(old_keys::INBOUND_GROUP_SESSIONS_V3)?;
 
     let row_count = inbound_group_sessions2.count()?.await?;
     info!(row_count, "Shrinking inbound_group_session records");
@@ -94,7 +94,7 @@ pub(crate) async fn data_migrate(name: &str, serializer: &IndexeddbSerializer) -
 
             // Calculate its key in the new table
             let new_key = serializer.encode_key(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                old_keys::INBOUND_GROUP_SESSIONS_V3,
                 (&session.room_id, session.session_id()),
             );
 
@@ -128,7 +128,7 @@ pub(crate) async fn data_migrate(name: &str, serializer: &IndexeddbSerializer) -
 
 /// Perform the schema upgrade v8 to v10, deleting `inbound_group_sessions2`.
 pub(crate) async fn schema_delete(name: &str) -> Result<(), DomException> {
-    do_schema_upgrade(name, 10, |db, _, _| {
+    do_schema_upgrade(name, 10, |db, _| {
         db.delete_object_store(old_keys::INBOUND_GROUP_SESSIONS_V2)?;
         Ok(())
     })
diff --git a/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs b/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs
index 3d6ff0efbb0..61f56253d09 100644
--- a/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs
+++ b/crates/matrix-sdk-indexeddb/src/crypto_store/mod.rs
@@ -63,7 +63,7 @@ mod keys {
 
     pub const SESSION: &str = "session";
 
-    pub const INBOUND_GROUP_SESSIONS_V3: &str = "inbound_group_sessions3";
+    pub const INBOUND_GROUP_SESSIONS_V4: &str = "inbound_group_sessions4";
     pub const INBOUND_GROUP_SESSIONS_BACKUP_INDEX: &str = "backup";
     pub const INBOUND_GROUP_SESSIONS_BACKED_UP_TO_INDEX: &str = "backed_up_to";
     pub const INBOUND_GROUP_SESSIONS_SENDER_KEY_INDEX: &str =
@@ -280,6 +280,41 @@ impl PendingIndexeddbChanges {
     }
 }
 
+/// Transform an [`InboundGroupSession`] into a `JsValue` holding a
+/// [`InboundGroupSessionIndexedDbObject`], ready for storing.
+async fn serialize_inbound_group_session(
+    session: &InboundGroupSession,
+    serializer: &IndexeddbSerializer,
+) -> Result<JsValue> {
+    let obj = InboundGroupSessionIndexedDbObject::from_session(session, serializer).await?;
+    Ok(serde_wasm_bindgen::to_value(&obj)?)
+}
+
+/// Transform a JsValue holding a [`InboundGroupSessionIndexedDbObject`]
+/// back into a [`InboundGroupSession`].
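+///
+/// Note that the `needs_backup` flag stored on the IndexedDB object overrides
+/// whatever backed-up state is recorded inside the pickled session.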
+fn deserialize_inbound_group_session(
+    stored_value: JsValue,
+    serializer: &IndexeddbSerializer,
+) -> Result<InboundGroupSession> {
+    let idb_object: InboundGroupSessionIndexedDbObject =
+        serde_wasm_bindgen::from_value(stored_value)?;
+    let pickled_session: PickledInboundGroupSession =
+        serializer.maybe_decrypt_value(idb_object.pickled_session)?;
+    let session = InboundGroupSession::from_pickle(pickled_session)
+        .map_err(|e| IndexeddbCryptoStoreError::CryptoStoreError(e.into()))?;
+
+    // Although a "backed up" flag is stored inside `idb_object.pickled_session`, it
+    // is not maintained when backups are reset. Overwrite the flag with the
+    // needs_backup value from the IDB object.
+    if idb_object.needs_backup {
+        session.reset_backup_state();
+    } else {
+        session.mark_as_backed_up();
+    }
+
+    Ok(session)
+}
+
 impl IndexeddbCryptoStore {
     pub(crate) async fn open_with_store_cipher(
         prefix: &str,
@@ -425,9 +460,7 @@ impl IndexeddbCryptoStore {
         &self,
         session: &InboundGroupSession,
     ) -> Result<JsValue> {
-        let obj =
-            InboundGroupSessionIndexedDbObject::from_session(session, &self.serializer).await?;
-        Ok(serde_wasm_bindgen::to_value(&obj)?)
+        serialize_inbound_group_session(session, &self.serializer).await
     }
 
     /// Transform a JsValue holding a [`InboundGroupSessionIndexedDbObject`]
@@ -436,23 +469,7 @@ impl IndexeddbCryptoStore {
         &self,
         stored_value: JsValue,
     ) -> Result<InboundGroupSession> {
-        let idb_object: InboundGroupSessionIndexedDbObject =
-            serde_wasm_bindgen::from_value(stored_value)?;
-        let pickled_session: PickledInboundGroupSession =
-            self.serializer.maybe_decrypt_value(idb_object.pickled_session)?;
-        let session = InboundGroupSession::from_pickle(pickled_session)
-            .map_err(|e| IndexeddbCryptoStoreError::CryptoStoreError(e.into()))?;
-
-        // Although a "backed up" flag is stored inside `idb_object.pickled_session`, it
-        // is not maintained when backups are reset. Overwrite the flag with the
-        // needs_backup value from the IDB object.
-        if idb_object.needs_backup {
-            session.reset_backup_state();
-        } else {
-            session.mark_as_backed_up();
-        }
-
-        Ok(session)
+        deserialize_inbound_group_session(stored_value, &self.serializer)
     }
 
     /// Transform a [`GossipRequest`] into a `JsValue` holding a
@@ -548,14 +565,14 @@ impl IndexeddbCryptoStore {
         }
 
         if !changes.inbound_group_sessions.is_empty() {
-            let mut sessions = indexeddb_changes.get(keys::INBOUND_GROUP_SESSIONS_V3);
+            let mut sessions = indexeddb_changes.get(keys::INBOUND_GROUP_SESSIONS_V4);
 
             for session in &changes.inbound_group_sessions {
                 let room_id = session.room_id();
                 let session_id = session.session_id();
                 let key = self
                     .serializer
-                    .encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (room_id, session_id));
+                    .encode_key(keys::INBOUND_GROUP_SESSIONS_V4, (room_id, session_id));
                 let value = self.serialize_inbound_group_session(session).await?;
                 sessions.put(key, value);
             }
@@ -950,14 +967,14 @@ impl_crypto_store! {
         room_id: &RoomId,
         session_id: &str,
     ) -> Result<Option<InboundGroupSession>> {
-        let key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (room_id, session_id));
+        let key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V4, (room_id, session_id));
         if let Some(value) = self
             .inner
             .transaction_on_one_with_mode(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 IdbTransactionMode::Readonly,
             )?
-            .object_store(keys::INBOUND_GROUP_SESSIONS_V3)?
+            .object_store(keys::INBOUND_GROUP_SESSIONS_V4)?
             .get(&key)?
             .await?
         {
@@ -973,11 +990,11 @@ impl_crypto_store! {
         let transaction = self
             .inner
             .transaction_on_one_with_mode(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 IdbTransactionMode::Readonly,
             )?;
 
-        let object_store = transaction.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
+        let object_store = transaction.object_store(keys::INBOUND_GROUP_SESSIONS_V4)?;
 
         fetch_from_object_store_batched(
             object_store,
@@ -993,10 +1010,10 @@ impl_crypto_store! {
         after_session_id: Option<String>,
         limit: usize,
     ) -> Result<Vec<InboundGroupSession>> {
-        let sender_key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, sender_key.to_base64());
+        let sender_key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V4, sender_key.to_base64());
 
         // The empty string is before all keys in Indexed DB - first batch starts there.
-        let after_session_id = after_session_id.map(|s| self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, s)).unwrap_or("".into());
+        let after_session_id = after_session_id.map(|s| self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V4, s)).unwrap_or("".into());
 
         let lower_bound: Array = [sender_key.clone(), (sender_data_type as u8).into(), after_session_id].iter().collect();
         let upper_bound: Array = [sender_key, ((sender_data_type as u8) + 1).into()].iter().collect();
@@ -1009,11 +1026,11 @@ impl_crypto_store! {
         let tx = self
             .inner
             .transaction_on_one_with_mode(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 IdbTransactionMode::Readonly,
             )?;
 
-        let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
+        let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V4)?;
         let idx = store.index(keys::INBOUND_GROUP_SESSIONS_SENDER_KEY_INDEX)?;
 
         let serialized_sessions = idx.get_all_with_key_and_limit_owned(key, limit as u32)?.await?;
@@ -1035,10 +1052,10 @@ impl_crypto_store! {
         let tx = self
             .inner
             .transaction_on_one_with_mode(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 IdbTransactionMode::Readonly,
             )?;
-        let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
+        let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V4)?;
         let all = store.count()?.await? as usize;
         let not_backed_up = store.index(keys::INBOUND_GROUP_SESSIONS_BACKUP_INDEX)?.count()?.await? as usize;
         tx.await.into_result()?;
@@ -1053,12 +1070,12 @@ impl_crypto_store! {
         let tx = self
             .inner
             .transaction_on_one_with_mode(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 IdbTransactionMode::Readonly,
            )?;
-
-        let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
+        let store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V4)?;
+        let idx = store.index(keys::INBOUND_GROUP_SESSIONS_BACKUP_INDEX)?;
 
         // XXX ideally we would use `get_all_with_key_and_limit`, but that doesn't appear to be
@@ -1099,14 +1116,14 @@ impl_crypto_store! {
         let tx = self
             .inner
             .transaction_on_one_with_mode(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 IdbTransactionMode::Readwrite,
             )?;
-        let object_store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?;
+        let object_store = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V4)?;
 
         for (room_id, session_id) in room_and_session_ids {
-            let key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V3, (room_id, session_id));
+            let key = self.serializer.encode_key(keys::INBOUND_GROUP_SESSIONS_V4, (room_id, session_id));
             if let Some(idb_object_js) = object_store.get(&key)?.await? {
                 let mut idb_object: InboundGroupSessionIndexedDbObject =
                     serde_wasm_bindgen::from_value(idb_object_js)?;
                 idb_object.needs_backup = false;
@@ -1123,11 +1140,11 @@ impl_crypto_store! {
         let tx = self
             .inner
             .transaction_on_one_with_mode(
-                keys::INBOUND_GROUP_SESSIONS_V3,
+                keys::INBOUND_GROUP_SESSIONS_V4,
                 IdbTransactionMode::Readwrite,
             )?;
-        if let Some(cursor) = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V3)?.open_cursor()?.await? {
+        if let Some(cursor) = tx.object_store(keys::INBOUND_GROUP_SESSIONS_V4)?.open_cursor()?.await? {
             loop {
                 let mut idb_object: InboundGroupSessionIndexedDbObject =
                     serde_wasm_bindgen::from_value(cursor.value())?;
                 if !idb_object.needs_backup {
@@ -1782,10 +1799,10 @@ impl InboundGroupSessionIndexedDbObject {
         serializer: &IndexeddbSerializer,
     ) -> Result<Self, CryptoStoreError> {
         let session_id =
-            serializer.encode_key_as_string(keys::INBOUND_GROUP_SESSIONS_V3, session.session_id());
+            serializer.encode_key_as_string(keys::INBOUND_GROUP_SESSIONS_V4, session.session_id());
 
         let sender_key = serializer.encode_key_as_string(
-            keys::INBOUND_GROUP_SESSIONS_V3,
+            keys::INBOUND_GROUP_SESSIONS_V4,
             session.sender_key().to_base64(),
         );