diff --git a/Cargo.lock b/Cargo.lock index 770f168860..cd762d64fa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1413,7 +1413,6 @@ dependencies = [ "tokio", "tracing", "tracing-slog", - "udb-util", "universaldb", "url", "uuid", @@ -1723,7 +1722,6 @@ dependencies = [ "tracing-logfmt", "tracing-opentelemetry", "tracing-subscriber", - "udb-util", "universaldb", "universalpubsub", "url", @@ -2792,7 +2790,6 @@ dependencies = [ "serde", "strum", "tracing", - "udb-util", "universaldb", "url", "utoipa", @@ -3280,7 +3277,6 @@ dependencies = [ "serde_json", "strum", "tracing", - "udb-util", "universaldb", "utoipa", "versioned-data-util", @@ -3301,7 +3297,6 @@ dependencies = [ "tracing", "tracing-logfmt", "tracing-subscriber", - "udb-util", "universaldb", ] @@ -3367,7 +3362,6 @@ dependencies = [ "rivet-runner-protocol", "rivet-types", "tracing", - "udb-util", "universaldb", ] @@ -4373,7 +4367,6 @@ dependencies = [ "tokio-tungstenite", "tracing", "tracing-subscriber", - "udb-util", "universaldb", "url", "uuid", @@ -4450,7 +4443,6 @@ dependencies = [ "tokio", "tower 0.5.2", "tracing", - "udb-util", "universaldb", "universalpubsub", "url", @@ -4560,7 +4552,6 @@ dependencies = [ "tracing", "tracing-logfmt", "tracing-subscriber", - "udb-util", "universaldb", "universalpubsub", "url", @@ -4710,7 +4701,7 @@ dependencies = [ "rivet-runner-protocol", "rivet-util", "serde", - "udb-util", + "universaldb", "utoipa", "versioned-data-util", ] @@ -4767,7 +4758,7 @@ version = "0.0.1" dependencies = [ "serde", "thiserror 1.0.69", - "udb-util", + "universaldb", "utoipa", "uuid", ] @@ -6335,20 +6326,6 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" -[[package]] -name = "udb-util" -version = "0.0.1" -dependencies = [ - "anyhow", - "async-trait", - "futures-util", - "lazy_static", - "rivet-metrics", - "tokio", - "tracing", - "universaldb", -] - [[package]] name = "uname" version = "0.1.1" @@ -6416,16 +6393,17 @@ dependencies = [ "rand 0.8.5", "rivet-config", "rivet-env", + "rivet-metrics", "rivet-pools", "rivet-test-deps-docker", "rocksdb", "serde", "tempfile", + "thiserror 1.0.69", "tokio", "tokio-postgres", "tracing", "tracing-subscriber", - "udb-util", "uuid", ] diff --git a/Cargo.toml b/Cargo.toml index 3018250c06..d3e2cc4372 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [workspace] resolver = "2" -members = 
["packages/common/api-builder","packages/common/api-client","packages/common/api-types","packages/common/api-util","packages/common/cache/build","packages/common/cache/result","packages/common/clickhouse-inserter","packages/common/clickhouse-user-query","packages/common/config","packages/common/env","packages/common/error/core","packages/common/error/macros","packages/common/gasoline/core","packages/common/gasoline/macros","packages/common/logs","packages/common/metrics","packages/common/pools","packages/common/runtime","packages/common/service-manager","packages/common/telemetry","packages/common/test-deps","packages/common/test-deps-docker","packages/common/types","packages/common/udb-util","packages/common/universaldb","packages/common/universalpubsub","packages/common/util/core","packages/common/util/id","packages/common/versioned-data-util","packages/core/actor-kv","packages/core/api-peer","packages/core/api-public","packages/core/bootstrap","packages/core/dump-openapi","packages/core/guard/core","packages/core/guard/server","packages/core/pegboard-gateway","packages/core/pegboard-runner-ws","packages/core/pegboard-serverless","packages/core/pegboard-tunnel","packages/core/workflow-worker","packages/infra/engine","packages/services/epoxy","packages/services/internal","packages/services/namespace","packages/services/pegboard","sdks/rust/api-full","sdks/rust/bare_gen","sdks/rust/data","sdks/rust/epoxy-protocol","sdks/rust/runner-protocol","sdks/rust/tunnel-protocol","sdks/rust/ups-protocol"] +members = ["packages/common/api-builder","packages/common/api-client","packages/common/api-types","packages/common/api-util","packages/common/cache/build","packages/common/cache/result","packages/common/clickhouse-inserter","packages/common/clickhouse-user-query","packages/common/config","packages/common/env","packages/common/error/core","packages/common/error/macros","packages/common/gasoline/core","packages/common/gasoline/macros","packages/common/logs","packages/common/metrics","packages/common/pools","packages/common/runtime","packages/common/service-manager","packages/common/telemetry","packages/common/test-deps","packages/common/test-deps-docker","packages/common/types","packages/common/universaldb","packages/common/universalpubsub","packages/common/util/core","packages/common/util/id","packages/common/versioned-data-util","packages/core/actor-kv","packages/core/api-peer","packages/core/api-public","packages/core/bootstrap","packages/core/dump-openapi","packages/core/guard/core","packages/core/guard/server","packages/core/pegboard-gateway","packages/core/pegboard-runner-ws","packages/core/pegboard-serverless","packages/core/pegboard-tunnel","packages/core/workflow-worker","packages/infra/engine","packages/services/epoxy","packages/services/internal","packages/services/namespace","packages/services/pegboard","sdks/rust/api-full","sdks/rust/bare_gen","sdks/rust/data","sdks/rust/epoxy-protocol","sdks/rust/runner-protocol","sdks/rust/tunnel-protocol","sdks/rust/ups-protocol"] [workspace.package] version = "25.6.1" @@ -318,9 +318,6 @@ path = "packages/common/test-deps-docker" [workspace.dependencies.rivet-types] path = "packages/common/types" -[workspace.dependencies.udb-util] -path = "packages/common/udb-util" - [workspace.dependencies.universaldb] path = "packages/common/universaldb" diff --git a/packages/common/gasoline/core/Cargo.toml b/packages/common/gasoline/core/Cargo.toml index 83223ecd37..ca0e51ae96 100644 --- a/packages/common/gasoline/core/Cargo.toml +++ 
b/packages/common/gasoline/core/Cargo.toml @@ -39,7 +39,6 @@ tokio-util.workspace = true tokio.workspace = true tracing-logfmt.workspace = true tracing-opentelemetry.workspace = true -udb-util.workspace = true universaldb.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter"] } tracing.workspace = true diff --git a/packages/common/gasoline/core/src/ctx/workflow.rs b/packages/common/gasoline/core/src/ctx/workflow.rs index a8432f406a..7e9d0fc34e 100644 --- a/packages/common/gasoline/core/src/ctx/workflow.rs +++ b/packages/common/gasoline/core/src/ctx/workflow.rs @@ -671,34 +671,35 @@ impl WorkflowCtx { exec.execute(self).await } - /// Tests if the given error is unrecoverable. If it is, allows the user to run recovery code safely. - /// Should always be used when trying to handle activity errors manually. - #[tracing::instrument(skip_all)] - pub fn catch_unrecoverable(&mut self, res: Result) -> Result> { - match res { - Err(err) => { - // TODO: This should check .chain() for the error - match err.downcast::() { - Ok(inner_err) => { - // Despite "history diverged" errors being unrecoverable, they should not have be returned - // by this function because the state of the history is already messed up and no new - // workflow items should be run. - if !inner_err.is_recoverable() - && !matches!(inner_err, WorkflowError::HistoryDiverged(_)) - { - self.cursor.inc(); - - Ok(Err(inner_err.into())) - } else { - Err(inner_err.into()) - } - } - Err(err) => Err(err), - } - } - Ok(x) => Ok(Ok(x)), - } - } + // TODO: Replace with some method on WorkflowError + // /// Tests if the given error is unrecoverable. If it is, allows the user to run recovery code safely. + // /// Should always be used when trying to handle activity errors manually. + // #[tracing::instrument(skip_all)] + // pub fn catch_unrecoverable(&mut self, res: Result) -> Result> { + // match res { + // Err(err) => { + // // TODO: This should check .chain() for the error + // match err.downcast::() { + // Ok(inner_err) => { + // // Despite "history diverged" errors being unrecoverable, they should not have be returned + // // by this function because the state of the history is already messed up and no new + // // workflow items should be run. + // if !inner_err.is_recoverable() + // && !matches!(inner_err, WorkflowError::HistoryDiverged(_)) + // { + // self.cursor.inc(); + + // Ok(Err(inner_err.into())) + // } else { + // Err(inner_err.into()) + // } + // } + // Err(err) => Err(err), + // } + // } + // Ok(x) => Ok(Ok(x)), + // } + // } /// Creates a signal builder. 
pub fn signal(&mut self, body: T) -> builder::signal::SignalBuilder { diff --git a/packages/common/gasoline/core/src/db/kv/debug.rs b/packages/common/gasoline/core/src/db/kv/debug.rs index 7a556c1b58..50ffc08b0b 100644 --- a/packages/common/gasoline/core/src/db/kv/debug.rs +++ b/packages/common/gasoline/core/src/db/kv/debug.rs @@ -4,16 +4,16 @@ use std::{ result::Result::{Err, Ok}, }; -use anyhow::*; +use anyhow::{Context, Result, ensure}; use futures_util::{StreamExt, TryStreamExt}; use rivet_util::Id; use tracing::Instrument; -use udb_util::{FormalChunkedKey, FormalKey, SERIALIZABLE, SNAPSHOT, TxnExt, end_of_key_range}; +use universaldb::utils::{FormalChunkedKey, FormalKey, IsolationLevel::*, end_of_key_range}; use universaldb::{ - self as udb, - future::FdbValue, + RangeOption, options::{ConflictRangeType, StreamingMode}, tuple::{PackResult, TupleDepth, TupleUnpack}, + value::Value, }; use super::{DatabaseKv, keys, update_metric}; @@ -35,8 +35,8 @@ impl DatabaseKv { async fn get_workflows_inner( &self, workflow_ids: Vec, - tx: &udb::RetryableTransaction, - ) -> std::result::Result, udb::FdbBindingError> { + tx: &universaldb::RetryableTransaction, + ) -> Result> { let mut res = Vec::new(); // TODO: Parallelize @@ -70,91 +70,49 @@ impl DatabaseKv { silence_ts_entry, ) = tokio::try_join!( tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, ..(&tags_subspace).into() }, - SNAPSHOT, + Snapshot, ) - .map(|res| match res { - Ok(entry) => { - let key = self - .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; - let v = serde_json::Value::String(key.v.clone()); + .map(|res| { + let key = self.subspace.unpack::(res?.key())?; + let v = serde_json::Value::String(key.v.clone()); - Ok((key.k, v)) - } - Err(err) => Err(Into::::into(err)), + Ok((key.k, v)) }) .try_collect::>(), - async { - tx.get(&self.subspace.pack(&name_key), SNAPSHOT) - .await - .map_err(Into::into) - }, - async { - tx.get(&self.subspace.pack(&create_ts_key), SNAPSHOT) - .await - .map_err(Into::into) - }, - async { - tx.get_ranges_keyvalues( - udb::RangeOption { - mode: StreamingMode::WantAll, - ..(&input_subspace).into() - }, - SNAPSHOT, - ) - .try_collect::>() - .await - .map_err(Into::into) - }, - async { - tx.get_ranges_keyvalues( - udb::RangeOption { - mode: StreamingMode::WantAll, - ..(&state_subspace).into() - }, - SNAPSHOT, - ) - .try_collect::>() - .await - .map_err(Into::into) - }, - async { - tx.get_ranges_keyvalues( - udb::RangeOption { - mode: StreamingMode::WantAll, - ..(&output_subspace).into() - }, - SNAPSHOT, - ) - .try_collect::>() - .await - .map_err(Into::into) - }, - async { - tx.get(&self.subspace.pack(&error_key), SNAPSHOT) - .await - .map_err(Into::into) - }, - async { - tx.get(&self.subspace.pack(&has_wake_condition_key), SNAPSHOT) - .await - .map_err(Into::into) - }, - async { - tx.get(&self.subspace.pack(&worker_instance_id_key), SNAPSHOT) - .await - .map_err(Into::into) - }, - async { - tx.get(&self.subspace.pack(&silence_ts_key), SNAPSHOT) - .await - .map_err(Into::into) - }, + tx.get(&self.subspace.pack(&name_key), Snapshot), + tx.get(&self.subspace.pack(&create_ts_key), Snapshot), + tx.get_ranges_keyvalues( + RangeOption { + mode: StreamingMode::WantAll, + ..(&input_subspace).into() + }, + Snapshot, + ) + .try_collect::>(), + tx.get_ranges_keyvalues( + RangeOption { + mode: StreamingMode::WantAll, + ..(&state_subspace).into() + }, + Snapshot, + ) + .try_collect::>(), + tx.get_ranges_keyvalues( + RangeOption { + mode: 
StreamingMode::WantAll, + ..(&output_subspace).into() + }, + Snapshot, + ) + .try_collect::>(), + tx.get(&self.subspace.pack(&error_key), Snapshot), + tx.get(&self.subspace.pack(&has_wake_condition_key), Snapshot), + tx.get(&self.subspace.pack(&worker_instance_id_key), Snapshot), + tx.get(&self.subspace.pack(&silence_ts_key), Snapshot), )?; let Some(create_ts_entry) = &create_ts_entry else { @@ -162,44 +120,26 @@ impl DatabaseKv { continue; }; - let create_ts = create_ts_key - .deserialize(&create_ts_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let create_ts = create_ts_key.deserialize(&create_ts_entry)?; - let workflow_name = name_key - .deserialize(&name_entry.ok_or(udb::FdbBindingError::CustomError( - format!("key should exist: {name_key:?}").into(), - ))?) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let workflow_name = name_key.deserialize(&name_entry.context("key should exist")?)?; - let input = input_key - .combine(input_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let input = input_key.combine(input_chunks)?; let data = if state_chunks.is_empty() { serde_json::value::RawValue::NULL.to_owned() } else { - state_key - .combine(state_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + state_key.combine(state_chunks)? }; let output = if output_chunks.is_empty() { None } else { - Some( - output_key - .combine(output_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ) + Some(output_key.combine(output_chunks)?) }; let error = if let Some(error_entry) = error_entry { - Some( - error_key - .deserialize(&error_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ) + Some(error_key.deserialize(&error_entry)?) } else { None }; @@ -221,14 +161,9 @@ impl DatabaseKv { workflow_name, tags: serde_json::Value::Object(tags), create_ts, - input: serde_json::from_str(input.get()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - data: serde_json::from_str(data.get()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - output: output - .map(|x| serde_json::from_str(x.get())) - .transpose() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + input: serde_json::from_str(input.get())?, + data: serde_json::from_str(data.get())?, + output: output.map(|x| serde_json::from_str(x.get())).transpose()?, error, state, }); @@ -241,8 +176,8 @@ impl DatabaseKv { async fn get_signals_inner( &self, signal_ids: Vec, - tx: &udb::RetryableTransaction, - ) -> std::result::Result, udb::FdbBindingError> { + tx: &universaldb::RetryableTransaction, + ) -> Result> { let mut res = Vec::new(); // TODO: Parallelize @@ -263,22 +198,22 @@ impl DatabaseKv { ack_ts_entry, silence_ts_entry, ) = tokio::try_join!( - tx.get(&self.subspace.pack(&name_key), SNAPSHOT), - tx.get(&self.subspace.pack(&workflow_id_key), SNAPSHOT), - tx.get(&self.subspace.pack(&create_ts_key), SNAPSHOT), + tx.get(&self.subspace.pack(&name_key), Snapshot), + tx.get(&self.subspace.pack(&workflow_id_key), Snapshot), + tx.get(&self.subspace.pack(&create_ts_key), Snapshot), async { tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, ..(&body_subspace).into() }, - SNAPSHOT, + Snapshot, ) .try_collect::>() .await }, - tx.get(&self.subspace.pack(&ack_ts_key), SNAPSHOT), - tx.get(&self.subspace.pack(&silence_ts_key), SNAPSHOT), + tx.get(&self.subspace.pack(&ack_ts_key), Snapshot), + tx.get(&self.subspace.pack(&silence_ts_key), Snapshot), )?; let Some(create_ts_entry) 
= &create_ts_entry else { @@ -286,36 +221,20 @@ impl DatabaseKv { continue; }; - let create_ts = create_ts_key - .deserialize(&create_ts_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let create_ts = create_ts_key.deserialize(&create_ts_entry)?; - let signal_name = name_key - .deserialize(&name_entry.ok_or(udb::FdbBindingError::CustomError( - format!("key should exist: {name_key:?}").into(), - ))?) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let signal_name = name_key.deserialize(&name_entry.context("key should exist")?)?; - let body = body_key - .combine(body_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let body = body_key.combine(body_chunks)?; let workflow_id = if let Some(workflow_id_entry) = workflow_id_entry { - Some( - workflow_id_key - .deserialize(&workflow_id_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ) + Some(workflow_id_key.deserialize(&workflow_id_entry)?) } else { None }; let ack_ts = if let Some(ack_ts_entry) = ack_ts_entry { - Some( - ack_ts_key - .deserialize(&ack_ts_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ) + Some(ack_ts_key.deserialize(&ack_ts_entry)?) } else { None }; @@ -335,8 +254,7 @@ impl DatabaseKv { workflow_id, create_ts, ack_ts, - body: serde_json::from_str(body.get()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + body: serde_json::from_str(body.get())?, state, }); } @@ -345,7 +263,7 @@ impl DatabaseKv { } } -// NOTE: Most of the reads here are SNAPSHOT because we don't want this to conflict with the actual wf engine. +// NOTE: Most of the reads here are Snapshot because we don't want this to conflict with the actual wf engine. // Its just for debugging #[async_trait::async_trait] impl DatabaseDebug for DatabaseKv { @@ -353,7 +271,7 @@ impl DatabaseDebug for DatabaseKv { async fn get_workflows(&self, workflow_ids: Vec) -> Result> { self.pools .udb()? - .run(|tx, _mc| { + .run(|tx| { let workflow_ids = workflow_ids.clone(); async move { self.get_workflows_inner(workflow_ids, &tx).await } }) @@ -371,7 +289,7 @@ impl DatabaseDebug for DatabaseKv { // NOTE: this does a full scan of all keys under workflow/data and filters in memory self.pools .udb()? - .run(|tx, _mc| { + .run(|tx| { let name = name.clone(); async move { let mut workflow_ids = Vec::new(); @@ -381,11 +299,11 @@ impl DatabaseDebug for DatabaseKv { .subspace(&keys::workflow::DataSubspaceKey::new()); let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::Iterator, ..(&data_subspace).into() }, - SNAPSHOT, + Snapshot, ); let mut current_workflow_id = None; @@ -394,10 +312,7 @@ impl DatabaseDebug for DatabaseKv { let mut state_matches = state.is_none() || state == Some(WorkflowState::Dead); while let Some(entry) = stream.try_next().await? 
{ - let workflow_id = *self - .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let workflow_id = *self.subspace.unpack::(entry.key())?; if let Some(curr) = current_workflow_id { if workflow_id != curr { @@ -431,9 +346,7 @@ impl DatabaseDebug for DatabaseKv { self.subspace.unpack::(entry.key()) { if let Some(name) = &name { - let workflow_name = name_key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let workflow_name = name_key.deserialize(entry.value())?; name_matches = &workflow_name == name; } @@ -499,7 +412,7 @@ impl DatabaseDebug for DatabaseKv { async fn silence_workflows(&self, workflow_ids: Vec) -> Result<()> { self.pools .udb()? - .run(|tx, _mc| { + .run(|tx| { let workflow_ids = workflow_ids.clone(); async move { @@ -524,15 +437,13 @@ impl DatabaseDebug for DatabaseKv { let error_key = keys::workflow::ErrorKey::new(workflow_id); let Some(name_entry) = - tx.get(&self.subspace.pack(&name_key), SERIALIZABLE).await? + tx.get(&self.subspace.pack(&name_key), Serializable).await? else { tracing::warn!(?workflow_id, "workflow not found"); continue; }; - let workflow_name = name_key - .deserialize(&name_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let workflow_name = name_key.deserialize(&name_entry)?; let wake_conditions_subspace = self.subspace.subspace( &keys::wake::WorkflowWakeConditionKey::subspace_without_ts( @@ -553,107 +464,87 @@ impl DatabaseDebug for DatabaseKv { ) = tokio::try_join!( // Read sub workflow wake conditions tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, ..(&sub_workflow_wake_subspace).into() }, - SERIALIZABLE, + Serializable, ) - .map(|res| match res { - Ok(entry) => self - .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())), - Err(err) => Err(Into::::into(err)), - }) + .map(|res| self + .subspace + .unpack::(res?.key()) + .map_err(Into::into)) .try_collect::>(), // Read tags tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, ..(&tags_subspace).into() }, - SERIALIZABLE, + Serializable, ) - .map(|res| match res { - Ok(entry) => self - .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())), - Err(err) => Err(Into::::into(err)), - }) + .map(|res| self + .subspace + .unpack::(res?.key()) + .map_err(Into::into)) .try_collect::>(), // Read wake conditions tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, ..(&wake_conditions_subspace).into() }, - SNAPSHOT, + Snapshot, ) - .map(|res| match res { - Ok(entry) => Ok(( + .map(|res| { + let entry = res?; + + Ok(( entry.key().to_vec(), self.subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - )), - Err(err) => Err(Into::::into(err)), + .unpack::( + entry.key(), + )?, + )) }) .try_collect::>(), async { - tx.get(&self.subspace.pack(&worker_instance_id_key), SERIALIZABLE) + tx.get(&self.subspace.pack(&worker_instance_id_key), Serializable) .await - .map_err(Into::into) .map(|x| x.is_some()) }, async { tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, limit: Some(1), ..(&output_subspace).into() }, - SNAPSHOT, + Snapshot, ) .try_next() .await - .map_err(Into::into) .map(|x| x.is_some()) }, async { - tx.get(&self.subspace.pack(&has_wake_condition_key), SERIALIZABLE) + tx.get(&self.subspace.pack(&has_wake_condition_key), 
Serializable) .await - .map_err(Into::into) .map(|x| x.is_some()) }, async { - tx.get(&self.subspace.pack(&silence_ts_key), SERIALIZABLE) + tx.get(&self.subspace.pack(&silence_ts_key), Serializable) .await - .map_err(Into::into) .map(|x| x.is_some()) }, - async { - tx.get(&self.subspace.pack(&wake_sub_workflow_key), SERIALIZABLE) - .await - .map_err(Into::into) - }, - async { - tx.get(&self.subspace.pack(&error_key), SERIALIZABLE) - .await - .map_err(Into::into) - }, + tx.get(&self.subspace.pack(&wake_sub_workflow_key), Serializable), + tx.get(&self.subspace.pack(&error_key), Serializable), )?; if is_silenced { continue; } - if is_running { - return Err(udb::FdbBindingError::CustomError( - "cannot silence a running workflow".into(), - )); - } + ensure!(!is_running, "cannot silence a running workflow"); for key in sub_workflow_wake_keys { tracing::warn!( @@ -699,9 +590,7 @@ impl DatabaseDebug for DatabaseKv { // Clear sub workflow secondary idx if let Some(entry) = wake_sub_workflow_entry { - let sub_workflow_id = wake_sub_workflow_key - .deserialize(&entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let sub_workflow_id = wake_sub_workflow_key.deserialize(&entry)?; let sub_workflow_wake_key = keys::wake::SubWorkflowWakeKey::new(sub_workflow_id, workflow_id); @@ -720,29 +609,22 @@ impl DatabaseDebug for DatabaseKv { tx.set( &self.subspace.pack(&silence_ts_key), - &silence_ts_key - .serialize(rivet_util::timestamp::now()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &silence_ts_key.serialize(rivet_util::timestamp::now())?, ); // Clear metric let metric = if has_output { keys::metric::GaugeMetric::WorkflowComplete(workflow_name.clone()) } else if has_wake_condition { - let error = error_key - .deserialize(&error_entry.ok_or( - udb::FdbBindingError::CustomError( - format!("key should exist: {error_key:?}").into(), - ), - )?) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let error = + error_key.deserialize(&error_entry.context("key should exist")?)?; keys::metric::GaugeMetric::WorkflowDead(workflow_name.clone(), error) } else { keys::metric::GaugeMetric::WorkflowSleeping(workflow_name.clone()) }; - update_metric(&tx.subspace(self.subspace.clone()), Some(metric), None); + update_metric(&tx.with_subspace(self.subspace.clone()), Some(metric), None); } Ok(()) @@ -756,10 +638,10 @@ impl DatabaseDebug for DatabaseKv { async fn wake_workflows(&self, workflow_ids: Vec) -> Result<()> { self.pools .udb()? 
- .run(|tx, _mc| { + .run(|tx| { let workflow_ids = workflow_ids.clone(); async move { - let txs = tx.subspace(self.subspace.clone()); + let tx = tx.with_subspace(self.subspace.clone()); for workflow_id in workflow_ids { let name_key = keys::workflow::NameKey::new(workflow_id); @@ -780,38 +662,34 @@ impl DatabaseDebug for DatabaseKv { has_output, error, ) = tokio::try_join!( - txs.read(&name_key, SERIALIZABLE), - txs.exists(&worker_instance_id_key, SERIALIZABLE), - txs.exists(&has_wake_condition_key, SERIALIZABLE), - txs.exists(&silence_ts_key, SERIALIZABLE), + tx.read(&name_key, Serializable), + tx.exists(&worker_instance_id_key, Serializable), + tx.exists(&has_wake_condition_key, Serializable), + tx.exists(&silence_ts_key, Serializable), async { tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, limit: Some(1), ..(&output_subspace).into() }, - SNAPSHOT, + Snapshot, ) .try_next() .await .map_err(Into::into) .map(|x| x.is_some()) }, - txs.read_opt(&error_key, SERIALIZABLE), + tx.read_opt(&error_key, Serializable), )?; if is_running || is_silenced { continue; } - if has_output { - return Err(udb::FdbBindingError::CustomError( - "cannot silence a running workflow".into(), - )); - } + ensure!(!has_output, "cannot wake a completed workflow"); - txs.write( + tx.write( &keys::wake::WorkflowWakeConditionKey::new( workflow_name.clone(), workflow_id, @@ -820,16 +698,14 @@ impl DatabaseDebug for DatabaseKv { (), )?; - txs.write(&has_wake_condition_key, ())?; + tx.write(&has_wake_condition_key, ())?; if !has_wake_condition { update_metric( - &txs, + &tx, Some(keys::metric::GaugeMetric::WorkflowDead( workflow_name.clone(), - error.ok_or(udb::FdbBindingError::CustomError( - format!("key should exist: {error_key:?}").into(), - ))?, + error.context("key should exist")?, )), Some(keys::metric::GaugeMetric::WorkflowSleeping(workflow_name)), ); @@ -855,7 +731,7 @@ impl DatabaseDebug for DatabaseKv { ) -> Result> { self.pools .udb()? 
- .run(|tx, _mc| { + .run(|tx| { async move { let history_subspace = self.subspace @@ -872,7 +748,6 @@ impl DatabaseDebug for DatabaseKv { async { self.get_workflows(vec![workflow_id]) .await - .map_err(|x| udb::FdbBindingError::CustomError(x.into())) .map(|wfs| wfs.into_iter().next()) }, async { @@ -882,11 +757,11 @@ impl DatabaseDebug for DatabaseKv { WorkflowHistoryEventBuilder::new(Location::empty(), false); let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::WantAll, ..(&history_subspace).into() }, - SERIALIZABLE, + Serializable, ); loop { @@ -897,8 +772,7 @@ impl DatabaseDebug for DatabaseKv { // Parse only the wf id and location of the current key let partial_key = self .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .unpack::(entry.key())?; if current_event.location != partial_key.location { if current_event.location.is_empty() { @@ -919,9 +793,7 @@ impl DatabaseDebug for DatabaseKv { events_by_location .entry(previous_event.location.root()) .or_default() - .push(Event::try_from(previous_event).map_err( - |x| udb::FdbBindingError::CustomError(x.into()), - )?); + .push(Event::try_from(previous_event)?); } } @@ -930,53 +802,41 @@ impl DatabaseDebug for DatabaseKv { .subspace .unpack::(entry.key()) { - let event_type = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let event_type = key.deserialize(entry.value())?; current_event.event_type = Some(event_type); } else if let Ok(key) = self .subspace .unpack::(entry.key()) { - let version = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let version = key.deserialize(entry.value())?; current_event.version = Some(version); } else if let Ok(key) = self .subspace .unpack::(entry.key()) { - let create_ts = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let create_ts = key.deserialize(entry.value())?; current_event.create_ts = Some(create_ts); } else if let Ok(key) = self.subspace.unpack::(entry.key()) { - let name = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let name = key.deserialize(entry.value())?; current_event.name = Some(name); } else if let Ok(key) = self .subspace .unpack::(entry.key()) { - let signal_id = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let signal_id = key.deserialize(entry.value())?; current_event.signal_id = Some(signal_id); } else if let Ok(key) = self .subspace .unpack::(entry.key()) { - let sub_workflow_id = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let sub_workflow_id = key.deserialize(entry.value())?; current_event.sub_workflow_id = Some(sub_workflow_id); } else if let Ok(_key) = self @@ -993,9 +853,7 @@ impl DatabaseDebug for DatabaseKv { .subspace .unpack::(entry.key()) { - let input_hash = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let input_hash = key.deserialize(entry.value())?; current_event.input_hash = Some(input_hash); } else if let Ok(key) = @@ -1019,36 +877,28 @@ impl DatabaseDebug for DatabaseKv { .subspace .unpack::(entry.key()) { - let iteration = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let iteration = key.deserialize(entry.value())?; current_event.iteration = Some(iteration); } 
else if let Ok(key) = self .subspace .unpack::(entry.key()) { - let deadline_ts = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let deadline_ts = key.deserialize(entry.value())?; current_event.deadline_ts = Some(deadline_ts); } else if let Ok(key) = self .subspace .unpack::(entry.key()) { - let sleep_state = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let sleep_state = key.deserialize(entry.value())?; current_event.sleep_state = Some(sleep_state); } else if let Ok(key) = self.subspace .unpack::(entry.key()) { - let inner_event_type = key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let inner_event_type = key.deserialize(entry.value())?; current_event.inner_event_type = Some(inner_event_type); } @@ -1060,9 +910,7 @@ impl DatabaseDebug for DatabaseKv { events_by_location .entry(current_event.location.root()) .or_default() - .push(Event::try_from(current_event).map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?); + .push(Event::try_from(current_event)?); } Ok(events_by_location) @@ -1077,7 +925,7 @@ impl DatabaseDebug for DatabaseKv { events.into_iter().flat_map(|(_, v)| v).collect::>(); flat_events.sort_by(|a, b| a.location.cmp(&b.location)); - Result::<_, udb::FdbBindingError>::Ok(Some(HistoryData { + Ok(Some(HistoryData { wf, events: flat_events, })) @@ -1092,7 +940,7 @@ impl DatabaseDebug for DatabaseKv { async fn get_signals(&self, signal_ids: Vec) -> Result> { self.pools .udb()? - .run(|tx, _mc| { + .run(|tx| { let signal_ids = signal_ids.clone(); async move { self.get_signals_inner(signal_ids, &tx).await } }) @@ -1112,7 +960,7 @@ impl DatabaseDebug for DatabaseKv { // NOTE: this does a full scan of all keys under signal/data and filters in memory self.pools .udb()? - .run(|tx, _mc| { + .run(|tx| { let name = name.clone(); let workflow_id = workflow_id.clone(); async move { @@ -1123,11 +971,11 @@ impl DatabaseDebug for DatabaseKv { .subspace(&keys::signal::DataSubspaceKey::new()); let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + RangeOption { mode: StreamingMode::Iterator, ..(&data_subspace).into() }, - SNAPSHOT, + Snapshot, ); let mut current_signal_id = None; @@ -1136,10 +984,7 @@ impl DatabaseDebug for DatabaseKv { let mut state_matches = state.is_none() || state == Some(SignalState::Pending); while let Some(entry) = stream.try_next().await? 
{ - let signal_id = *self - .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let signal_id = *self.subspace.unpack::(entry.key())?; if let Some(curr) = current_signal_id { if signal_id != curr { @@ -1167,9 +1012,7 @@ impl DatabaseDebug for DatabaseKv { self.subspace.unpack::(entry.key()) { if let Some(name) = &name { - let signal_name = name_key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let signal_name = name_key.deserialize(entry.value())?; name_matches = &signal_name == name; } @@ -1178,9 +1021,8 @@ impl DatabaseDebug for DatabaseKv { .unpack::(entry.key()) { if let Some(workflow_id) = &workflow_id { - let signal_workflow_id = workflow_id_key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let signal_workflow_id = + workflow_id_key.deserialize(entry.value())?; workflow_id_matches = &signal_workflow_id == workflow_id; } @@ -1225,7 +1067,7 @@ impl DatabaseDebug for DatabaseKv { async fn silence_signals(&self, signal_ids: Vec) -> Result<()> { self.pools .udb()? - .run(|tx, _mc| { + .run(|tx| { let signal_ids = signal_ids.clone(); async move { @@ -1244,11 +1086,11 @@ impl DatabaseDebug for DatabaseKv { silence_ts_entry, ack_ts_entry, ) = tokio::try_join!( - tx.get(&self.subspace.pack(&signal_name_key), SERIALIZABLE), - tx.get(&self.subspace.pack(&create_ts_key), SERIALIZABLE), - tx.get(&self.subspace.pack(&workflow_id_key), SERIALIZABLE), - tx.get(&self.subspace.pack(&silence_ts_key), SERIALIZABLE), - tx.get(&self.subspace.pack(&ack_ts_key), SERIALIZABLE), + tx.get(&self.subspace.pack(&signal_name_key), Serializable), + tx.get(&self.subspace.pack(&create_ts_key), Serializable), + tx.get(&self.subspace.pack(&workflow_id_key), Serializable), + tx.get(&self.subspace.pack(&silence_ts_key), Serializable), + tx.get(&self.subspace.pack(&ack_ts_key), Serializable), )?; if silence_ts_entry.is_some() { @@ -1260,39 +1102,22 @@ impl DatabaseDebug for DatabaseKv { continue; }; - let signal_name = signal_name_key - .deserialize(&signal_name_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let signal_name = signal_name_key.deserialize(&signal_name_entry)?; let create_ts = create_ts_key - .deserialize(&create_ts_entry.ok_or( - udb::FdbBindingError::CustomError( - format!("key should exist: {create_ts_key:?}").into(), - ), - )?) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .deserialize(&create_ts_entry.context("key should exist")?)?; let workflow_id = workflow_id_key - .deserialize(&workflow_id_entry.ok_or( - udb::FdbBindingError::CustomError( - format!("key should exist: {workflow_id_key:?}").into(), - ), - )?) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .deserialize(&workflow_id_entry.context("key should exist")?)?; let workflow_name_key = keys::workflow::NameKey::new(workflow_id); let workflow_name_entry = tx - .get(&self.subspace.pack(&workflow_name_key), SERIALIZABLE) + .get(&self.subspace.pack(&workflow_name_key), Serializable) .await?; let workflow_name = workflow_name_key - .deserialize(&workflow_name_entry.ok_or( - udb::FdbBindingError::CustomError( - format!("key should exist: {workflow_name_key:?}").into(), - ), - )?) 
- .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .deserialize(&workflow_name_entry.context("key should exist")?)?; // Clear pending key let mut pending_signal_key = keys::workflow::PendingSignalKey::new( @@ -1314,14 +1139,12 @@ impl DatabaseDebug for DatabaseKv { tx.set( &self.subspace.pack(&silence_ts_key), - &silence_ts_key - .serialize(rivet_util::timestamp::now()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &silence_ts_key.serialize(rivet_util::timestamp::now())?, ); if ack_ts_entry.is_none() { update_metric( - &tx.subspace(self.subspace.clone()), + &tx.with_subspace(self.subspace.clone()), Some(keys::metric::GaugeMetric::SignalPending(signal_name)), None, ); @@ -1365,8 +1188,8 @@ struct WorkflowHistoryEventBuilder { name: Option, signal_id: Option, sub_workflow_id: Option, - input_chunks: Vec, - output_chunks: Vec, + input_chunks: Vec, + output_chunks: Vec, tags: Vec<(String, String)>, input_hash: Option>, errors: Vec, diff --git a/packages/common/gasoline/core/src/db/kv/keys/history.rs b/packages/common/gasoline/core/src/db/kv/keys/history.rs index 9b8a838fd1..e47b201c2b 100644 --- a/packages/common/gasoline/core/src/db/kv/keys/history.rs +++ b/packages/common/gasoline/core/src/db/kv/keys/history.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use rivet_util::Id; -use udb_util::prelude::*; +use universaldb::prelude::*; use crate::history::{ event::{EventType, SleepState}, @@ -147,7 +147,7 @@ impl TuplePack for EventHistorySubspaceKey { // This ensures we are only reading events under the given location and not event data at the current // location - w.write_all(&[udb_util::codes::NESTED])?; + w.write_all(&[universaldb::utils::codes::NESTED])?; offset += 1; if let Some(idx) = self.idx { @@ -550,7 +550,7 @@ impl InputKey { Ok(value .get() .as_bytes() - .chunks(udb_util::CHUNK_SIZE) + .chunks(universaldb::utils::CHUNK_SIZE) .map(|x| x.to_vec()) .collect()) } @@ -569,7 +569,7 @@ impl FormalChunkedKey for InputKey { } } - fn combine(&self, chunks: Vec) -> Result { + fn combine(&self, chunks: Vec) -> Result { serde_json::value::RawValue::from_string(String::from_utf8( chunks .iter() @@ -666,7 +666,7 @@ impl OutputKey { Ok(value .get() .as_bytes() - .chunks(udb_util::CHUNK_SIZE) + .chunks(universaldb::utils::CHUNK_SIZE) .map(|x| x.to_vec()) .collect()) } @@ -685,7 +685,7 @@ impl FormalChunkedKey for OutputKey { } } - fn combine(&self, chunks: Vec) -> Result { + fn combine(&self, chunks: Vec) -> Result { serde_json::value::RawValue::from_string(String::from_utf8( chunks .iter() @@ -1301,8 +1301,7 @@ fn unpack_history_key<'de>( pub mod insert { use anyhow::Result; use rivet_util::Id; - use udb_util::{FormalChunkedKey, FormalKey}; - use universaldb as udb; + use universaldb::utils::{FormalChunkedKey, FormalKey}; use super::super::super::value_to_str; use crate::{ @@ -1314,8 +1313,8 @@ pub mod insert { }; pub fn common( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, event_type: EventType, @@ -1344,8 +1343,8 @@ pub mod insert { } pub fn signal_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1379,12 +1378,7 @@ pub mod insert { let signal_body_key = super::InputKey::new(workflow_id, location.clone()); // Write signal body - for (i, chunk) 
in signal_body_key - .split_ref(&body) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in signal_body_key.split_ref(&body)?.into_iter().enumerate() { let chunk_key = signal_body_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1394,8 +1388,8 @@ pub mod insert { } pub fn signal_send_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1430,12 +1424,7 @@ pub mod insert { let signal_body_key = super::InputKey::new(workflow_id, location.clone()); // Write signal body - for (i, chunk) in signal_body_key - .split_ref(&body) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in signal_body_key.split_ref(&body)?.into_iter().enumerate() { let chunk_key = signal_body_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1451,8 +1440,8 @@ pub mod insert { } pub fn sub_workflow_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1490,34 +1479,22 @@ pub mod insert { x.as_object() .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string())) }) - .transpose() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + .transpose()? .into_iter() .flatten() .map(|(k, v)| Ok((k.clone(), value_to_str(v)?))) - .collect::>>() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .collect::>>()?; for (k, v) in &tags { // Write tag key let tag_key = super::TagKey::new(workflow_id, location.clone(), k.clone(), v.clone()); - tx.set( - &subspace.pack(&tag_key), - &tag_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ); + tx.set(&subspace.pack(&tag_key), &tag_key.serialize(())?); } let input_key = super::InputKey::new(workflow_id, location.clone()); // Write input - for (i, chunk) in input_key - .split_ref(&input) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in input_key.split_ref(&input)?.into_iter().enumerate() { let chunk_key = input_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1527,8 +1504,8 @@ pub mod insert { } pub fn activity_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1563,12 +1540,7 @@ pub mod insert { let input_key = super::InputKey::new(workflow_id, location.clone()); // Write input - for (i, chunk) in input_key - .split_ref(&input) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in input_key.split_ref(&input)?.into_iter().enumerate() { let chunk_key = input_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1579,12 +1551,7 @@ pub mod insert { let output_key = super::OutputKey::new(workflow_id, location.clone()); // Write output - for (i, chunk) in output_key - .split_ref(&output) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? 
- .into_iter() - .enumerate() - { + for (i, chunk) in output_key.split_ref(&output)?.into_iter().enumerate() { let chunk_key = output_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1605,8 +1572,8 @@ pub mod insert { } pub fn message_send_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1628,22 +1595,15 @@ pub mod insert { // Write tags let tags = tags .as_object() - .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string())) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string()))? .into_iter() .map(|(k, v)| Ok((k.clone(), value_to_str(v)?))) - .collect::>>() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .collect::>>()?; for (k, v) in &tags { // Write tag key let tag_key = super::TagKey::new(workflow_id, location.clone(), k.clone(), v.clone()); - tx.set( - &subspace.pack(&tag_key), - &tag_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ); + tx.set(&subspace.pack(&tag_key), &tag_key.serialize(())?); } let message_name_key = super::NameKey::new(workflow_id, location.clone()); @@ -1655,12 +1615,7 @@ pub mod insert { let body_key = super::InputKey::new(workflow_id, location.clone()); // Write body - for (i, chunk) in body_key - .split_ref(&body) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in body_key.split_ref(&body)?.into_iter().enumerate() { let chunk_key = body_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1670,8 +1625,8 @@ pub mod insert { } pub fn loop_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1699,12 +1654,7 @@ pub mod insert { let state_key = super::InputKey::new(workflow_id, location.clone()); // Write state - for (i, chunk) in state_key - .split_ref(&state) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in state_key.split_ref(&state)?.into_iter().enumerate() { let chunk_key = state_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1714,12 +1664,7 @@ pub mod insert { let output_key = super::OutputKey::new(workflow_id, location.clone()); // Write output - for (i, chunk) in output_key - .split_ref(&output) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in output_key.split_ref(&output)?.into_iter().enumerate() { let chunk_key = output_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1730,8 +1675,8 @@ pub mod insert { } pub fn update_loop_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, iteration: usize, @@ -1747,12 +1692,7 @@ pub mod insert { let state_key = super::InputKey::new(workflow_id, location.clone()); // Write state - for (i, chunk) in state_key - .split_ref(&state) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? 
- .into_iter() - .enumerate() - { + for (i, chunk) in state_key.split_ref(&state)?.into_iter().enumerate() { let chunk_key = state_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1762,12 +1702,7 @@ pub mod insert { let output_key = super::OutputKey::new(workflow_id, location.clone()); // Write output - for (i, chunk) in output_key - .split_ref(&output) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in output_key.split_ref(&output)?.into_iter().enumerate() { let chunk_key = output_key.chunk(i); tx.set(&subspace.pack(&chunk_key), &chunk); @@ -1778,8 +1713,8 @@ pub mod insert { } pub fn sleep_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1813,8 +1748,8 @@ pub mod insert { } pub fn update_sleep_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, sleep_state: SleepState, @@ -1829,8 +1764,8 @@ pub mod insert { } pub fn branch_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1850,8 +1785,8 @@ pub mod insert { } pub fn removed_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, @@ -1887,8 +1822,8 @@ pub mod insert { } pub fn version_check_event( - subspace: &udb::tuple::Subspace, - tx: &udb::RetryableTransaction, + subspace: &universaldb::tuple::Subspace, + tx: &universaldb::RetryableTransaction, workflow_id: Id, location: &Location, version: usize, diff --git a/packages/common/gasoline/core/src/db/kv/keys/metric.rs b/packages/common/gasoline/core/src/db/kv/keys/metric.rs index 60697d670c..c20c60d2c7 100644 --- a/packages/common/gasoline/core/src/db/kv/keys/metric.rs +++ b/packages/common/gasoline/core/src/db/kv/keys/metric.rs @@ -1,5 +1,5 @@ use anyhow::*; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug, PartialEq, Eq)] pub enum GaugeMetric { diff --git a/packages/common/gasoline/core/src/db/kv/keys/signal.rs b/packages/common/gasoline/core/src/db/kv/keys/signal.rs index 150c34b681..04cb4437c5 100644 --- a/packages/common/gasoline/core/src/db/kv/keys/signal.rs +++ b/packages/common/gasoline/core/src/db/kv/keys/signal.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use rivet_util::Id; -use udb_util::prelude::*; +use universaldb::prelude::*; pub struct BodyKey { signal_id: Id, @@ -17,7 +17,7 @@ impl BodyKey { Ok(value .get() .as_bytes() - .chunks(udb_util::CHUNK_SIZE) + .chunks(universaldb::utils::CHUNK_SIZE) .map(|x| x.to_vec()) .collect()) } @@ -34,7 +34,7 @@ impl FormalChunkedKey for BodyKey { } } - fn combine(&self, chunks: Vec) -> Result { + fn combine(&self, chunks: Vec) -> Result { serde_json::value::RawValue::from_string(String::from_utf8( chunks .iter() diff --git a/packages/common/gasoline/core/src/db/kv/keys/wake.rs b/packages/common/gasoline/core/src/db/kv/keys/wake.rs index 721f3714a4..9e71b3f43a 100644 --- a/packages/common/gasoline/core/src/db/kv/keys/wake.rs +++ b/packages/common/gasoline/core/src/db/kv/keys/wake.rs @@ -2,7 
+2,7 @@ use std::result::Result::Ok; use anyhow::*; use rivet_util::Id; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug)] pub enum WakeCondition { diff --git a/packages/common/gasoline/core/src/db/kv/keys/worker_instance.rs b/packages/common/gasoline/core/src/db/kv/keys/worker_instance.rs index 851b1825bf..47b4a1103a 100644 --- a/packages/common/gasoline/core/src/db/kv/keys/worker_instance.rs +++ b/packages/common/gasoline/core/src/db/kv/keys/worker_instance.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use rivet_util::Id; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug)] pub struct LastPingTsKey { diff --git a/packages/common/gasoline/core/src/db/kv/keys/workflow.rs b/packages/common/gasoline/core/src/db/kv/keys/workflow.rs index 4a5e01d8ef..63b166c42e 100644 --- a/packages/common/gasoline/core/src/db/kv/keys/workflow.rs +++ b/packages/common/gasoline/core/src/db/kv/keys/workflow.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use rivet_util::Id; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug)] pub struct LeaseKey { @@ -159,7 +159,7 @@ impl InputKey { Ok(value .get() .as_bytes() - .chunks(udb_util::CHUNK_SIZE) + .chunks(universaldb::utils::CHUNK_SIZE) .map(|x| x.to_vec()) .collect()) } @@ -176,7 +176,7 @@ impl FormalChunkedKey for InputKey { } } - fn combine(&self, chunks: Vec) -> Result { + fn combine(&self, chunks: Vec) -> Result { serde_json::value::RawValue::from_string(String::from_utf8( chunks .iter() @@ -246,7 +246,7 @@ impl OutputKey { Ok(value .get() .as_bytes() - .chunks(udb_util::CHUNK_SIZE) + .chunks(universaldb::utils::CHUNK_SIZE) .map(|x| x.to_vec()) .collect()) } @@ -263,7 +263,7 @@ impl FormalChunkedKey for OutputKey { } } - fn combine(&self, chunks: Vec) -> Result { + fn combine(&self, chunks: Vec) -> Result { serde_json::value::RawValue::from_string(String::from_utf8( chunks .iter() @@ -333,7 +333,7 @@ impl StateKey { Ok(value .get() .as_bytes() - .chunks(udb_util::CHUNK_SIZE) + .chunks(universaldb::utils::CHUNK_SIZE) .map(|x| x.to_vec()) .collect()) } @@ -350,7 +350,7 @@ impl FormalChunkedKey for StateKey { } } - fn combine(&self, chunks: Vec) -> Result { + fn combine(&self, chunks: Vec) -> Result { serde_json::value::RawValue::from_string(String::from_utf8( chunks .iter() diff --git a/packages/common/gasoline/core/src/db/kv/mod.rs b/packages/common/gasoline/core/src/db/kv/mod.rs index 0321d3453c..9c2db70896 100644 --- a/packages/common/gasoline/core/src/db/kv/mod.rs +++ b/packages/common/gasoline/core/src/db/kv/mod.rs @@ -7,18 +7,18 @@ use std::{ time::Instant, }; +use anyhow::{Context, Result}; use futures_util::{StreamExt, TryStreamExt, stream::BoxStream}; use rivet_util::Id; use rivet_util::future::CustomInstrumentExt; use serde_json::json; use tracing::Instrument; -use udb_util::{ - FormalChunkedKey, FormalKey, SERIALIZABLE, SNAPSHOT, TxnExt, end_of_key_range, keys::*, +use universaldb::utils::{ + FormalChunkedKey, FormalKey, IsolationLevel::*, end_of_key_range, keys::*, }; use universaldb::{ - self as udb, - future::FdbValue, options::{ConflictRangeType, MutationType, StreamingMode}, + value::Value, }; use rivet_metrics::KeyValue; @@ -48,7 +48,7 @@ const WORKER_WAKE_SUBJECT: &str = "gasoline.worker.wake"; pub struct DatabaseKv { pools: rivet_pools::Pools, - subspace: udb_util::Subspace, + subspace: universaldb::utils::Subspace, } impl DatabaseKv { @@ -81,23 +81,21 @@ impl DatabaseKv { } } -// MARK: FDB Helpers +// MARK: UDB Helpers impl DatabaseKv { fn 
write_signal_wake_idxs( &self, workflow_id: Id, wake_signals: &[&str], - tx: &udb::RetryableTransaction, - ) -> Result<(), udb::FdbBindingError> { + tx: &universaldb::Transaction, + ) -> Result<()> { for signal_name in wake_signals { // Write to wake signals list let wake_signal_key = keys::workflow::WakeSignalKey::new(workflow_id, signal_name.to_string()); tx.set( &self.subspace.pack(&wake_signal_key), - &wake_signal_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &wake_signal_key.serialize(())?, ); } @@ -109,16 +107,14 @@ impl DatabaseKv { workflow_id: Id, workflow_name: &str, sub_workflow_id: Id, - tx: &udb::RetryableTransaction, - ) -> Result<(), udb::FdbBindingError> { + tx: &universaldb::Transaction, + ) -> Result<()> { let sub_workflow_wake_key = keys::wake::SubWorkflowWakeKey::new(sub_workflow_id, workflow_id); tx.set( &self.subspace.pack(&sub_workflow_wake_key), - &sub_workflow_wake_key - .serialize(workflow_name.to_string()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &sub_workflow_wake_key.serialize(workflow_name.to_string())?, ); Ok(()) @@ -131,8 +127,8 @@ impl DatabaseKv { signal_id: Id, signal_name: &str, body: &serde_json::value::RawValue, - tx: &udb::RetryableTransaction, - ) -> Result<(), udb::FdbBindingError> { + tx: &universaldb::Transaction, + ) -> Result<()> { tracing::debug!( ?ray_id, ?workflow_id, @@ -145,36 +141,25 @@ impl DatabaseKv { // Check if the workflow exists let Some(workflow_name_entry) = tx - .get(&self.subspace.pack(&workflow_name_key), SERIALIZABLE) + .get(&self.subspace.pack(&workflow_name_key), Serializable) .await? else { - return Err(udb::FdbBindingError::CustomError( - WorkflowError::WorkflowNotFound.into(), - )); + return Err(WorkflowError::WorkflowNotFound.into()); }; - let workflow_name = workflow_name_key - .deserialize(&workflow_name_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let workflow_name = workflow_name_key.deserialize(&workflow_name_entry)?; // Write name let name_key = keys::signal::NameKey::new(signal_id); tx.set( &self.subspace.pack(&name_key), - &name_key - .serialize(signal_name.to_string()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &name_key.serialize(signal_name.to_string())?, ); let signal_body_key = keys::signal::BodyKey::new(signal_id); // Write signal body - for (i, chunk) in signal_body_key - .split_ref(body) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? 
- .into_iter() - .enumerate() - { + for (i, chunk) in signal_body_key.split_ref(body)?.into_iter().enumerate() { let chunk_key = signal_body_key.chunk(i); tx.set(&self.subspace.pack(&chunk_key), &chunk); @@ -186,36 +171,28 @@ impl DatabaseKv { tx.set( &self.subspace.pack(&pending_signal_key), - &pending_signal_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &pending_signal_key.serialize(())?, ); // Write create ts let create_ts_key = keys::signal::CreateTsKey::new(signal_id); tx.set( &self.subspace.pack(&create_ts_key), - &create_ts_key - .serialize(pending_signal_key.ts) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &create_ts_key.serialize(pending_signal_key.ts)?, ); // Write ray id let ray_id_key = keys::signal::RayIdKey::new(signal_id); tx.set( &self.subspace.pack(&ray_id_key), - &ray_id_key - .serialize(ray_id) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &ray_id_key.serialize(ray_id)?, ); // Write workflow id let workflow_id_key = keys::signal::WorkflowIdKey::new(signal_id); tx.set( &self.subspace.pack(&workflow_id_key), - &workflow_id_key - .serialize(workflow_id) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &workflow_id_key.serialize(workflow_id)?, ); let wake_signal_key = @@ -223,7 +200,7 @@ impl DatabaseKv { // If the workflow currently has a wake signal key for this signal, wake it if tx - .get(&self.subspace.pack(&wake_signal_key), SERIALIZABLE) + .get(&self.subspace.pack(&wake_signal_key), Serializable) .await? .is_some() { @@ -237,14 +214,12 @@ impl DatabaseKv { // Add wake condition for workflow tx.set( &self.subspace.pack(&wake_condition_key), - &wake_condition_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &wake_condition_key.serialize(())?, ); } update_metric( - &tx.subspace(self.subspace.clone()), + &tx.with_subspace(self.subspace.clone()), None, Some(keys::metric::GaugeMetric::SignalPending( signal_name.to_string(), @@ -262,15 +237,15 @@ impl DatabaseKv { tags: Option<&serde_json::Value>, input: &serde_json::value::RawValue, unique: bool, - tx: &udb::RetryableTransaction, - ) -> Result { - let txs = tx.subspace(self.subspace.clone()); + tx: &universaldb::Transaction, + ) -> Result { + let tx = tx.with_subspace(self.subspace.clone()); if unique { let empty_tags = json!({}); if let Some(existing_workflow_id) = self - .find_workflow_inner(workflow_name, tags.unwrap_or(&empty_tags), tx) + .find_workflow_inner(workflow_name, tags.unwrap_or(&empty_tags), &tx) .await? { tracing::debug!(?existing_workflow_id, "found existing workflow"); @@ -278,17 +253,17 @@ impl DatabaseKv { } } - txs.write( + tx.write( &keys::workflow::CreateTsKey::new(workflow_id), rivet_util::timestamp::now(), )?; - txs.write( + tx.write( &keys::workflow::NameKey::new(workflow_id), workflow_name.to_string(), )?; - txs.write(&keys::workflow::RayIdKey::new(workflow_id), ray_id)?; + tx.write(&keys::workflow::RayIdKey::new(workflow_id), ray_id)?; // Write tags let tags = tags @@ -296,17 +271,15 @@ impl DatabaseKv { x.as_object() .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string())) }) - .transpose() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + .transpose()? 
.into_iter() .flatten() .map(|(k, v)| Ok((k.clone(), value_to_str(v)?))) - .collect::>>() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .collect::>>()?; for (k, v) in &tags { // Write tag key - txs.write( + tx.write( &keys::workflow::TagKey::new(workflow_id, k.clone(), v.clone()), (), )?; @@ -318,7 +291,7 @@ impl DatabaseKv { .map(|(k, v)| (k.clone(), v.clone())) .collect(); - txs.write( + tx.write( &keys::workflow::ByNameAndTagKey::new( workflow_name.to_string(), k.clone(), @@ -330,7 +303,7 @@ impl DatabaseKv { } // Write null key for the "by name and first tag" secondary index (all workflows have this) - txs.write( + tx.write( &keys::workflow::ByNameAndTagKey::null(workflow_name.to_string(), workflow_id), tags, )?; @@ -338,19 +311,14 @@ impl DatabaseKv { // Write input let input_key = keys::workflow::InputKey::new(workflow_id); - for (i, chunk) in input_key - .split_ref(input) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in input_key.split_ref(input)?.into_iter().enumerate() { let chunk_key = input_key.chunk(i); - txs.set(&self.subspace.pack(&chunk_key), &chunk); + tx.set(&self.subspace.pack(&chunk_key), &chunk); } // Write immediate wake condition - txs.write( + tx.write( &keys::wake::WorkflowWakeConditionKey::new( workflow_name.to_string(), workflow_id, @@ -359,11 +327,11 @@ impl DatabaseKv { (), )?; - txs.write(&keys::workflow::HasWakeConditionKey::new(workflow_id), ())?; + tx.write(&keys::workflow::HasWakeConditionKey::new(workflow_id), ())?; // Write metric update_metric( - &txs, + &tx, None, Some(keys::metric::GaugeMetric::WorkflowSleeping( workflow_name.to_string(), @@ -377,20 +345,14 @@ impl DatabaseKv { &self, workflow_name: &str, tags: &serde_json::Value, - tx: &udb::RetryableTransaction, - ) -> Result, udb::FdbBindingError> { + tx: &universaldb::Transaction, + ) -> Result> { // Convert to flat vec of strings let mut tag_iter = tags .as_object() - .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string())) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string()))? 
.iter() - .map(|(k, v)| { - Result::<_, udb::FdbBindingError>::Ok(( - k.clone(), - value_to_str(v).map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - )) - }); + .map(|(k, v)| Result::<_>::Ok((k.clone(), value_to_str(v)?))); let first_tag = tag_iter.next().transpose()?; let rest_of_tags = tag_iter.collect::, _>>()?; @@ -412,11 +374,11 @@ impl DatabaseKv { }; let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::Iterator, ..(&workflow_by_name_and_tag_subspace).into() }, - SERIALIZABLE, + Serializable, ); loop { @@ -427,13 +389,10 @@ impl DatabaseKv { // Unpack key let workflow_by_name_and_tag_key = self .subspace - .unpack::(&entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .unpack::(&entry.key())?; // Deserialize value - let wf_rest_of_tags = workflow_by_name_and_tag_key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let wf_rest_of_tags = workflow_by_name_and_tag_key.deserialize(entry.value())?; // Compute intersection between wf tags and input let tags_match = rest_of_tags.iter().all(|(k, v)| { @@ -459,7 +418,7 @@ impl Database for DatabaseKv { async fn from_pools(pools: rivet_pools::Pools) -> anyhow::Result> { Ok(Arc::new(DatabaseKv { pools, - subspace: udb_util::Subspace::new(&(RIVET, GASOLINE, KV)), + subspace: universaldb::utils::Subspace::new(&(RIVET, GASOLINE, KV)), })) } @@ -496,7 +455,7 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { async move { let now = rivet_util::timestamp::now(); @@ -510,23 +469,21 @@ impl Database for DatabaseKv { // List all active leases let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&lease_subspace).into() }, - // Not SERIALIZABLE because we don't want this to conflict with other queries which write + // Not Serializable because we don't want this to conflict with other queries which write // leases - SNAPSHOT, + Snapshot, ); while let Some(lease_key_entry) = stream.try_next().await? { let lease_key = self .subspace - .unpack::(lease_key_entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; - let (workflow_name, worker_instance_id) = lease_key - .deserialize(lease_key_entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .unpack::(lease_key_entry.key())?; + let (workflow_name, worker_instance_id) = + lease_key.deserialize(lease_key_entry.value())?; let last_ping_ts_key = keys::worker_instance::LastPingTsKey::new(worker_instance_id); @@ -539,15 +496,13 @@ impl Database for DatabaseKv { } else if let Some(last_ping_entry) = tx .get( &self.subspace.pack(&last_ping_ts_key), - // Not SERIALIZABLE because we don't want this to conflict - SNAPSHOT, + // Not Serializable because we don't want this to conflict + Snapshot, ) .await? { // Deserialize last ping value - let last_ping_ts = last_ping_ts_key - .deserialize(&last_ping_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let last_ping_ts = last_ping_ts_key.deserialize(&last_ping_entry)?; // Update cache last_ping_cache.push((worker_instance_id, last_ping_ts)); @@ -566,7 +521,7 @@ impl Database for DatabaseKv { let silence_ts_key = keys::workflow::SilenceTsKey::new(lease_key.workflow_id); if tx - .get(&self.subspace.pack(&silence_ts_key), SERIALIZABLE) + .get(&self.subspace.pack(&silence_ts_key), Serializable) .await? 
.is_some() { @@ -596,13 +551,11 @@ impl Database for DatabaseKv { ); tx.set( &self.subspace.pack(&wake_condition_key), - &wake_condition_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &wake_condition_key.serialize(())?, ); update_metric( - &tx.subspace(self.subspace.clone()), + &tx.with_subspace(self.subspace.clone()), Some(keys::metric::GaugeMetric::WorkflowActive( workflow_name.to_string(), )), @@ -622,7 +575,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("clear_expired_leases_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; if expired_workflow_count != 0 { tracing::info!( @@ -644,13 +598,13 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { async move { - let txs = tx.subspace(self.subspace.clone()); + let tx = tx.with_subspace(self.subspace.clone()); // Read existing lock - let lock_expired = if let Some(lock_ts) = txs - .read_opt(&keys::worker_instance::MetricsLockKey::new(), SERIALIZABLE) + let lock_expired = if let Some(lock_ts) = tx + .read_opt(&keys::worker_instance::MetricsLockKey::new(), Serializable) .await? { lock_ts < rivet_util::timestamp::now() - METRICS_LOCK_TIMEOUT_MS @@ -659,9 +613,9 @@ impl Database for DatabaseKv { }; if lock_expired { - // Write to lock key. FDB transactions guarantee that if multiple workers are running this + // Write to lock key. UDB transactions guarantee that if multiple workers are running this // query at the same time only one will succeed which means only one will have the lock. - txs.write( + tx.write( &keys::worker_instance::MetricsLockKey::new(), rivet_util::timestamp::now(), )?; @@ -671,33 +625,37 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("acquire_lock_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; if acquired_lock { let entries = self .pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { - let txs = tx.subspace(self.subspace.clone()); + .run(|tx| async move { + let tx = tx.with_subspace(self.subspace.clone()); - let metrics_subspace = txs.subspace(&keys::metric::GaugeMetricKey::subspace()); - txs.get_ranges_keyvalues( - udb::RangeOption { + let metrics_subspace = self + .subspace + .subspace(&keys::metric::GaugeMetricKey::subspace()); + tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&metrics_subspace).into() }, - SERIALIZABLE, + Serializable, ) .map(|res| match res { - Ok(entry) => txs.read_entry::(&entry), + Ok(entry) => tx.read_entry::(&entry), Err(err) => Err(err.into()), }) .try_collect::>() .await }) .custom_instrument(tracing::info_span!("read_metrics_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; let mut total_workflow_counts: Vec<(String, usize)> = Vec::new(); @@ -779,14 +737,15 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { let metrics_lock_key = keys::worker_instance::MetricsLockKey::new(); tx.clear(&self.subspace.pack(&metrics_lock_key)); Ok(()) }) .custom_instrument(tracing::info_span!("clear_lock_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; } Ok(()) @@ -805,23 +764,22 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? 
- .run(|tx, _mc| { + .run(|tx| { async move { // Update worker instance ping let last_ping_ts_key = keys::worker_instance::LastPingTsKey::new(worker_instance_id); tx.set( &self.subspace.pack(&last_ping_ts_key), - &last_ping_ts_key - .serialize(rivet_util::timestamp::now()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &last_ping_ts_key.serialize(rivet_util::timestamp::now())?, ); Ok(()) } }) .custom_instrument(tracing::info_span!("update_worker_ping_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -840,7 +798,7 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { self.dispatch_workflow_inner( ray_id, workflow_id, @@ -853,7 +811,8 @@ impl Database for DatabaseKv { .await }) .custom_instrument(tracing::info_span!("dispatch_workflow_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; self.wake_worker(); @@ -865,7 +824,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { let workflow_ids = workflow_ids.clone(); async move { futures_util::stream::iter(workflow_ids) @@ -889,56 +848,50 @@ impl Database for DatabaseKv { has_wake_condition_entry, ) = tokio::try_join!( tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&input_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>(), tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&state_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>(), tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&output_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>(), tx.get( &self.subspace.pack(&has_wake_condition_key), - SERIALIZABLE + Serializable ), )?; if input_chunks.is_empty() { Ok(None) } else { - let input = input_key - .combine(input_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let input = input_key.combine(input_chunks)?; let state = if state_chunks.is_empty() { serde_json::value::RawValue::NULL.to_owned() } else { - state_key.combine(state_chunks).map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })? + state_key.combine(state_chunks)? }; let output = if output_chunks.is_empty() { None } else { - Some(output_key.combine(output_chunks).map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?) + Some(output_key.combine(output_chunks)?) }; Ok(Some(WorkflowData { @@ -960,7 +913,7 @@ impl Database for DatabaseKv { }) .custom_instrument(tracing::info_span!("get_workflow_tx")) .await - .map_err(Into::into) + .map_err(WorkflowError::Udb) } /// Returns the first incomplete workflow with the given name and tags, first meaning the one with the @@ -978,9 +931,10 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { self.find_workflow_inner(workflow_name, tags, &tx).await }) + .run(|tx| async move { self.find_workflow_inner(workflow_name, tags, &tx).await }) .custom_instrument(tracing::info_span!("find_workflow_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; let dt = start_instant.elapsed().as_secs_f64(); metrics::FIND_WORKFLOWS_DURATION.record( @@ -1007,16 +961,14 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? 
- .run(|tx, _mc| { + .run(|tx| { let owned_filter = owned_filter.clone(); async move { let now = rivet_util::timestamp::now(); // All wake conditions with a timestamp after this timestamp will be pulled - let pull_before = now - + i64::try_from(self.worker_poll_interval().as_millis()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let pull_before = now + i64::try_from(self.worker_poll_interval().as_millis())?; // Pull all available wake conditions from all registered wf names let entries = futures_util::stream::iter(owned_filter) @@ -1044,24 +996,24 @@ impl Database for DatabaseKv { .to_vec(); tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(wake_subspace_start, wake_subspace_end).into() }, - // Must be a snapshot to not conflict with any new wake conditions being + // Must be a Snapshot to not conflict with any new wake conditions being // inserted - SNAPSHOT, + Snapshot, ) }) .flatten() - .map(|res| match res { - Ok(entry) => Ok(( + .map(|res| { + let entry = res?; + + anyhow::Ok(( entry.key().to_vec(), self.subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - )), - Err(err) => Err(Into::::into(err)), + .unpack::(entry.key())?, + )) }) .try_collect::>() .await?; @@ -1101,17 +1053,16 @@ impl Database for DatabaseKv { let lease_key_buf = self.subspace.pack(&lease_key); // Check lease - if tx.get(&lease_key_buf, SERIALIZABLE).await?.is_some() { - Result::<_, udb::FdbBindingError>::Ok(None) + if tx.get(&lease_key_buf, Serializable).await?.is_some() { + Result::<_>::Ok(None) } else { // Write lease tx.set( &lease_key_buf, - &lease_key - .serialize((workflow_name.clone(), worker_instance_id)) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?, + &lease_key.serialize(( + workflow_name.clone(), + worker_instance_id, + ))?, ); // Write worker instance id @@ -1119,15 +1070,11 @@ impl Database for DatabaseKv { keys::workflow::WorkerInstanceIdKey::new(workflow_id); tx.set( &self.subspace.pack(&worker_instance_id_key), - &worker_instance_id_key - .serialize(worker_instance_id) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?, + &worker_instance_id_key.serialize(worker_instance_id)?, ); update_metric( - &tx.subspace(self.subspace.clone()), + &tx.with_subspace(self.subspace.clone()), Some(keys::metric::GaugeMetric::WorkflowSleeping( workflow_name.clone(), )), @@ -1168,12 +1115,10 @@ impl Database for DatabaseKv { let wake_sub_workflow_key = keys::workflow::WakeSubWorkflowKey::new(*workflow_id); if let Some(entry) = tx - .get(&self.subspace.pack(&wake_sub_workflow_key), SERIALIZABLE) + .get(&self.subspace.pack(&wake_sub_workflow_key), Serializable) .await? { - let sub_workflow_id = wake_sub_workflow_key - .deserialize(&entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let sub_workflow_id = wake_sub_workflow_key.deserialize(&entry)?; let sub_workflow_wake_key = keys::wake::SubWorkflowWakeKey::new(sub_workflow_id, *workflow_id); @@ -1194,7 +1139,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("pull_workflows_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; let worker_instance_id_str = worker_instance_id.to_string(); let dt = start_instant.elapsed().as_secs_f64(); @@ -1223,7 +1169,7 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? 
- .run(|tx, _mc| { + .run(|tx| { let leased_workflows = leased_workflows.clone(); async move { @@ -1253,42 +1199,33 @@ impl Database for DatabaseKv { events, ) = tokio::try_join!( async { - tx.get(&self.subspace.pack(&create_ts_key), SERIALIZABLE) + tx.get(&self.subspace.pack(&create_ts_key), Serializable) .await - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - }) }, async { - tx.get(&self.subspace.pack(&ray_id_key), SERIALIZABLE) - .await - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - }) + tx.get(&self.subspace.pack(&ray_id_key), Serializable).await }, async { tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&input_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>() .await - .map_err(|x| udb::FdbBindingError::CustomError(x.into())) }, async { tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&state_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>() .await - .map_err(|x| udb::FdbBindingError::CustomError(x.into())) }, async { let mut events_by_location: HashMap> = @@ -1297,11 +1234,11 @@ impl Database for DatabaseKv { WorkflowHistoryEventBuilder::new(Location::empty()); let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&active_history_subspace).into() }, - SERIALIZABLE, + Serializable, ); loop { @@ -1313,11 +1250,8 @@ impl Database for DatabaseKv { let partial_key = self .subspace .unpack::( - entry.key(), - ) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + entry.key(), + )?; if current_event.location != partial_key.location { if current_event.location.is_empty() { @@ -1337,12 +1271,7 @@ impl Database for DatabaseKv { events_by_location .entry(previous_event.location.root()) .or_default() - .push( - Event::try_from(previous_event) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?, - ); + .push(Event::try_from(previous_event)?); } } @@ -1351,53 +1280,35 @@ impl Database for DatabaseKv { self.subspace.unpack::( entry.key(), ) { - let event_type = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let event_type = key.deserialize(entry.value())?; current_event.event_type = Some(event_type); } else if let Ok(key) = self.subspace.unpack::( entry.key(), ) { - let version = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let version = key.deserialize(entry.value())?; current_event.version = Some(version); } else if let Ok(key) = self.subspace.unpack::( entry.key(), ) { - let create_ts = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let create_ts = key.deserialize(entry.value())?; current_event.create_ts = Some(create_ts); } else if let Ok(key) = self.subspace .unpack::(entry.key()) { - let name = key.deserialize(entry.value()).map_err( - |x| udb::FdbBindingError::CustomError(x.into()), - )?; + let name = key.deserialize(entry.value())?; current_event.name = Some(name); } else if let Ok(key) = self.subspace.unpack::( entry.key(), ) { - let signal_id = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let signal_id = key.deserialize(entry.value())?; current_event.signal_id = Some(signal_id); } else if let Ok(key) = self @@ -1405,11 +1316,8 @@ impl Database for 
DatabaseKv { .unpack::( entry.key(), ) { - let sub_workflow_id = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let sub_workflow_id = + key.deserialize(entry.value())?; current_event.sub_workflow_id = Some(sub_workflow_id); @@ -1429,11 +1337,7 @@ impl Database for DatabaseKv { self.subspace.unpack::( entry.key(), ) { - let input_hash = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let input_hash = key.deserialize(entry.value())?; current_event.input_hash = Some(input_hash); } else if let Ok(_key) = @@ -1445,11 +1349,7 @@ impl Database for DatabaseKv { self.subspace.unpack::( entry.key(), ) { - let iteration = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let iteration = key.deserialize(entry.value())?; current_event.iteration = Some(iteration); } else if let Ok(key) = self @@ -1457,11 +1357,7 @@ impl Database for DatabaseKv { .unpack::( entry.key(), ) { - let deadline_ts = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let deadline_ts = key.deserialize(entry.value())?; current_event.deadline_ts = Some(deadline_ts); } else if let Ok(key) = self @@ -1469,11 +1365,7 @@ impl Database for DatabaseKv { .unpack::( entry.key(), ) { - let sleep_state = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let sleep_state = key.deserialize(entry.value())?; current_event.sleep_state = Some(sleep_state); } else if let Ok(key) = self @@ -1481,11 +1373,8 @@ impl Database for DatabaseKv { .unpack::( entry.key(), ) { - let inner_event_type = key - .deserialize(entry.value()) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?; + let inner_event_type = + key.deserialize(entry.value())?; current_event.inner_event_type = Some(inner_event_type); @@ -1498,9 +1387,7 @@ impl Database for DatabaseKv { events_by_location .entry(current_event.location.root()) .or_default() - .push(Event::try_from(current_event).map_err( - |x| udb::FdbBindingError::CustomError(x.into()), - )?); + .push(Event::try_from(current_event)?); } Ok(events_by_location) @@ -1508,31 +1395,17 @@ impl Database for DatabaseKv { )?; let create_ts = create_ts_key - .deserialize(&create_ts_entry.ok_or( - udb::FdbBindingError::CustomError( - format!("key should exist: {create_ts_key:?}").into(), - ), - )?) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .deserialize(&create_ts_entry.context("key should exist")?)?; let ray_id = ray_id_key - .deserialize(&ray_id_entry.ok_or( - udb::FdbBindingError::CustomError( - format!("key should exist: {ray_id_key:?}").into(), - ), - )?) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; - let input = input_key - .combine(input_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .deserialize(&ray_id_entry.context("key should exist")?)?; + let input = input_key.combine(input_chunks)?; let state = if state_chunks.is_empty() { serde_json::value::RawValue::NULL.to_owned() } else { - state_key - .combine(state_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + state_key.combine(state_chunks)? 
}; - Result::<_, udb::FdbBindingError>::Ok(PulledWorkflowData { + Result::<_>::Ok(PulledWorkflowData { workflow_id, workflow_name, create_ts, @@ -1552,7 +1425,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("pull_workflow_history_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; let dt2 = start_instant2.elapsed().as_secs_f64(); let dt = start_instant.elapsed().as_secs_f64(); @@ -1601,7 +1475,7 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { async move { let sub_workflow_wake_subspace = self .subspace @@ -1612,12 +1486,12 @@ impl Database for DatabaseKv { let wake_deadline_key = keys::workflow::WakeDeadlineKey::new(workflow_id); let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&sub_workflow_wake_subspace).into() }, - // NOTE: Must be serializable to conflict with `get_sub_workflow` - SERIALIZABLE, + // NOTE: Must be Serializable to conflict with `get_sub_workflow` + Serializable, ); let (wrote_to_wake_idx, tag_keys, wake_deadline_entry) = tokio::try_join!( @@ -1626,13 +1500,11 @@ impl Database for DatabaseKv { let mut wrote_to_wake_idx = false; while let Some(entry) = stream.try_next().await? { - let sub_workflow_wake_key = self - .subspace - .unpack::(&entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; - let workflow_name = sub_workflow_wake_key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let sub_workflow_wake_key = + self.subspace + .unpack::(&entry.key())?; + let workflow_name = + sub_workflow_wake_key.deserialize(entry.value())?; let wake_condition_key = keys::wake::WorkflowWakeConditionKey::new( workflow_name, @@ -1645,9 +1517,7 @@ impl Database for DatabaseKv { // Add wake condition for workflow tx.set( &self.subspace.pack(&wake_condition_key), - &wake_condition_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &wake_condition_key.serialize(())?, ); // Clear secondary index @@ -1656,33 +1526,23 @@ impl Database for DatabaseKv { wrote_to_wake_idx = true; } - Result::<_, udb::FdbBindingError>::Ok(wrote_to_wake_idx) + Ok(wrote_to_wake_idx) }, // Read tags - async { - tx.get_ranges_keyvalues( - udb::RangeOption { - mode: StreamingMode::WantAll, - ..(&tags_subspace).into() - }, - SERIALIZABLE, - ) - .map(|res| match res { - Ok(entry) => self - .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())), - Err(err) => Err(Into::::into(err)), - }) - .try_collect::>() - .await - .map_err(Into::into) - }, - async { - tx.get(&self.subspace.pack(&wake_deadline_key), SERIALIZABLE) - .await - .map_err(Into::into) - }, + tx.get_ranges_keyvalues( + universaldb::RangeOption { + mode: StreamingMode::WantAll, + ..(&tags_subspace).into() + }, + Serializable, + ) + .map(|res| { + self.subspace + .unpack::(res?.key()) + .map_err(anyhow::Error::from) + }) + .try_collect::>(), + tx.get(&self.subspace.pack(&wake_deadline_key), Serializable), )?; for key in tag_keys { @@ -1711,9 +1571,7 @@ impl Database for DatabaseKv { // reason this isn't immediately cleared in `pull_workflows` along with the rest of the // wake conditions is because it might be in the future. 
if let Some(raw) = wake_deadline_entry { - let deadline_ts = wake_deadline_key - .deserialize(&raw) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let deadline_ts = wake_deadline_key.deserialize(&raw)?; let wake_condition_key = keys::wake::WorkflowWakeConditionKey::new( workflow_name.to_string(), @@ -1732,12 +1590,7 @@ impl Database for DatabaseKv { // Write output let output_key = keys::workflow::OutputKey::new(workflow_id); - for (i, chunk) in output_key - .split_ref(output) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in output_key.split_ref(output)?.into_iter().enumerate() { let chunk_key = output_key.chunk(i); tx.set(&self.subspace.pack(&chunk_key), &chunk); @@ -1751,7 +1604,7 @@ impl Database for DatabaseKv { tx.clear(&self.subspace.pack(&worker_instance_id_key)); update_metric( - &tx.subspace(self.subspace.clone()), + &tx.with_subspace(self.subspace.clone()), Some(keys::metric::GaugeMetric::WorkflowActive( workflow_name.to_string(), )), @@ -1764,7 +1617,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("complete_workflows_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; // Wake worker again in case some other workflow was waiting for this one to complete if wrote_to_wake_idx { @@ -1796,12 +1650,12 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { async move { let wake_deadline_key = keys::workflow::WakeDeadlineKey::new(workflow_id); let wake_deadline_entry = tx - .get(&self.subspace.pack(&wake_deadline_key), SERIALIZABLE) + .get(&self.subspace.pack(&wake_deadline_key), Serializable) .await?; // Add immediate wake for workflow @@ -1813,9 +1667,7 @@ impl Database for DatabaseKv { ); tx.set( &self.subspace.pack(&wake_condition_key), - &wake_condition_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &wake_condition_key.serialize(())?, ); } @@ -1826,9 +1678,7 @@ impl Database for DatabaseKv { // reason this isn't immediately cleared in `pull_workflows` along with the rest of the // wake conditions is because it might be in the future. 
if let Some(raw) = wake_deadline_entry { - let deadline_ts = wake_deadline_key - .deserialize(&raw) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let deadline_ts = wake_deadline_key.deserialize(&raw)?; let wake_condition_key = keys::wake::WorkflowWakeConditionKey::new( workflow_name.to_string(), @@ -1850,17 +1700,13 @@ impl Database for DatabaseKv { // Add wake condition for workflow tx.set( &self.subspace.pack(&wake_condition_key), - &wake_condition_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &wake_condition_key.serialize(())?, ); // Write to wake deadline tx.set( &self.subspace.pack(&wake_deadline_key), - &wake_deadline_key - .serialize(deadline_ts) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &wake_deadline_key.serialize(deadline_ts)?, ); } @@ -1886,9 +1732,7 @@ impl Database for DatabaseKv { if has_wake_condition { tx.set( &self.subspace.pack(&has_wake_condition_key), - &has_wake_condition_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &has_wake_condition_key.serialize(())?, ); } else { tx.clear(&self.subspace.pack(&has_wake_condition_key)); @@ -1898,9 +1742,7 @@ impl Database for DatabaseKv { let error_key = keys::workflow::ErrorKey::new(workflow_id); tx.set( &self.subspace.pack(&error_key), - &error_key - .serialize(error.to_string()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &error_key.serialize(error.to_string())?, ); // Clear lease @@ -1911,7 +1753,7 @@ impl Database for DatabaseKv { tx.clear(&self.subspace.pack(&worker_instance_id_key)); update_metric( - &tx.subspace(self.subspace.clone()), + &tx.with_subspace(self.subspace.clone()), Some(keys::metric::GaugeMetric::WorkflowActive( workflow_name.to_string(), )), @@ -1929,7 +1771,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("commit_workflow_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; // Always wake the worker immediately again. This is an IMPORTANT implementation detail to prevent // race conditions with workflow sleep. Imagine the scenario: @@ -1975,12 +1818,12 @@ impl Database for DatabaseKv { .map(|x| x.to_string()) .collect::>(); - // Fetch signal from FDB + // Fetch signal from UDB let signal = self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { let owned_filter = owned_filter.clone(); async move { @@ -1997,14 +1840,14 @@ impl Database for DatabaseKv { ); tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, limit: Some(1), ..(&pending_signal_subspace).into() }, - // NOTE: This is serializable because any insert into this subspace + // NOTE: This is Serializable because any insert into this subspace // should cause a conflict and retry of this txn - SERIALIZABLE, + Serializable, ) }) .collect::>(); @@ -2013,15 +1856,12 @@ impl Database for DatabaseKv { let mut results = futures_util::future::try_join_all( streams.into_iter().map(|mut stream| async move { if let Some(entry) = stream.try_next().await? 
{ - Result::<_, udb::FdbBindingError>::Ok(Some(( + Result::<_>::Ok(Some(( entry.key().to_vec(), self.subspace .unpack::( &entry.key(), - ) - .map_err(|x| { - udb::FdbBindingError::CustomError(x.into()) - })?, + )?, ))) } else { Ok(None) @@ -2058,13 +1898,11 @@ impl Database for DatabaseKv { )?; tx.set( &self.subspace.pack(&ack_ts_key), - &ack_ts_key - .serialize(rivet_util::timestamp::now()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &ack_ts_key.serialize(rivet_util::timestamp::now())?, ); update_metric( - &tx.subspace(self.subspace.clone()), + &tx.with_subspace(self.subspace.clone()), Some(keys::metric::GaugeMetric::SignalPending( signal_name.to_string(), )), @@ -2082,18 +1920,16 @@ impl Database for DatabaseKv { let chunks = tx .get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&body_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>() .await?; - let body = body_key - .combine(chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let body = body_key.combine(chunks)?; // Insert history event keys::history::insert::signal_event( @@ -2106,8 +1942,7 @@ impl Database for DatabaseKv { signal_id, &signal_name, &body, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(Some(SignalData { signal_id, @@ -2142,7 +1977,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("pull_next_signal_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(signal) } @@ -2157,7 +1993,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { async move { let input_key = keys::workflow::InputKey::new(sub_workflow_id); let input_subspace = self.subspace.subspace(&input_key); @@ -2171,45 +2007,41 @@ impl Database for DatabaseKv { // Read input and output let (input_chunks, state_chunks, output_chunks, has_wake_condition_entry) = tokio::try_join!( tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&input_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>(), tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&state_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>(), tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&output_subspace).into() }, - SERIALIZABLE, + Serializable, ) .try_collect::>(), - tx.get(&self.subspace.pack(&has_wake_condition_key), SERIALIZABLE), + tx.get(&self.subspace.pack(&has_wake_condition_key), Serializable), )?; if input_chunks.is_empty() { Ok(None) } else { - let input = input_key - .combine(input_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let input = input_key.combine(input_chunks)?; let state = if state_chunks.is_empty() { serde_json::value::RawValue::NULL.to_owned() } else { - state_key - .combine(state_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + state_key.combine(state_chunks)? }; let output = if output_chunks.is_empty() { @@ -2230,11 +2062,7 @@ impl Database for DatabaseKv { None } else { - Some( - output_key - .combine(output_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ) + Some(output_key.combine(output_chunks)?) 
}; Ok(Some(WorkflowData { @@ -2249,7 +2077,7 @@ impl Database for DatabaseKv { }) .custom_instrument(tracing::info_span!("get_sub_workflow_tx")) .await - .map_err(Into::into) + .map_err(WorkflowError::Udb) } #[tracing::instrument(skip_all)] @@ -2264,12 +2092,13 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { self.publish_signal_inner(ray_id, workflow_id, signal_id, signal_name, body, &tx) .await }) .custom_instrument(tracing::info_span!("publish_signal_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; self.wake_worker(); @@ -2292,7 +2121,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { self.publish_signal_inner( ray_id, to_workflow_id, @@ -2315,13 +2144,13 @@ impl Database for DatabaseKv { &signal_name, &body, to_workflow_id, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!("publish_signal_from_workflow_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; self.wake_worker(); @@ -2346,7 +2175,7 @@ impl Database for DatabaseKv { .pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { let sub_workflow_id = self .dispatch_workflow_inner( ray_id, @@ -2371,13 +2200,13 @@ impl Database for DatabaseKv { sub_workflow_name, tags, input, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(sub_workflow_id) }) .custom_instrument(tracing::info_span!("dispatch_sub_workflow_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; self.wake_worker(); @@ -2394,7 +2223,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { async move { let tags_subspace = self .subspace @@ -2403,18 +2232,16 @@ impl Database for DatabaseKv { // Read old tags let tag_keys = tx .get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&tags_subspace).into() }, - SERIALIZABLE, + Serializable, ) - .map(|res| match res { - Ok(entry) => self - .subspace - .unpack::(entry.key()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())), - Err(err) => Err(Into::::into(err)), + .map(|res| { + self.subspace + .unpack::(res?.key()) + .map_err(anyhow::Error::from) }) .try_collect::>() .await?; @@ -2435,22 +2262,15 @@ impl Database for DatabaseKv { // Write new tags let tags = tags .as_object() - .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string())) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? + .ok_or_else(|| WorkflowError::InvalidTags("must be an object".to_string()))? 
.into_iter() .map(|(k, v)| Ok((k.clone(), value_to_str(v)?))) - .collect::>>() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + .collect::>>()?; for (k, v) in &tags { let tag_key = keys::workflow::TagKey::new(workflow_id, k.clone(), v.clone()); - tx.set( - &self.subspace.pack(&tag_key), - &tag_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ); + tx.set(&self.subspace.pack(&tag_key), &tag_key.serialize(())?); // Write new "by name and first tag" secondary index let by_name_and_tag_key = keys::workflow::ByNameAndTagKey::new( @@ -2466,9 +2286,7 @@ impl Database for DatabaseKv { .collect(); tx.set( &self.subspace.pack(&by_name_and_tag_key), - &by_name_and_tag_key - .serialize(rest_of_tags) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &by_name_and_tag_key.serialize(rest_of_tags)?, ); } @@ -2476,7 +2294,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("update_workflow_tags_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2490,17 +2309,12 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| { + .run(|tx| { async move { let state_key = keys::workflow::StateKey::new(workflow_id); // Write state - for (i, chunk) in state_key - .split_ref(&state) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .into_iter() - .enumerate() - { + for (i, chunk) in state_key.split_ref(&state)?.into_iter().enumerate() { let chunk_key = state_key.chunk(i); tx.set(&self.subspace.pack(&chunk_key), &chunk); @@ -2510,7 +2324,8 @@ impl Database for DatabaseKv { } }) .custom_instrument(tracing::info_span!("update_workflow_state_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2530,7 +2345,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { keys::history::insert::activity_event( &self.subspace, &tx, @@ -2542,13 +2357,13 @@ impl Database for DatabaseKv { &event_id.input_hash.to_be_bytes(), input, res, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!("commit_workflow_activity_event_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2567,7 +2382,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { keys::history::insert::message_send_event( &self.subspace, &tx, @@ -2578,13 +2393,13 @@ impl Database for DatabaseKv { tags, message_name, body, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!("commit_workflow_message_send_event_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2604,7 +2419,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? 
- .run(|tx, _mc| async move { + .run(|tx| async move { if iteration == 0 { keys::history::insert::loop_event( &self.subspace, @@ -2616,8 +2431,7 @@ impl Database for DatabaseKv { iteration, state, output, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; } else { keys::history::insert::update_loop_event( &self.subspace, @@ -2627,8 +2441,7 @@ impl Database for DatabaseKv { iteration, state, output, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; let active_history_subspace = self.subspace @@ -2653,11 +2466,11 @@ impl Database for DatabaseKv { )); let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&loop_events_subspace).into() }, - SERIALIZABLE, + Serializable, ); // Move all current events under this loop to the forgotten history @@ -2667,9 +2480,7 @@ impl Database for DatabaseKv { }; if !active_history_subspace.is_start_of(entry.key()) { - return Err(udb::FdbBindingError::CustomError( - udb::tuple::PackError::BadPrefix.into(), - )); + return Err(universaldb::tuple::PackError::BadPrefix.into()); } // Truncate tuple up to ACTIVE and replace it with FORGOTTEN @@ -2708,7 +2519,8 @@ impl Database for DatabaseKv { Ok(()) }) .custom_instrument(tracing::info_span!("commit_workflow_sleep_event_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2725,7 +2537,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { keys::history::insert::sleep_event( &self.subspace, &tx, @@ -2735,13 +2547,13 @@ impl Database for DatabaseKv { rivet_util::timestamp::now(), deadline_ts, SleepState::Normal, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!("commit_workflow_sleep_event_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2756,20 +2568,20 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { keys::history::insert::update_sleep_event( &self.subspace, &tx, from_workflow_id, location, state, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!("update_workflow_sleep_event_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2785,7 +2597,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { keys::history::insert::branch_event( &self.subspace, &tx, @@ -2793,13 +2605,13 @@ impl Database for DatabaseKv { location, version, rivet_util::timestamp::now(), - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!("commit_workflow_branch_event_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2816,7 +2628,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? 
- .run(|tx, _mc| async move { + .run(|tx| async move { keys::history::insert::removed_event( &self.subspace, &tx, @@ -2826,13 +2638,13 @@ impl Database for DatabaseKv { rivet_util::timestamp::now(), event_type, event_name, - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!("commit_workflow_removed_event_tx")) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } @@ -2848,7 +2660,7 @@ impl Database for DatabaseKv { self.pools .udb() .map_err(WorkflowError::PoolsGeneric)? - .run(|tx, _mc| async move { + .run(|tx| async move { keys::history::insert::version_check_event( &self.subspace, &tx, @@ -2856,22 +2668,22 @@ impl Database for DatabaseKv { location, version, rivet_util::timestamp::now(), - ) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + )?; Ok(()) }) .custom_instrument(tracing::info_span!( "commit_workflow_version_check_event_tx" )) - .await?; + .await + .map_err(WorkflowError::Udb)?; Ok(()) } } fn update_metric( - txs: &udb_util::TxnSubspace, + tx: &universaldb::Transaction, previous: Option, current: Option, ) { @@ -2880,7 +2692,7 @@ fn update_metric( } if let Some(previous) = previous { - txs.atomic_op( + tx.atomic_op( &keys::metric::GaugeMetricKey::new(previous), &(-1isize).to_le_bytes(), MutationType::Add, @@ -2888,7 +2700,7 @@ fn update_metric( } if let Some(current) = current { - txs.atomic_op( + tx.atomic_op( &keys::metric::GaugeMetricKey::new(current), &1usize.to_le_bytes(), MutationType::Add, @@ -2904,8 +2716,8 @@ struct WorkflowHistoryEventBuilder { name: Option, signal_id: Option, sub_workflow_id: Option, - input_chunks: Vec, - output_chunks: Vec, + input_chunks: Vec, + output_chunks: Vec, input_hash: Option>, error_count: usize, iteration: Option, diff --git a/packages/common/gasoline/core/src/error.rs b/packages/common/gasoline/core/src/error.rs index 7c79ae4617..7553046375 100644 --- a/packages/common/gasoline/core/src/error.rs +++ b/packages/common/gasoline/core/src/error.rs @@ -2,7 +2,6 @@ use std::time::{SystemTime, UNIX_EPOCH}; use rivet_util::Id; use tokio::time::Instant; -use universaldb as udb; use crate::ctx::common::RETRY_TIMEOUT_MS; @@ -134,8 +133,8 @@ pub enum WorkflowError { #[error("failed to deserialize event data: {0}")] DeserializeEventData(#[source] anyhow::Error), - #[error("fdb error: {0}")] - Fdb(#[from] udb::FdbBindingError), + #[error("udb error: {0}")] + Udb(#[source] anyhow::Error), #[error("pools error: {0}")] Pools(#[from] rivet_pools::Error), @@ -217,7 +216,7 @@ impl WorkflowError { } /// Any error that the workflow can continue on with its execution from. 
- pub fn is_recoverable(&self) -> bool { + pub(crate) fn is_recoverable(&self) -> bool { match self { WorkflowError::ActivityFailure(_, _) | WorkflowError::ActivityTimeout(_) diff --git a/packages/common/gasoline/core/src/history/location.rs b/packages/common/gasoline/core/src/history/location.rs index 3a7273b1be..d93f9d9141 100644 --- a/packages/common/gasoline/core/src/history/location.rs +++ b/packages/common/gasoline/core/src/history/location.rs @@ -158,9 +158,9 @@ impl Deref for Coordinate { } } -mod fdb { +mod udb { use super::Coordinate; - use udb_util::prelude::*; + use universaldb::prelude::*; impl TuplePack for Coordinate { fn pack( diff --git a/packages/common/gasoline/core/src/worker.rs b/packages/common/gasoline/core/src/worker.rs index b940811a4f..1223070763 100644 --- a/packages/common/gasoline/core/src/worker.rs +++ b/packages/common/gasoline/core/src/worker.rs @@ -257,11 +257,12 @@ impl Worker { // NOTE: No .in_current_span() because we want this to be a separate trace async move { if let Err(err) = ctx.run(current_span_ctx).await { - tracing::error!(?err, "unhandled workflow error"); + tracing::error!(?err, ?workflow_id, "unhandled workflow error"); sentry::with_scope( |scope| { scope.set_tag("error", err.to_string()); + scope.set_tag("workflow_id", workflow_id.to_string()); }, || { sentry::capture_message( diff --git a/packages/common/pools/Cargo.toml b/packages/common/pools/Cargo.toml index 113a5ddc00..f445b8cad3 100644 --- a/packages/common/pools/Cargo.toml +++ b/packages/common/pools/Cargo.toml @@ -10,8 +10,6 @@ anyhow.workspace = true async-nats.workspace = true clickhouse-inserter.workspace = true clickhouse.workspace = true -udb-util.workspace = true -universaldb.workspace = true futures-util.workspace = true governor.workspace = true hyper-tls.workspace = true @@ -20,8 +18,8 @@ lazy_static.workspace = true reqwest.workspace = true rivet-config.workspace = true rivet-metrics.workspace = true -universalpubsub.workspace = true rivet-util.workspace = true +serde.workspace = true tempfile.workspace = true thiserror.workspace = true tokio-native-tls.workspace = true @@ -30,9 +28,10 @@ tokio.workspace = true tracing-logfmt.workspace = true tracing-subscriber.workspace = true tracing.workspace = true +universaldb.workspace = true +universalpubsub.workspace = true url.workspace = true uuid.workspace = true -serde.workspace = true [dev-dependencies] divan.workspace = true diff --git a/packages/common/pools/src/db/udb.rs b/packages/common/pools/src/db/udb.rs index 7fff92efc8..9f6b0b855f 100644 --- a/packages/common/pools/src/db/udb.rs +++ b/packages/common/pools/src/db/udb.rs @@ -2,15 +2,14 @@ use std::{ops::Deref, sync::Arc}; use anyhow::*; use rivet_config::{Config, config}; -use universaldb as udb; #[derive(Clone)] pub struct UdbPool { - db: udb::Database, + db: universaldb::Database, } impl Deref for UdbPool { - type Target = udb::Database; + type Target = universaldb::Database; fn deref(&self) -> &Self::Target { &self.db @@ -21,18 +20,18 @@ impl Deref for UdbPool { pub async fn setup(config: Config) -> Result> { let db_driver = match config.database() { config::Database::Postgres(pg) => { - Arc::new(udb::driver::PostgresDatabaseDriver::new(pg.url.read().clone()).await?) - as udb::DatabaseDriverHandle + Arc::new(universaldb::driver::PostgresDatabaseDriver::new(pg.url.read().clone()).await?) + as universaldb::DatabaseDriverHandle } config::Database::FileSystem(fs) => { - Arc::new(udb::driver::RocksDbDatabaseDriver::new(fs.path.clone()).await?) 
- as udb::DatabaseDriverHandle + Arc::new(universaldb::driver::RocksDbDatabaseDriver::new(fs.path.clone()).await?) + as universaldb::DatabaseDriverHandle } }; tracing::debug!("udb started"); Ok(Some(UdbPool { - db: udb::Database::new(db_driver), + db: universaldb::Database::new(db_driver), })) } diff --git a/packages/common/types/Cargo.toml b/packages/common/types/Cargo.toml index cad21dfa05..0dde0d6af1 100644 --- a/packages/common/types/Cargo.toml +++ b/packages/common/types/Cargo.toml @@ -13,6 +13,6 @@ rivet-data.workspace = true rivet-runner-protocol.workspace = true rivet-util.workspace = true serde.workspace = true -udb-util.workspace = true +universaldb.workspace = true utoipa.workspace = true versioned-data-util.workspace = true diff --git a/packages/common/types/src/keys/pegboard/mod.rs b/packages/common/types/src/keys/pegboard/mod.rs index 1e3ff30358..406b2ae4b5 100644 --- a/packages/common/types/src/keys/pegboard/mod.rs +++ b/packages/common/types/src/keys/pegboard/mod.rs @@ -1,7 +1,7 @@ -use udb_util::prelude::*; +use universaldb::prelude::*; pub mod ns; -pub fn subspace() -> udb_util::Subspace { - udb_util::Subspace::new(&(RIVET, PEGBOARD)) +pub fn subspace() -> universaldb::utils::Subspace { + universaldb::utils::Subspace::new(&(RIVET, PEGBOARD)) } diff --git a/packages/common/types/src/keys/pegboard/ns.rs b/packages/common/types/src/keys/pegboard/ns.rs index fa04e89a6e..f513eae2fc 100644 --- a/packages/common/types/src/keys/pegboard/ns.rs +++ b/packages/common/types/src/keys/pegboard/ns.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use gas::prelude::*; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug)] pub struct ServerlessDesiredSlotsKey { diff --git a/packages/common/udb-util/Cargo.toml b/packages/common/udb-util/Cargo.toml deleted file mode 100644 index e89a97a4ff..0000000000 --- a/packages/common/udb-util/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "udb-util" -version.workspace = true -authors.workspace = true -license.workspace = true -edition.workspace = true - -[dependencies] -anyhow.workspace = true -async-trait.workspace = true -universaldb.workspace = true -futures-util.workspace = true -lazy_static.workspace = true -rivet-metrics.workspace = true -tokio.workspace = true -tracing.workspace = true diff --git a/packages/common/udb-util/src/ext.rs b/packages/common/udb-util/src/ext.rs deleted file mode 100644 index 0d5bcc9c0f..0000000000 --- a/packages/common/udb-util/src/ext.rs +++ /dev/null @@ -1,322 +0,0 @@ -use std::{ops::Deref, result::Result::Ok}; - -use anyhow::*; -use futures_util::TryStreamExt; -use universaldb::{ - self as udb, - options::{ConflictRangeType, MutationType, StreamingMode}, - tuple::{TuplePack, TupleUnpack}, -}; - -use crate::{FormalKey, Subspace, end_of_key_range}; - -pub trait TxnExt { - fn subspace<'a>(&'a self, subspace: Subspace) -> TxnSubspace<'a>; -} - -impl TxnExt for udb::Transaction { - fn subspace<'a>(&'a self, subspace: Subspace) -> TxnSubspace<'a> { - TxnSubspace { - tx: &self, - subspace, - } - } -} - -#[derive(Clone)] -pub struct TxnSubspace<'a> { - tx: &'a udb::Transaction, - subspace: Subspace, -} - -impl<'a> TxnSubspace<'a> { - pub fn subspace(&self, t: &T) -> Subspace { - self.subspace.subspace(t) - } - - pub fn pack(&self, t: &T) -> Vec { - self.subspace.pack(t) - } - - pub fn unpack<'de, T: TupleUnpack<'de>>( - &self, - key: &'de [u8], - ) -> Result { - self.subspace - .unpack(key) - .with_context(|| format!("failed unpacking key of {}", std::any::type_name::())) - 
.map_err(|x| udb::FdbBindingError::CustomError(x.into())) - } - - pub fn write( - &self, - key: &T, - value: T::Value, - ) -> Result<(), udb::FdbBindingError> { - self.tx.set( - &self.subspace.pack(key), - &key.serialize(value) - .with_context(|| { - format!( - "failed serializing key value of {}", - std::any::type_name::(), - ) - }) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, - ); - - Ok(()) - } - - pub async fn read<'de, T: FormalKey + TuplePack + TupleUnpack<'de>>( - &self, - key: &'de T, - snapshot: bool, - ) -> Result { - self.tx - .get(&self.subspace.pack(key), snapshot) - .await? - .read(key) - } - - pub async fn read_opt<'de, T: FormalKey + TuplePack + TupleUnpack<'de>>( - &self, - key: &'de T, - snapshot: bool, - ) -> Result, udb::FdbBindingError> { - self.tx - .get(&self.subspace.pack(key), snapshot) - .await? - .read_opt(key) - } - - pub async fn exists( - &self, - key: &T, - snapshot: bool, - ) -> Result { - Ok(self - .tx - .get(&self.subspace.pack(key), snapshot) - .await? - .is_some()) - } - - pub fn delete(&self, key: &T) { - self.tx.clear(&self.subspace.pack(key)); - } - - pub fn delete_key_subspace(&self, key: &T) { - self.tx - .clear_subspace_range(&self.subspace(&self.subspace.pack(key))); - } - - pub fn read_entry TupleUnpack<'de>>( - &self, - entry: &udb::future::FdbValue, - ) -> Result<(T, T::Value), udb::FdbBindingError> { - let key = self.unpack::(entry.key())?; - let value = key - .deserialize(entry.value()) - .with_context(|| { - format!( - "failed deserializing key value of {}", - std::any::type_name::() - ) - }) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; - - Ok((key, value)) - } - - pub async fn cherry_pick( - &self, - subspace: impl TuplePack + Send, - snapshot: bool, - ) -> Result { - T::cherry_pick(self, subspace, snapshot).await - } - - pub fn add_conflict_key( - &self, - key: &T, - conflict_type: ConflictRangeType, - ) -> Result<(), udb::FdbBindingError> { - let key_buf = self.subspace.pack(key); - - self.tx - .add_conflict_range(&key_buf, &end_of_key_range(&key_buf), conflict_type) - .map_err(Into::into) - } - - pub fn atomic_op<'de, T: FormalKey + TuplePack + TupleUnpack<'de>>( - &self, - key: &'de T, - param: &[u8], - op_type: MutationType, - ) { - self.tx.atomic_op(&self.subspace.pack(key), param, op_type) - } -} - -impl<'a> Deref for TxnSubspace<'a> { - type Target = udb::Transaction; - - fn deref(&self) -> &Self::Target { - self.tx - } -} - -pub trait SliceExt { - fn read<'de, T: FormalKey + TupleUnpack<'de>>( - &self, - key: &'de T, - ) -> Result; -} - -pub trait OptSliceExt { - fn read<'de, T: FormalKey + TupleUnpack<'de>>( - &self, - key: &'de T, - ) -> Result; - fn read_opt<'de, T: FormalKey + TupleUnpack<'de>>( - &self, - key: &'de T, - ) -> Result, udb::FdbBindingError>; -} - -impl SliceExt for udb::future::FdbSlice { - fn read<'de, T: FormalKey + TupleUnpack<'de>>( - &self, - key: &'de T, - ) -> Result { - key.deserialize(self) - .with_context(|| { - format!( - "failed deserializing key value of {}", - std::any::type_name::(), - ) - }) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())) - } -} - -impl OptSliceExt for Option { - fn read<'de, T: FormalKey + TupleUnpack<'de>>( - &self, - key: &'de T, - ) -> Result { - key.deserialize(&self.as_ref().ok_or(udb::FdbBindingError::CustomError( - format!("key should exist: {}", std::any::type_name::()).into(), - ))?) 
- .with_context(|| { - format!( - "failed deserializing key value of {}", - std::any::type_name::(), - ) - }) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())) - } - - fn read_opt<'de, T: FormalKey + TupleUnpack<'de>>( - &self, - key: &'de T, - ) -> Result, udb::FdbBindingError> { - if let Some(data) = self { - key.deserialize(data) - .map(Some) - .with_context(|| { - format!( - "failed deserializing key value of {}", - std::any::type_name::(), - ) - }) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())) - } else { - Ok(None) - } - } -} - -#[async_trait::async_trait] -pub trait CherryPick { - type Output; - - async fn cherry_pick( - txs: &TxnSubspace<'_>, - subspace: S, - snapshot: bool, - ) -> Result; -} - -// Implements `CherryPick` for any tuple size -macro_rules! impl_tuple { - ($($args:ident),*) => { - #[async_trait::async_trait] - impl<$($args: FormalKey + for<'de> TupleUnpack<'de>),*> CherryPick for ($($args),*) - where - $($args::Value: Send),* - { - type Output = ($($args::Value),*); - - async fn cherry_pick( - txs: &TxnSubspace<'_>, - subspace: S, - snapshot: bool, - ) -> Result { - let subspace = txs.subspace(&subspace); - - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { - mode: StreamingMode::WantAll, - ..(&subspace).into() - }, - snapshot, - ); - - $( - #[allow(non_snake_case)] - let mut $args = None; - )* - - loop { - let Some(entry) = stream.try_next().await? else { - break; - }; - - $( - if let Ok(key) = txs.unpack::<$args>(entry.key()) { - if $args.is_some() { - return Err(udb::FdbBindingError::CustomError( - format!("{} already picked", std::any::type_name::<$args>()).into() - )); - } - - let value = key.read(entry.value())?; - $args = Some(value); - continue; - } - )* - } - - Ok(( - $( - $args.ok_or(udb::FdbBindingError::CustomError( - format!("key not found in cherry pick: {}", std::any::type_name::<$args>()).into(), - ))?, - )* - )) - } - } - } -} - -impl_tuple!(A, B); -impl_tuple!(A, B, C); -impl_tuple!(A, B, C, D); -impl_tuple!(A, B, C, D, E); -impl_tuple!(A, B, C, D, E, F); -impl_tuple!(A, B, C, D, E, F, G); -impl_tuple!(A, B, C, D, E, F, G, H); -impl_tuple!(A, B, C, D, E, F, G, H, I); -impl_tuple!(A, B, C, D, E, F, G, H, I, J); diff --git a/packages/common/universaldb/Cargo.toml b/packages/common/universaldb/Cargo.toml index 8b9277637e..7a86606a08 100644 --- a/packages/common/universaldb/Cargo.toml +++ b/packages/common/universaldb/Cargo.toml @@ -8,20 +8,21 @@ edition.workspace = true [dependencies] anyhow.workspace = true async-trait.workspace = true +deadpool-postgres.workspace = true +foundationdb-tuple.workspace = true futures-util.workspace = true lazy_static.workspace = true rand.workspace = true +rivet-metrics.workspace = true rocksdb.workspace = true -foundationdb-tuple.workspace = true serde.workspace = true -tokio.workspace = true +thiserror.workspace = true tokio-postgres.workspace = true +tokio.workspace = true tracing.workspace = true -deadpool-postgres.workspace = true uuid.workspace = true [dev-dependencies] -udb-util.workspace = true rivet-config.workspace = true rivet-env.workspace = true rivet-pools.workspace = true diff --git a/packages/common/universaldb/src/database.rs b/packages/common/universaldb/src/database.rs index 6288e2de8d..508a638e6b 100644 --- a/packages/common/universaldb/src/database.rs +++ b/packages/common/universaldb/src/database.rs @@ -1,12 +1,12 @@ use std::future::Future; +use anyhow::{Result, anyhow}; use futures_util::FutureExt; -use crate::{FdbBindingError, FdbResult, driver::Erased}; - 
use crate::{ - MaybeCommitted, RetryableTransaction, Transaction, driver::DatabaseDriverHandle, + driver::{DatabaseDriverHandle, Erased}, options::DatabaseOption, + transaction::{RetryableTransaction, Transaction}, }; #[derive(Clone)] @@ -20,32 +20,32 @@ impl Database { } /// Run a closure with automatic retry logic - pub async fn run<'a, F, Fut, T>(&'a self, closure: F) -> Result + pub async fn run<'a, F, Fut, T>(&'a self, closure: F) -> Result where - F: Fn(RetryableTransaction, MaybeCommitted) -> Fut + Send + Sync, - Fut: Future> + Send, + F: Fn(RetryableTransaction) -> Fut + Send + Sync, + Fut: Future> + Send, T: Send + 'a + 'static, { let closure = &closure; self.driver - .run(Box::new(|tx, mc| { - async move { closure(tx, mc).await.map(|value| Box::new(value) as Erased) }.boxed() + .run(Box::new(|tx| { + async move { closure(tx).await.map(|value| Box::new(value) as Erased) }.boxed() })) .await .and_then(|res| { - res.downcast::().map(|x| *x).map_err(|_| { - FdbBindingError::CustomError("failed to downcast `run` return type".into()) - }) + res.downcast::() + .map(|x| *x) + .map_err(|_| anyhow!("failed to downcast `run` return type")) }) } /// Creates a new txn instance. - pub fn create_trx(&self) -> FdbResult { + pub fn create_trx(&self) -> Result { self.driver.create_trx() } /// Set a database option - pub fn set_option(&self, opt: DatabaseOption) -> FdbResult<()> { + pub fn set_option(&self, opt: DatabaseOption) -> Result<()> { self.driver.set_option(opt) } } diff --git a/packages/common/universaldb/src/driver/mod.rs b/packages/common/universaldb/src/driver/mod.rs index bbf059ea0b..dd8fb11f3f 100644 --- a/packages/common/universaldb/src/driver/mod.rs +++ b/packages/common/universaldb/src/driver/mod.rs @@ -1,11 +1,14 @@ use std::{any::Any, future::Future, pin::Pin, sync::Arc}; +use anyhow::{Result, bail}; + use crate::{ - FdbBindingError, FdbError, FdbResult, KeySelector, RangeOption, RetryableTransaction, - Transaction, - future::{FdbSlice, FdbValues}, + key_selector::KeySelector, options::{ConflictRangeType, DatabaseOption, MutationType}, - types::{MaybeCommitted, TransactionCommitError, TransactionCommitted}, + range_option::RangeOption, + transaction::{RetryableTransaction, Transaction}, + utils::IsolationLevel, + value::{Slice, Value, Values}, }; mod postgres; @@ -20,20 +23,12 @@ pub type Erased = Box; pub type DatabaseDriverHandle = Arc; pub trait DatabaseDriver: Send + Sync { - fn create_trx(&self) -> FdbResult; + fn create_trx(&self) -> Result; fn run<'a>( &'a self, - closure: Box< - dyn Fn( - RetryableTransaction, - MaybeCommitted, - ) -> BoxFut<'a, Result> - + Send - + Sync - + 'a, - >, - ) -> BoxFut<'a, Result>; - fn set_option(&self, opt: DatabaseOption) -> FdbResult<()>; + closure: Box BoxFut<'a, Result> + Send + Sync + 'a>, + ) -> BoxFut<'a, Result>; + fn set_option(&self, opt: DatabaseOption) -> Result<()>; } pub trait TransactionDriver: Send + Sync { @@ -43,24 +38,24 @@ pub trait TransactionDriver: Send + Sync { fn get<'a>( &'a self, key: &[u8], - snapshot: bool, - ) -> Pin>> + Send + 'a>>; + isolation_level: IsolationLevel, + ) -> Pin>> + Send + 'a>>; fn get_key<'a>( &'a self, selector: &KeySelector<'a>, - snapshot: bool, - ) -> Pin> + Send + 'a>>; + isolation_level: IsolationLevel, + ) -> Pin> + Send + 'a>>; fn get_range<'a>( &'a self, opt: &RangeOption<'a>, iteration: usize, - snapshot: bool, - ) -> Pin> + Send + 'a>>; + isolation_level: IsolationLevel, + ) -> Pin> + Send + 'a>>; fn get_ranges_keyvalues<'a>( &'a self, opt: RangeOption<'a>, - snapshot: bool, - ) -> 
crate::future::FdbStream<'a, crate::future::FdbValue>; + isolation_level: IsolationLevel, + ) -> crate::value::Stream<'a, Value>; // Write operations fn set(&self, key: &[u8], value: &[u8]); @@ -68,9 +63,7 @@ pub trait TransactionDriver: Send + Sync { fn clear_range(&self, begin: &[u8], end: &[u8]); // Transaction management - fn commit( - self: Box, - ) -> Pin> + Send>>; + fn commit(self: Box) -> Pin> + Send>>; fn reset(&mut self); fn cancel(&self); fn add_conflict_range( @@ -78,18 +71,17 @@ pub trait TransactionDriver: Send + Sync { begin: &[u8], end: &[u8], conflict_type: ConflictRangeType, - ) -> FdbResult<()>; + ) -> Result<()>; fn get_estimated_range_size_bytes<'a>( &'a self, begin: &'a [u8], end: &'a [u8], - ) -> Pin> + Send + 'a>>; + ) -> Pin> + Send + 'a>>; // Helper for committing without consuming self (for database drivers that need it) - fn commit_owned(&self) -> Pin> + Send + '_>> { + fn commit_ref(&self) -> Pin> + Send + '_>> { Box::pin(async move { - // Default implementation returns error - drivers that need this should override - Err(FdbError::from_code(1510)) + bail!("`commit_ref` unimplemented"); }) } } diff --git a/packages/common/universaldb/src/driver/postgres/database.rs b/packages/common/universaldb/src/driver/postgres/database.rs index 458d1e128b..4b0d2cc40f 100644 --- a/packages/common/universaldb/src/driver/postgres/database.rs +++ b/packages/common/universaldb/src/driver/postgres/database.rs @@ -1,13 +1,15 @@ use std::sync::{Arc, Mutex}; +use anyhow::{Context, Result}; use deadpool_postgres::{Config, ManagerConfig, Pool, PoolConfig, RecyclingMethod, Runtime}; use tokio_postgres::NoTls; use crate::{ - FdbBindingError, FdbError, FdbResult, MaybeCommitted, RetryableTransaction, Transaction, + RetryableTransaction, Transaction, driver::{BoxFut, DatabaseDriver, Erased}, + error::DatabaseError, options::DatabaseOption, - utils::calculate_tx_retry_backoff, + utils::{MaybeCommitted, calculate_tx_retry_backoff}, }; use super::transaction::PostgresTransactionDriver; @@ -18,7 +20,7 @@ pub struct PostgresDatabaseDriver { } impl PostgresDatabaseDriver { - pub async fn new(connection_string: String) -> FdbResult { + pub async fn new(connection_string: String) -> Result { tracing::debug!(connection_string = ?connection_string, "Creating PostgresDatabaseDriver"); // Create deadpool config from connection string @@ -36,22 +38,19 @@ impl PostgresDatabaseDriver { // Create the pool let pool = config .create_pool(Some(Runtime::Tokio1), NoTls) - .map_err(|e| { - tracing::error!(error = ?e, "Failed to create Postgres pool"); - FdbError::from_code(1510) - })?; + .context("failed to create postgres connection pool")?; tracing::debug!("Getting Postgres connection from pool"); // Get a connection from the pool to create the table - let conn = pool.get().await.map_err(|e| { - tracing::error!(error = ?e, "Failed to get Postgres connection"); - FdbError::from_code(1510) - })?; + let conn = pool + .get() + .await + .context("failed to get connection from postgres pool")?; // Enable btree gist conn.execute("CREATE EXTENSION IF NOT EXISTS btree_gist", &[]) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to create btree_gist extension")?; // Create the KV table if it doesn't exist conn.execute( @@ -62,7 +61,7 @@ impl PostgresDatabaseDriver { &[], ) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to create kv table")?; // Create range_type type if it doesn't exist conn.execute( @@ -74,7 +73,7 @@ impl PostgresDatabaseDriver { &[], ) .await - 
.map_err(|_| FdbError::from_code(1510))?; + .context("failed to create range_type enum")?; // Create bytearange type if it doesn't exist conn.execute( @@ -89,7 +88,7 @@ impl PostgresDatabaseDriver { &[], ) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to create bytearange type")?; // Create the conflict ranges table for non-snapshot reads // This enforces consistent reads for ranges by preventing overlapping conflict ranges @@ -110,7 +109,7 @@ impl PostgresDatabaseDriver { &[], ) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to create conflict_ranges table")?; // Connection is automatically returned to the pool when dropped drop(conn); @@ -123,69 +122,60 @@ impl PostgresDatabaseDriver { } impl DatabaseDriver for PostgresDatabaseDriver { - fn create_trx(&self) -> FdbResult { + fn create_trx(&self) -> Result { // Pass the connection pool to the transaction driver - Ok(Transaction::new(Box::new(PostgresTransactionDriver::new( + Ok(Transaction::new(Arc::new(PostgresTransactionDriver::new( self.pool.clone(), )))) } fn run<'a>( &'a self, - closure: Box< - dyn Fn( - RetryableTransaction, - MaybeCommitted, - ) -> BoxFut<'a, Result> - + Send - + Sync - + 'a, - >, - ) -> BoxFut<'a, Result> { + closure: Box BoxFut<'a, Result> + Send + Sync + 'a>, + ) -> BoxFut<'a, Result> { Box::pin(async move { let mut maybe_committed = MaybeCommitted(false); let max_retries = *self.max_retries.lock().unwrap(); for attempt in 0..max_retries { let tx = self.create_trx()?; - let retryable = RetryableTransaction::new(tx); + let mut retryable = RetryableTransaction::new(tx); + retryable.maybe_committed = maybe_committed; // Execute transaction - let result = closure(retryable.clone(), maybe_committed).await; - let fdb_error = match result { - std::result::Result::Ok(res) => { - match retryable.inner.driver.commit_owned().await { - Ok(_) => return Ok(res), - Err(e) => e, - } - } - std::result::Result::Err(e) => { - if let Some(fdb_error) = e.get_fdb_error() { - fdb_error - } else { - return Err(e); - } - } + let error = match closure(retryable.clone()).await { + Ok(res) => match retryable.inner.driver.commit_ref().await { + Ok(_) => return Ok(res), + Err(e) => e, + }, + Err(e) => e, }; - // Handle retry or return error - if fdb_error.is_retryable() { - if fdb_error.is_maybe_committed() { - maybe_committed = MaybeCommitted(true); - } + let chain = error + .chain() + .find_map(|x| x.downcast_ref::()); + + if let Some(db_error) = chain { + // Handle retry or return error + if db_error.is_retryable() { + if db_error.is_maybe_committed() { + maybe_committed = MaybeCommitted(true); + } - let backoff_ms = calculate_tx_retry_backoff(attempt as usize); - tokio::time::sleep(tokio::time::Duration::from_millis(backoff_ms)).await; - } else { - return Err(FdbBindingError::from(fdb_error)); + let backoff_ms = calculate_tx_retry_backoff(attempt as usize); + tokio::time::sleep(tokio::time::Duration::from_millis(backoff_ms)).await; + continue; + } } + + return Err(error); } - Err(FdbBindingError::from(FdbError::from_code(1007))) // Retry limit exceeded + Err(DatabaseError::MaxRetriesReached.into()) }) } - fn set_option(&self, opt: DatabaseOption) -> FdbResult<()> { + fn set_option(&self, opt: DatabaseOption) -> Result<()> { match opt { DatabaseOption::TransactionRetryLimit(limit) => { *self.max_retries.lock().unwrap() = limit; diff --git a/packages/common/universaldb/src/driver/postgres/transaction.rs b/packages/common/universaldb/src/driver/postgres/transaction.rs index 
93f3a62177..d275cfbbb5 100644 --- a/packages/common/universaldb/src/driver/postgres/transaction.rs +++ b/packages/common/universaldb/src/driver/postgres/transaction.rs @@ -4,15 +4,19 @@ use std::{ sync::{Arc, Mutex}, }; +use anyhow::{Context, Result}; use deadpool_postgres::Pool; use tokio::sync::{OnceCell, mpsc, oneshot}; use crate::{ - FdbError, FdbResult, KeySelector, RangeOption, TransactionCommitError, TransactionCommitted, driver::TransactionDriver, - future::{FdbSlice, FdbValues}, + error::DatabaseError, + key_selector::KeySelector, options::{ConflictRangeType, MutationType}, + range_option::RangeOption, tx_ops::{Operation, TransactionOperations}, + utils::IsolationLevel, + value::{KeyValue, Slice, Value, Values}, }; use super::transaction_task::{TransactionCommand, TransactionIsolationLevel, TransactionTask}; @@ -49,7 +53,7 @@ impl PostgresTransactionDriver { } /// Get or create the transaction task - async fn ensure_transaction(&self) -> FdbResult<&mpsc::Sender> { + async fn ensure_transaction(&self) -> Result<&mpsc::Sender> { self.tx_sender .get_or_try_init(|| async { let (sender, receiver) = mpsc::channel(100); @@ -62,16 +66,16 @@ impl PostgresTransactionDriver { ); tokio::spawn(task.run()); - Ok(sender) + anyhow::Ok(sender) }) .await - .map_err(|_: anyhow::Error| FdbError::from_code(1510)) + .context("failed to initialize postgres transaction task") } /// Get or create the snapshot transaction task /// This creates a separate REPEATABLE READ READ ONLY transaction /// to enforce reading from a consistent snapshot - async fn ensure_snapshot_transaction(&self) -> FdbResult<&mpsc::Sender> { + async fn ensure_snapshot_transaction(&self) -> Result<&mpsc::Sender> { self.snapshot_tx_sender .get_or_try_init(|| async { let (sender, receiver) = mpsc::channel(100); @@ -84,10 +88,10 @@ impl PostgresTransactionDriver { ); tokio::spawn(task.run()); - Ok(sender) + anyhow::Ok(sender) }) .await - .map_err(|_: anyhow::Error| FdbError::from_code(1510)) + .context("failed to initialize postgres transaction task") } } @@ -101,8 +105,8 @@ impl TransactionDriver for PostgresTransactionDriver { fn get<'a>( &'a self, key: &[u8], - snapshot: bool, - ) -> Pin>> + Send + 'a>> { + isolation_level: IsolationLevel, + ) -> Pin>> + Send + 'a>> { let key = key.to_vec(); Box::pin(async move { // Both snapshot and non-snapshot reads check local operations first @@ -113,7 +117,7 @@ impl TransactionDriver for PostgresTransactionDriver { }; ops.get_with_callback(&key, || async { - let tx_sender = if snapshot { + let tx_sender = if let IsolationLevel::Snapshot = isolation_level { self.ensure_snapshot_transaction().await? } else { self.ensure_transaction().await? 
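The postgres driver talks to a background transaction task over an mpsc channel and receives each reply on a oneshot channel; this change replaces the opaque FdbError::from_code(1510) at those call sites with anyhow context on both the send and the receive. Below is a minimal sketch of that round trip with a hypothetical helper name (the real code inlines this pattern at every call site); only the send/await/context calls come from the patch itself.

use anyhow::{Context, Result};
use tokio::sync::{mpsc, oneshot};

use super::transaction_task::TransactionCommand; // same path the surrounding module imports

// Hypothetical helper, for illustration only: send one command to the transaction
// task, then wait for its reply, attaching context instead of a numeric error code.
async fn round_trip<T>(
    tx_sender: &mpsc::Sender<TransactionCommand>,
    cmd: TransactionCommand,
    response_rx: oneshot::Receiver<Result<T>>,
) -> Result<T> {
    tx_sender
        .send(cmd)
        .await
        .context("failed to send postgres transaction command")?;
    // The `?` handles the channel error; the inner Result<T> from the task is returned as-is.
    response_rx
        .await
        .context("failed to receive postgres response")?
}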
@@ -127,12 +131,14 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; // Wait for response - let value = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let value = response_rx + .await + .context("failed to receive postgres response")??; - Ok(value) + Ok(value.map(Into::into)) }) .await }) @@ -141,8 +147,8 @@ impl TransactionDriver for PostgresTransactionDriver { fn get_key<'a>( &'a self, selector: &KeySelector<'a>, - snapshot: bool, - ) -> Pin> + Send + 'a>> { + isolation_level: IsolationLevel, + ) -> Pin> + Send + 'a>> { let selector = selector.clone(); Box::pin(async move { @@ -158,7 +164,7 @@ impl TransactionDriver for PostgresTransactionDriver { }; ops.get_key(&selector, || async { - let tx_sender = if snapshot { + let tx_sender = if let IsolationLevel::Snapshot = isolation_level { self.ensure_snapshot_transaction().await? } else { self.ensure_transaction().await? @@ -174,13 +180,15 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; // Wait for response - let result_key = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let result_key = response_rx + .await + .context("failed to receive postgres key selector response")??; // Return the key if found, or empty vector if not - Ok(result_key.unwrap_or_else(Vec::new)) + Ok(result_key.map(Into::into).unwrap_or_else(Slice::new)) }) .await }) @@ -190,8 +198,8 @@ impl TransactionDriver for PostgresTransactionDriver { &'a self, opt: &RangeOption<'a>, _iteration: usize, - snapshot: bool, - ) -> Pin> + Send + 'a>> { + isolation_level: IsolationLevel, + ) -> Pin> + Send + 'a>> { let opt = opt.clone(); Box::pin(async move { @@ -212,7 +220,7 @@ impl TransactionDriver for PostgresTransactionDriver { }; ops.get_range(&opt, || async { - let tx_sender = if snapshot { + let tx_sender = if let IsolationLevel::Snapshot = isolation_level { self.ensure_snapshot_transaction().await? } else { self.ensure_transaction().await? 
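Retry handling in the drivers' run() loops (postgres above, rocksdb later in this patch) now walks the anyhow error chain for a DatabaseError instead of matching FdbError codes 1020/1007. A small sketch of that classification in isolation; the helper name is illustrative, while chain(), downcast_ref::<DatabaseError>() and is_retryable() are exactly the calls the drivers use.

use crate::error::DatabaseError;

// Illustrative only: true when any cause in the chain is a retryable DatabaseError,
// mirroring the check performed inside the drivers' run() retry loop.
fn should_retry(error: &anyhow::Error) -> bool {
    error
        .chain()
        .find_map(|cause| cause.downcast_ref::<DatabaseError>())
        .map(|db_error| db_error.is_retryable())
        .unwrap_or(false)
}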
@@ -233,17 +241,19 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; // Wait for response - let keyvalues_data = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let keyvalues_data = response_rx + .await + .context("failed to receive postgres range response")??; let keyvalues: Vec<_> = keyvalues_data .into_iter() - .map(|(key, value)| crate::future::FdbKeyValue::new(key, value)) + .map(|(key, value)| KeyValue::new(key, value)) .collect(); - Ok(crate::future::FdbValues::new(keyvalues)) + Ok(Values::new(keyvalues)) }) .await }) @@ -252,16 +262,16 @@ impl TransactionDriver for PostgresTransactionDriver { fn get_ranges_keyvalues<'a>( &'a self, opt: RangeOption<'a>, - snapshot: bool, - ) -> crate::future::FdbStream<'a, crate::future::FdbValue> { + isolation_level: IsolationLevel, + ) -> crate::value::Stream<'a, Value> { use futures_util::{StreamExt, stream}; // Convert the range result into a stream let fut = async move { - match self.get_range(&opt, 1, snapshot).await { + match self.get_range(&opt, 1, isolation_level).await { Ok(values) => values .into_iter() - .map(|kv| Ok(crate::future::FdbValue::from_keyvalue(kv))) + .map(|kv| Ok(Value::from_keyvalue(kv))) .collect::>(), Err(e) => vec![Err(e)], } @@ -288,10 +298,7 @@ impl TransactionDriver for PostgresTransactionDriver { } } - fn commit( - self: Box, - ) -> Pin> + Send>> - { + fn commit(self: Box) -> Pin> + Send>> { Box::pin(async move { // Get operations and mark as committed let operations = { @@ -321,16 +328,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } Operation::Clear { key } => { let (response_tx, response_rx) = oneshot::channel(); @@ -340,16 +342,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } Operation::ClearRange { begin, end } => { let (response_tx, response_rx) = oneshot::channel(); @@ -360,16 +357,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } Operation::AtomicOp { key, @@ -385,16 +377,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? 
- .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } } } @@ -407,20 +394,15 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| TransactionCommitError::new(FdbError::from_code(1510)))?; + .context("failed to send postgres transaction command")?; // Wait for commit response response_rx .await - .map_err(|_| TransactionCommitError::new(FdbError::from_code(1510)))? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres commit response")??; } else if !operations.operations().is_empty() { // We have operations but no transaction - create one just for commit - let tx_sender = self - .ensure_transaction() - .await - .map_err(TransactionCommitError::new)?; - + let tx_sender = self.ensure_transaction().await?; // Execute all operations for op in operations.operations() { match op { @@ -433,16 +415,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } Operation::Clear { key } => { let (response_tx, response_rx) = oneshot::channel(); @@ -452,16 +429,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } Operation::ClearRange { begin, end } => { let (response_tx, response_rx) = oneshot::channel(); @@ -472,16 +444,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } Operation::AtomicOp { key, @@ -497,16 +464,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })?; + .context("failed to send postgres transaction command")?; response_rx .await - .map_err(|_| { - TransactionCommitError::new(FdbError::from_code(1510)) - })? - .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres response")??; } } } @@ -519,13 +481,12 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| TransactionCommitError::new(FdbError::from_code(1510)))?; + .context("failed to send postgres transaction command")?; // Wait for commit response response_rx .await - .map_err(|_| TransactionCommitError::new(FdbError::from_code(1510)))? 
- .map_err(TransactionCommitError::new)?; + .context("failed to receive postgres commit response")??; } Ok(()) @@ -554,7 +515,7 @@ impl TransactionDriver for PostgresTransactionDriver { begin: &[u8], end: &[u8], conflict_type: ConflictRangeType, - ) -> FdbResult<()> { + ) -> Result<()> { // For PostgreSQL, we implement conflict ranges using the conflict_ranges table // This ensures serializable isolation for the specified range @@ -585,31 +546,21 @@ impl TransactionDriver for PostgresTransactionDriver { // Try to send the add conflict range command // Since this is a synchronous method, we use try_send let (response_tx, _response_rx) = oneshot::channel(); - match tx_sender.try_send(TransactionCommand::AddConflictRange { - begin: begin_vec, - end: end_vec, - conflict_type, - response: response_tx, - }) { - Ok(_) => { - // Command sent successfully - // Note: We can't wait for the response in a sync method - // The actual conflict range acquisition will happen asynchronously - Ok(()) - } - Err(_) => { - // Channel is full or closed - // Return an error indicating we couldn't add the conflict range - Err(FdbError::from_code(1020)) // Transaction conflict error - } - } + tx_sender + .try_send(TransactionCommand::AddConflictRange { + begin: begin_vec, + end: end_vec, + conflict_type, + response: response_tx, + }) + .map_err(|_| DatabaseError::NotCommitted.into()) } fn get_estimated_range_size_bytes<'a>( &'a self, begin: &'a [u8], end: &'a [u8], - ) -> Pin> + Send + 'a>> { + ) -> Pin> + Send + 'a>> { let begin = begin.to_vec(); let end = end.to_vec(); @@ -625,16 +576,18 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres command")?; // Wait for response - let size = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let size = response_rx + .await + .context("failed to receive postgres size response")??; Ok(size) }) } - fn commit_owned(&self) -> Pin> + Send + '_>> { + fn commit_ref(&self) -> Pin> + Send + '_>> { Box::pin(async move { // Get operations and mark as committed let operations = { @@ -664,9 +617,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } Operation::Clear { key } => { let (response_tx, response_rx) = oneshot::channel(); @@ -676,9 +631,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } Operation::ClearRange { begin, end } => { let (response_tx, response_rx) = oneshot::channel(); @@ -689,9 +646,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } Operation::AtomicOp { key, @@ -707,9 +666,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| 
FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } } } @@ -722,10 +683,12 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; // Wait for commit response - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres commit response")??; } else if !operations.operations().is_empty() { // We have operations but no transaction - create one just for commit let tx_sender = self.ensure_transaction().await?; @@ -742,9 +705,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } Operation::Clear { key } => { let (response_tx, response_rx) = oneshot::channel(); @@ -754,9 +719,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } Operation::ClearRange { begin, end } => { let (response_tx, response_rx) = oneshot::channel(); @@ -767,9 +734,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } Operation::AtomicOp { key, @@ -785,9 +754,11 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres response")??; } } } @@ -800,10 +771,12 @@ impl TransactionDriver for PostgresTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send postgres transaction command")?; // Wait for commit response - response_rx.await.map_err(|_| FdbError::from_code(1510))??; + response_rx + .await + .context("failed to receive postgres commit response")??; } Ok(()) diff --git a/packages/common/universaldb/src/driver/postgres/transaction_task.rs b/packages/common/universaldb/src/driver/postgres/transaction_task.rs index 230acbc1d7..3587b791db 100644 --- a/packages/common/universaldb/src/driver/postgres/transaction_task.rs +++ b/packages/common/universaldb/src/driver/postgres/transaction_task.rs @@ -1,10 +1,11 @@ +use anyhow::{Result, anyhow}; use deadpool_postgres::Pool; use tokio::sync::{mpsc, oneshot}; use tokio_postgres::IsolationLevel; use crate::{ - FdbError, FdbResult, atomic::apply_atomic_op, + error::DatabaseError, options::{ConflictRangeType, MutationType}, versionstamp::substitute_versionstamp_if_incomplete, }; @@ -20,13 +21,13 @@ pub enum TransactionCommand { // Read operations Get { key: 
Vec, - response: oneshot::Sender>>>, + response: oneshot::Sender>>>, }, GetKey { key: Vec, or_equal: bool, offset: i32, - response: oneshot::Sender>>>, + response: oneshot::Sender>>>, }, GetRange { begin: Vec, @@ -37,45 +38,45 @@ pub enum TransactionCommand { end_offset: i32, limit: Option, reverse: bool, - response: oneshot::Sender, Vec)>>>, + response: oneshot::Sender, Vec)>>>, }, // Write operations Set { key: Vec, value: Vec, - response: oneshot::Sender>, + response: oneshot::Sender>, }, Clear { key: Vec, - response: oneshot::Sender>, + response: oneshot::Sender>, }, ClearRange { begin: Vec, end: Vec, - response: oneshot::Sender>, + response: oneshot::Sender>, }, AtomicOp { key: Vec, param: Vec, op_type: MutationType, - response: oneshot::Sender>, + response: oneshot::Sender>, }, // Transaction control Commit { has_conflict_ranges: bool, - response: oneshot::Sender>, + response: oneshot::Sender>, }, // Conflict ranges AddConflictRange { begin: Vec, end: Vec, conflict_type: ConflictRangeType, - response: oneshot::Sender>, + response: oneshot::Sender>, }, GetEstimatedRangeSize { begin: Vec, end: Vec, - response: oneshot::Sender>, + response: oneshot::Sender>, }, } @@ -117,34 +118,44 @@ impl TransactionTask { while let Some(cmd) = self.receiver.recv().await { match cmd { TransactionCommand::Get { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::GetKey { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::GetRange { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::Set { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::Clear { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::ClearRange { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::AtomicOp { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::Commit { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::AddConflictRange { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::GetEstimatedRangeSize { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } } } @@ -176,34 +187,44 @@ impl TransactionTask { while let Some(cmd) = self.receiver.recv().await { match cmd { TransactionCommand::Get { response, .. 
} => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::GetKey { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::GetRange { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::Set { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::Clear { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::ClearRange { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::AtomicOp { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::Commit { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::AddConflictRange { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } TransactionCommand::GetEstimatedRangeSize { response, .. } => { - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); } } } @@ -262,7 +283,8 @@ impl TransactionTask { _ => { // For other offset values, we need more complex logic // This is a simplified fallback that may not handle all cases perfectly - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); continue; } }; @@ -373,10 +395,12 @@ impl TransactionTask { if let TransactionIsolationLevel::RepeatableReadReadOnly = self.isolation_level { tracing::error!("cannot set in read only txn"); - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = + response.send(Err(anyhow!("postgres transaction connection failed"))); continue; }; + // TODO: versionstamps need to be calculated on the sql side, not in rust let value = substitute_versionstamp_if_incomplete(value, 0); let query = "INSERT INTO kv (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET value = $2"; @@ -395,7 +419,8 @@ impl TransactionTask { if let TransactionIsolationLevel::RepeatableReadReadOnly = self.isolation_level { tracing::error!("cannot set in read only txn"); - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = + response.send(Err(anyhow!("postgres transaction connection failed"))); continue; }; @@ -419,7 +444,8 @@ impl TransactionTask { if let TransactionIsolationLevel::RepeatableReadReadOnly = self.isolation_level { tracing::error!("cannot clear range in read only txn"); - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = + response.send(Err(anyhow!("postgres transaction connection failed"))); continue; }; @@ -453,7 +479,8 @@ impl TransactionTask { if let 
TransactionIsolationLevel::RepeatableReadReadOnly = self.isolation_level { tracing::error!("cannot apply atomic op in read only txn"); - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = + response.send(Err(anyhow!("postgres transaction connection failed"))); continue; }; @@ -513,7 +540,8 @@ impl TransactionTask { self.isolation_level { tracing::error!("cannot release conflict ranges in read only txn"); - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = response + .send(Err(anyhow!("postgres transaction connection failed"))); continue; }; @@ -540,7 +568,8 @@ impl TransactionTask { if let TransactionIsolationLevel::RepeatableReadReadOnly = self.isolation_level { tracing::error!("cannot add conflict range in read only txn"); - let _ = response.send(Err(FdbError::from_code(1510))); + let _ = + response.send(Err(anyhow!("postgres transaction connection failed"))); continue; }; @@ -605,26 +634,26 @@ impl TransactionTask { } } -/// Maps PostgreSQL errors to FdbError codes -fn map_postgres_error(err: tokio_postgres::Error) -> FdbError { +/// Maps PostgreSQL error to DatabaseError +fn map_postgres_error(err: tokio_postgres::Error) -> anyhow::Error { let error_str = err.to_string(); if error_str.contains("exclusion_violation") || error_str.contains("violates exclusion constraint") { // Retryable - another transaction has a conflicting range - FdbError::from_code(1020) + DatabaseError::NotCommitted.into() } else if error_str.contains("serialization failure") || error_str.contains("could not serialize") || error_str.contains("deadlock detected") { // Retryable - transaction conflict - FdbError::from_code(1020) + DatabaseError::NotCommitted.into() } else if error_str.contains("current transaction is aborted") { // Returned by the rest of the commands in a txn if it failed for exclusion reasons - FdbError::from_code(1020) + DatabaseError::NotCommitted.into() } else { tracing::error!(%err, "postgres error"); // Non-retryable error - FdbError::from_code(1510) + anyhow::Error::new(err) } } diff --git a/packages/common/universaldb/src/driver/rocksdb/conflict_range_tracker.rs b/packages/common/universaldb/src/driver/rocksdb/conflict_range_tracker.rs index 22e2d20183..4f33b395b2 100644 --- a/packages/common/universaldb/src/driver/rocksdb/conflict_range_tracker.rs +++ b/packages/common/universaldb/src/driver/rocksdb/conflict_range_tracker.rs @@ -1,7 +1,9 @@ use std::collections::HashMap; use std::sync::{Arc, RwLock}; -use crate::{FdbError, FdbResult}; +use anyhow::Result; + +use crate::error::DatabaseError; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub struct TransactionId(u64); @@ -58,7 +60,7 @@ impl ConflictRangeTracker { begin: &[u8], end: &[u8], is_write: bool, - ) -> FdbResult<()> { + ) -> Result<()> { let new_range = ConflictRange { begin: begin.to_vec(), end: end.to_vec(), @@ -77,7 +79,7 @@ impl ConflictRangeTracker { for existing_range in ranges { if new_range.conflicts_with(existing_range) { // Found a conflict - return retryable error - return Err(FdbError::from_code(1020)); + return Err(DatabaseError::NotCommitted.into()); } } } @@ -92,7 +94,7 @@ impl ConflictRangeTracker { begin: &[u8], end: &[u8], is_write: bool, - ) -> FdbResult<()> { + ) -> Result<()> { // First check for conflicts self.check_conflict(tx_id, begin, end, is_write)?; @@ -200,9 +202,12 @@ mod tests { // Try to add overlapping write range for tx2 - should conflict let result = tracker.add_range(tx2, b"b", b"d", true); assert!(result.is_err()); - // Check for conflict error code 1020 + // 
Check for conflict error if let Err(e) = result { - assert_eq!(e.code(), 1020); + assert!(matches!( + e.downcast::().unwrap(), + DatabaseError::NotCommitted + )); } } @@ -220,9 +225,12 @@ mod tests { // Try to add overlapping write range for tx2 - should conflict let result = tracker.add_range(tx2, b"b", b"d", true); assert!(result.is_err()); - // Check for conflict error code 1020 + // Check for conflict error if let Err(e) = result { - assert_eq!(e.code(), 1020); + assert!(matches!( + e.downcast::().unwrap(), + DatabaseError::NotCommitted + )); } } @@ -264,9 +272,12 @@ mod tests { // Try to add overlapping read range for tx2 - should conflict let result = tracker.add_range(tx2, b"b", b"d", false); assert!(result.is_err()); - // Check for conflict error code 1020 + // Check for conflict error if let Err(e) = result { - assert_eq!(e.code(), 1020); + assert!(matches!( + e.downcast::().unwrap(), + DatabaseError::NotCommitted + )); } } diff --git a/packages/common/universaldb/src/driver/rocksdb/database.rs b/packages/common/universaldb/src/driver/rocksdb/database.rs index 745fb752cb..28575beefd 100644 --- a/packages/common/universaldb/src/driver/rocksdb/database.rs +++ b/packages/common/universaldb/src/driver/rocksdb/database.rs @@ -3,13 +3,15 @@ use std::{ sync::{Arc, Mutex}, }; +use anyhow::{Context, Result}; use rocksdb::{OptimisticTransactionDB, Options}; use crate::{ - FdbBindingError, FdbError, FdbResult, MaybeCommitted, RetryableTransaction, Transaction, + RetryableTransaction, Transaction, driver::{BoxFut, DatabaseDriver, Erased}, + error::DatabaseError, options::DatabaseOption, - utils::calculate_tx_retry_backoff, + utils::{MaybeCommitted, calculate_tx_retry_backoff}, }; use super::{conflict_range_tracker::ConflictRangeTracker, transaction::RocksDbTransactionDriver}; @@ -21,9 +23,9 @@ pub struct RocksDbDatabaseDriver { } impl RocksDbDatabaseDriver { - pub async fn new(db_path: PathBuf) -> FdbResult { + pub async fn new(db_path: PathBuf) -> Result { // Create directory if it doesn't exist - std::fs::create_dir_all(&db_path).map_err(|_| FdbError::from_code(1510))?; + std::fs::create_dir_all(&db_path).context("failed to create database directory")?; // Configure RocksDB options let mut opts = Options::default(); @@ -33,8 +35,7 @@ impl RocksDbDatabaseDriver { opts.set_max_total_wal_size(64 * 1024 * 1024); // 64MB // Open the OptimisticTransactionDB - let db = - OptimisticTransactionDB::open(&opts, db_path).map_err(|_| FdbError::from_code(1510))?; + let db = OptimisticTransactionDB::open(&opts, db_path).context("failed to open rocksdb")?; Ok(RocksDbDatabaseDriver { db: Arc::new(db), @@ -45,8 +46,8 @@ impl RocksDbDatabaseDriver { } impl DatabaseDriver for RocksDbDatabaseDriver { - fn create_trx(&self) -> FdbResult { - Ok(Transaction::new(Box::new(RocksDbTransactionDriver::new( + fn create_trx(&self) -> Result { + Ok(Transaction::new(Arc::new(RocksDbTransactionDriver::new( self.db.clone(), self.conflict_tracker.clone(), )))) @@ -54,61 +55,51 @@ impl DatabaseDriver for RocksDbDatabaseDriver { fn run<'a>( &'a self, - closure: Box< - dyn Fn( - RetryableTransaction, - MaybeCommitted, - ) -> BoxFut<'a, Result> - + Send - + Sync - + 'a, - >, - ) -> BoxFut<'a, Result> { + closure: Box BoxFut<'a, Result> + Send + Sync + 'a>, + ) -> BoxFut<'a, Result> { Box::pin(async move { let mut maybe_committed = MaybeCommitted(false); let max_retries = *self.max_retries.lock().unwrap(); for attempt in 0..max_retries { let tx = self.create_trx()?; - let retryable = RetryableTransaction::new(tx); + let mut 
retryable = RetryableTransaction::new(tx); + retryable.maybe_committed = maybe_committed; // Execute transaction - let result = closure(retryable.clone(), maybe_committed).await; - let fdb_error = match result { - std::result::Result::Ok(res) => { - match retryable.inner.driver.commit_owned().await { - Ok(_) => return Ok(res), - Err(e) => e, - } - } - std::result::Result::Err(e) => { - if let Some(fdb_error) = e.get_fdb_error() { - fdb_error - } else { - return Err(e); - } - } + let error = match closure(retryable.clone()).await { + Ok(res) => match retryable.inner.driver.commit_ref().await { + Ok(_) => return Ok(res), + Err(e) => e, + }, + Err(e) => e, }; - // Handle retry or return error - if fdb_error.is_retryable() { - if fdb_error.is_maybe_committed() { - maybe_committed = MaybeCommitted(true); - } + let chain = error + .chain() + .find_map(|x| x.downcast_ref::()); - let backoff_ms = calculate_tx_retry_backoff(attempt as usize); - tokio::time::sleep(tokio::time::Duration::from_millis(backoff_ms)).await; - } else { - return Err(FdbBindingError::from(fdb_error)); + if let Some(db_error) = chain { + // Handle retry or return error + if db_error.is_retryable() { + if db_error.is_maybe_committed() { + maybe_committed = MaybeCommitted(true); + } + + let backoff_ms = calculate_tx_retry_backoff(attempt as usize); + tokio::time::sleep(tokio::time::Duration::from_millis(backoff_ms)).await; + continue; + } } + + return Err(error); } - // Max retries exceeded - Err(FdbBindingError::from(FdbError::from_code(1007))) + Err(DatabaseError::MaxRetriesReached.into()) }) } - fn set_option(&self, opt: DatabaseOption) -> FdbResult<()> { + fn set_option(&self, opt: DatabaseOption) -> Result<()> { match opt { DatabaseOption::TransactionRetryLimit(limit) => { *self.max_retries.lock().unwrap() = limit; diff --git a/packages/common/universaldb/src/driver/rocksdb/transaction.rs b/packages/common/universaldb/src/driver/rocksdb/transaction.rs index 9798d1d369..82de40ca56 100644 --- a/packages/common/universaldb/src/driver/rocksdb/transaction.rs +++ b/packages/common/universaldb/src/driver/rocksdb/transaction.rs @@ -4,15 +4,19 @@ use std::{ sync::{Arc, Mutex}, }; +use anyhow::{Context, Result, anyhow}; use rocksdb::OptimisticTransactionDB; use tokio::sync::{OnceCell, mpsc, oneshot}; use crate::{ - FdbError, FdbResult, KeySelector, RangeOption, TransactionCommitError, TransactionCommitted, driver::TransactionDriver, - future::{FdbSlice, FdbValues}, + error::DatabaseError, + key_selector::KeySelector, options::{ConflictRangeType, MutationType}, + range_option::RangeOption, tx_ops::TransactionOperations, + utils::IsolationLevel, + value::{Slice, Value, Values}, }; use super::{ @@ -63,7 +67,7 @@ impl RocksDbTransactionDriver { } /// Get or create the transaction task for non-snapshot operations - async fn ensure_transaction(&self) -> FdbResult<&mpsc::Sender> { + async fn ensure_transaction(&self) -> Result<&mpsc::Sender> { self.tx_sender .get_or_try_init(|| async { let (sender, receiver) = mpsc::channel(100); @@ -76,14 +80,14 @@ impl RocksDbTransactionDriver { ); tokio::spawn(task.run()); - Ok(sender) + anyhow::Ok(sender) }) .await - .map_err(|_: anyhow::Error| FdbError::from_code(1510)) + .context("failed to initialize transaction task") } /// Get or create the transaction task for snapshot operations - async fn ensure_snapshot_transaction(&self) -> FdbResult<&mpsc::Sender> { + async fn ensure_snapshot_transaction(&self) -> Result<&mpsc::Sender> { self.snapshot_tx_sender .get_or_try_init(|| async { let (sender, 
receiver) = mpsc::channel(100); @@ -96,10 +100,10 @@ impl RocksDbTransactionDriver { ); tokio::spawn(task.run()); - Ok(sender) + anyhow::Ok(sender) }) .await - .map_err(|_: anyhow::Error| FdbError::from_code(1510)) + .context("failed to initialize transaction task") } } @@ -120,8 +124,8 @@ impl TransactionDriver for RocksDbTransactionDriver { fn get<'a>( &'a self, key: &[u8], - snapshot: bool, - ) -> Pin>> + Send + 'a>> { + isolation_level: IsolationLevel, + ) -> Pin>> + Send + 'a>> { let key = key.to_vec(); Box::pin(async move { // Both snapshot and non-snapshot reads check local operations first @@ -132,7 +136,7 @@ impl TransactionDriver for RocksDbTransactionDriver { }; ops.get_with_callback(&key, || async { - if snapshot { + if let IsolationLevel::Snapshot = isolation_level { // For snapshot reads, don't add conflict ranges let tx_sender = self.ensure_snapshot_transaction().await?; @@ -144,10 +148,12 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send transaction command")?; // Wait for response - let value = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let value = response_rx + .await + .context("failed to receive transaction response")??; Ok(value) } else { @@ -169,10 +175,12 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send transaction command")?; // Wait for response - let value = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let value = response_rx + .await + .context("failed to receive transaction response")??; Ok(value) } @@ -184,8 +192,8 @@ impl TransactionDriver for RocksDbTransactionDriver { fn get_key<'a>( &'a self, selector: &KeySelector<'a>, - snapshot: bool, - ) -> Pin> + Send + 'a>> { + isolation_level: IsolationLevel, + ) -> Pin> + Send + 'a>> { let selector = selector.clone(); Box::pin(async move { @@ -201,7 +209,7 @@ impl TransactionDriver for RocksDbTransactionDriver { }; ops.get_key(&selector, || async { - let tx_sender = if snapshot { + let tx_sender = if let IsolationLevel::Snapshot = isolation_level { self.ensure_snapshot_transaction().await? } else { self.ensure_transaction().await? 
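With the reworked Database::run earlier in this patch, the caller's closure takes only the RetryableTransaction and returns anyhow::Result<T>; the drivers now record a possible prior commit on the transaction itself (retryable.maybe_committed) instead of passing MaybeCommitted as a second closure argument. A minimal, hypothetical call for illustration; the closure body is a placeholder and not part of this patch.

// Inside an async fn returning anyhow::Result<()>; `db` is a universaldb::Database.
// The closure may run several times if the driver classifies an error as retryable.
let value: u64 = db
    .run(|tx| async move {
        // Real callers issue reads and writes through `tx` here.
        let _ = tx;
        anyhow::Ok(7)
    })
    .await?;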
@@ -217,13 +225,15 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send commit command")?; // Wait for response - let result_key = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let result_key = response_rx + .await + .context("failed to receive key selector response")??; // Return the key if found, or empty vector if not - Ok(result_key.unwrap_or_else(Vec::new)) + Ok(result_key.unwrap_or_else(Slice::new)) }) .await }) @@ -233,8 +243,8 @@ impl TransactionDriver for RocksDbTransactionDriver { &'a self, opt: &RangeOption<'a>, iteration: usize, - snapshot: bool, - ) -> Pin> + Send + 'a>> { + isolation_level: IsolationLevel, + ) -> Pin> + Send + 'a>> { // Extract fields from RangeOption for the async closure let opt = opt.clone(); let begin_selector = opt.begin.clone(); @@ -251,7 +261,7 @@ impl TransactionDriver for RocksDbTransactionDriver { }; ops.get_range(&opt, || async { - if snapshot { + if let IsolationLevel::Snapshot = isolation_level { // For snapshot reads, don't add conflict ranges let tx_sender = self.ensure_snapshot_transaction().await?; @@ -271,10 +281,12 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send transaction command")?; // Wait for response - let values = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let values = response_rx + .await + .context("failed to receive range response")??; Ok(values) } else { @@ -304,10 +316,12 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send transaction command")?; // Wait for response - let values = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let values = response_rx + .await + .context("failed to receive range response")??; Ok(values) } @@ -319,8 +333,8 @@ impl TransactionDriver for RocksDbTransactionDriver { fn get_ranges_keyvalues<'a>( &'a self, opt: RangeOption<'a>, - snapshot: bool, - ) -> crate::future::FdbStream<'a, crate::future::FdbValue> { + isolation_level: IsolationLevel, + ) -> crate::value::Stream<'a, Value> { use futures_util::StreamExt; // Extract the selectors from RangeOption, same as get_range does @@ -332,7 +346,7 @@ impl TransactionDriver for RocksDbTransactionDriver { Box::pin( futures_util::stream::once(async move { // Get the transaction sender based on snapshot mode - let tx_sender = if snapshot { + let tx_sender = if let IsolationLevel::Snapshot = isolation_level { match self.ensure_snapshot_transaction().await { Ok(sender) => sender, Err(e) => return futures_util::stream::iter(vec![Err(e)]), @@ -360,26 +374,25 @@ impl TransactionDriver for RocksDbTransactionDriver { }) .await { - return futures_util::stream::iter(vec![Err(FdbError::from_code(1510))]); + return futures_util::stream::iter(vec![Err(anyhow!( + "failed to send stream command" + ))]); } match response_rx.await { Ok(Ok(result)) => { - // Convert to FdbValues for the stream + // Convert to Values for the stream let values: Vec<_> = result .iter() - .map(|kv| { - Ok(crate::future::FdbValue::new( - kv.key().to_vec(), - kv.value().to_vec(), - )) - }) + .map(|kv| Ok(Value::new(kv.key().to_vec(), kv.value().to_vec()))) .collect(); futures_util::stream::iter(values) } Ok(Err(e)) => futures_util::stream::iter(vec![Err(e)]), - Err(_) => 
futures_util::stream::iter(vec![Err(FdbError::from_code(1510))]), + Err(_) => futures_util::stream::iter(vec![Err(anyhow!( + "failed to receive stream response" + ))]), } }) .flatten(), @@ -422,16 +435,13 @@ impl TransactionDriver for RocksDbTransactionDriver { state.operations.clear_range(begin, end); } - fn commit( - self: Box, - ) -> Pin> + Send>> - { + fn commit(self: Box) -> Pin> + Send>> { Box::pin(async move { // Get the operations and conflict ranges to commit let operations = { let mut state = self.state.lock().unwrap(); if state.committed { - return Err(TransactionCommitError::new(FdbError::from_code(2017))); + return Err(DatabaseError::UsedDuringCommit.into()); } state.committed = true; @@ -439,10 +449,7 @@ impl TransactionDriver for RocksDbTransactionDriver { }; // Get the transaction sender - let tx_sender = self - .ensure_transaction() - .await - .map_err(|e| TransactionCommitError::new(e))?; + let tx_sender = self.ensure_transaction().await.map_err(|e| e)?; // Send commit command with operations and conflict ranges let (response_tx, response_rx) = oneshot::channel(); @@ -452,19 +459,19 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| TransactionCommitError::new(FdbError::from_code(1510)))?; + .context("failed to send commit command")?; // Wait for response let result = response_rx .await - .map_err(|_| TransactionCommitError::new(FdbError::from_code(1510)))?; + .context("failed to receive commit response")?; // Release conflict ranges after successful commit if result.is_ok() { self.conflict_tracker.release_transaction(self.tx_id); } - result.map_err(|e| TransactionCommitError::new(e)) + result.map_err(|e| e) }) } @@ -500,7 +507,7 @@ impl TransactionDriver for RocksDbTransactionDriver { begin: &[u8], end: &[u8], conflict_type: ConflictRangeType, - ) -> FdbResult<()> { + ) -> Result<()> { // Determine if this is a write conflict range let is_write = match conflict_type { ConflictRangeType::Write => true, @@ -523,7 +530,7 @@ impl TransactionDriver for RocksDbTransactionDriver { &'a self, begin: &'a [u8], end: &'a [u8], - ) -> Pin> + Send + 'a>> { + ) -> Pin> + Send + 'a>> { let begin = begin.to_vec(); let end = end.to_vec(); @@ -539,22 +546,24 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send commit command")?; // Wait for response - let size = response_rx.await.map_err(|_| FdbError::from_code(1510))??; + let size = response_rx + .await + .context("failed to receive size response")??; Ok(size) }) } - fn commit_owned(&self) -> Pin> + Send + '_>> { + fn commit_ref(&self) -> Pin> + Send + '_>> { Box::pin(async move { // Get the operations to commit let operations = { let mut state = self.state.lock().unwrap(); if state.committed { - return Err(FdbError::from_code(2017)); + return Err(DatabaseError::UsedDuringCommit.into()); } state.committed = true; @@ -572,10 +581,12 @@ impl TransactionDriver for RocksDbTransactionDriver { response: response_tx, }) .await - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to send commit command")?; // Wait for response - let result = response_rx.await.map_err(|_| FdbError::from_code(1510))?; + let result = response_rx + .await + .context("failed to receive commit response")?; // Release conflict ranges after successful commit if result.is_ok() { diff --git a/packages/common/universaldb/src/driver/rocksdb/transaction_task.rs 
b/packages/common/universaldb/src/driver/rocksdb/transaction_task.rs index 6f00173fcc..9d14e7693c 100644 --- a/packages/common/universaldb/src/driver/rocksdb/transaction_task.rs +++ b/packages/common/universaldb/src/driver/rocksdb/transaction_task.rs @@ -1,27 +1,29 @@ use std::sync::Arc; +use anyhow::{Context, Result, bail}; use rocksdb::{ OptimisticTransactionDB, ReadOptions, Transaction as RocksDbTransaction, WriteOptions, }; use tokio::sync::{mpsc, oneshot}; use crate::{ - FdbError, FdbResult, KeySelector, TransactionCommitted, atomic::apply_atomic_op, - future::{FdbKeyValue, FdbSlice, FdbValues}, + error::DatabaseError, + key_selector::KeySelector, tx_ops::{Operation, TransactionOperations}, + value::{KeyValue, Slice, Values}, }; pub enum TransactionCommand { Get { key: Vec, - response: oneshot::Sender>>, + response: oneshot::Sender>>, }, GetKey { key: Vec, or_equal: bool, offset: i32, - response: oneshot::Sender>>, + response: oneshot::Sender>>, }, GetRange { begin_key: Vec, @@ -33,16 +35,16 @@ pub enum TransactionCommand { limit: Option, reverse: bool, iteration: usize, - response: oneshot::Sender>, + response: oneshot::Sender>, }, Commit { operations: TransactionOperations, - response: oneshot::Sender>, + response: oneshot::Sender>, }, GetEstimatedRangeSize { begin: Vec, end: Vec, - response: oneshot::Sender>, + response: oneshot::Sender>, }, Cancel, } @@ -138,16 +140,15 @@ impl TransactionTask { self.db.transaction_opt(&write_opts, &txn_opts) } - async fn handle_get(&mut self, key: &[u8]) -> FdbResult> { + async fn handle_get(&mut self, key: &[u8]) -> Result> { let txn = self.create_transaction(); let read_opts = ReadOptions::default(); - match txn.get_opt(key, &read_opts) { - Ok(Some(value)) => Ok(Some(value)), - Ok(None) => Ok(None), - Err(_) => Err(FdbError::from_code(1510)), - } + Ok(txn + .get_opt(key, &read_opts) + .context("failed to read key from rocksdb")? 
+ .map(|v| v.into())) } async fn handle_get_key( @@ -155,7 +156,7 @@ impl TransactionTask { key: &[u8], or_equal: bool, offset: i32, - ) -> FdbResult> { + ) -> Result> { let txn = self.create_transaction(); let read_opts = ReadOptions::default(); @@ -174,14 +175,9 @@ impl TransactionTask { read_opts, ); for item in iter { - match item { - Ok((k, _v)) => { - return Ok(Some(k.to_vec())); - } - Err(_) => { - return Err(FdbError::from_code(1510)); - } - } + let (k, _v) = + item.context("failed to iterate rocksdb for first_greater_or_equal")?; + return Ok(Some(k.to_vec().into())); } Ok(None) } @@ -192,18 +188,13 @@ impl TransactionTask { read_opts, ); for item in iter { - match item { - Ok((k, _v)) => { - // Skip if it's the exact key - if k.as_ref() == key { - continue; - } - return Ok(Some(k.to_vec())); - } - Err(_) => { - return Err(FdbError::from_code(1510)); - } + let (k, _v) = + item.context("failed to iterate rocksdb for first_greater_than")?; + // Skip if it's the exact key + if k.as_ref() == key { + continue; } + return Ok(Some(k.to_vec().into())); } Ok(None) } @@ -216,16 +207,10 @@ impl TransactionTask { ); for item in iter { - match item { - Ok((k, _v)) => { - // We want strictly less than - if k.as_ref() < key { - return Ok(Some(k.to_vec())); - } - } - Err(_) => { - return Err(FdbError::from_code(1510)); - } + let (k, _v) = item.context("failed to iterate rocksdb for last_less_than")?; + // We want strictly less than + if k.as_ref() < key { + return Ok(Some(k.to_vec().into())); } } Ok(None) @@ -239,23 +224,18 @@ impl TransactionTask { ); for item in iter { - match item { - Ok((k, _v)) => { - // We want less than or equal - if k.as_ref() <= key { - return Ok(Some(k.to_vec())); - } - } - Err(_) => { - return Err(FdbError::from_code(1510)); - } + let (k, _v) = + item.context("failed to iterate rocksdb for last_less_or_equal")?; + // We want less than or equal + if k.as_ref() <= key { + return Ok(Some(k.to_vec().into())); } } Ok(None) } _ => { // For other offset values, return an error - Err(FdbError::from_code(1510)) + bail!("invalid key selector offset") } } } @@ -266,7 +246,7 @@ impl TransactionTask { txn: &RocksDbTransaction, selector: &KeySelector<'_>, _read_opts: &ReadOptions, - ) -> FdbResult> { + ) -> Result> { let key = selector.key(); let offset = selector.offset(); let or_equal = selector.or_equal(); @@ -285,14 +265,10 @@ impl TransactionTask { let mut keys: Vec> = Vec::new(); for item in iter { - match item { - Ok((k, _v)) => { - keys.push(k.to_vec()); - if keys.len() > (offset.abs() + 1) as usize { - break; - } - } - Err(_) => return Err(FdbError::from_code(1510)), + let (k, _v) = item.context("failed to iterate rocksdb for key selector")?; + keys.push(k.to_vec()); + if keys.len() > (offset.abs() + 1) as usize { + break; } } @@ -326,10 +302,7 @@ impl TransactionTask { } } - async fn handle_commit( - &mut self, - operations: TransactionOperations, - ) -> FdbResult { + async fn handle_commit(&mut self, operations: TransactionOperations) -> Result<()> { // Create a new transaction for this commit let txn = self.create_transaction(); @@ -346,10 +319,11 @@ impl TransactionTask { ); txn.put(key, &value) - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to set key in rocksdb")?; } Operation::Clear { key } => { - txn.delete(key).map_err(|_| FdbError::from_code(1510))?; + txn.delete(key) + .context("failed to delete key from rocksdb")?; } Operation::ClearRange { begin, end } => { // RocksDB doesn't have a native clear_range, so we need to iterate and delete @@ -360,15 
+334,12 @@ impl TransactionTask { ); for item in iter { - match item { - Ok((k, _v)) => { - if k.as_ref() >= end.as_slice() { - break; - } - txn.delete(&k).map_err(|_| FdbError::from_code(1510))?; - } - Err(_) => return Err(FdbError::from_code(1510)), + let (k, _v) = item.context("failed to iterate rocksdb for clear range")?; + if k.as_ref() >= end.as_slice() { + break; } + txn.delete(&k) + .context("failed to delete key in range from rocksdb")?; } } Operation::AtomicOp { @@ -380,7 +351,7 @@ impl TransactionTask { let read_opts = ReadOptions::default(); let current_value = txn .get_opt(key, &read_opts) - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to get current value for atomic operation")?; // Apply the atomic operation let current_slice = current_value.as_deref(); @@ -389,9 +360,10 @@ impl TransactionTask { // Store the result if let Some(new_value) = &new_value { txn.put(key, new_value) - .map_err(|_| FdbError::from_code(1510))?; + .context("failed to set atomic operation result")?; } else { - txn.delete(key).map_err(|_| FdbError::from_code(1510))?; + txn.delete(key) + .context("failed to delete key after atomic operation")?; } } } @@ -407,10 +379,10 @@ impl TransactionTask { Err(e) => { // Check if this is a conflict error if e.to_string().contains("conflict") { - // Return retryable error code 1020 - Err(FdbError::from_code(1020)) + // Return retryable error + Err(DatabaseError::NotCommitted.into()) } else { - Err(FdbError::from_code(1510)) + Err(e.into()) } } } @@ -427,7 +399,7 @@ impl TransactionTask { limit: Option, reverse: bool, _iteration: usize, - ) -> FdbResult { + ) -> Result { let txn = self.create_transaction(); let read_opts = ReadOptions::default(); @@ -449,20 +421,16 @@ impl TransactionTask { let limit = limit.unwrap_or(usize::MAX); for item in iter { - match item { - Ok((k, v)) => { - // Check if we've reached the end key - if k.as_ref() >= resolved_end.as_slice() { - break; - } + let (k, v) = item.context("failed to iterate rocksdb for get range")?; + // Check if we've reached the end key + if k.as_ref() >= resolved_end.as_slice() { + break; + } - results.push(FdbKeyValue::new(k.to_vec(), v.to_vec())); + results.push(KeyValue::new(k.to_vec(), v.to_vec())); - if results.len() >= limit { - break; - } - } - Err(_) => return Err(FdbError::from_code(1510)), + if results.len() >= limit { + break; } } @@ -471,7 +439,7 @@ impl TransactionTask { results.reverse(); } - Ok(FdbValues::new(results)) + Ok(Values::new(results)) } fn resolve_key_selector_for_range( @@ -480,7 +448,7 @@ impl TransactionTask { key: &[u8], or_equal: bool, offset: i32, - ) -> FdbResult> { + ) -> Result> { // Based on PostgreSQL's interpretation: // (false, 1) => first_greater_or_equal // (true, 1) => first_greater_than @@ -497,14 +465,10 @@ impl TransactionTask { read_opts, ); for item in iter { - match item { - Ok((k, _v)) => { - return Ok(k.to_vec()); - } - Err(_) => { - return Err(FdbError::from_code(1510)); - } - } + let (k, _v) = item.context( + "failed to iterate rocksdb for range selector first_greater_or_equal", + )?; + return Ok(k.to_vec()); } // If no key found, return a key that will make the range empty Ok(vec![0xff; 255]) @@ -516,18 +480,14 @@ impl TransactionTask { read_opts, ); for item in iter { - match item { - Ok((k, _v)) => { - // Skip if it's the exact key - if k.as_ref() == key { - continue; - } - return Ok(k.to_vec()); - } - Err(_) => { - return Err(FdbError::from_code(1510)); - } + let (k, _v) = item.context( + "failed to iterate rocksdb for range selector 
first_greater_than", + )?; + // Skip if it's the exact key + if k.as_ref() == key { + continue; } + return Ok(k.to_vec()); } // If no key found, return a key that will make the range empty Ok(vec![0xff; 255]) @@ -540,11 +500,7 @@ impl TransactionTask { } } - async fn handle_get_estimated_range_size( - &mut self, - begin: &[u8], - end: &[u8], - ) -> FdbResult { + async fn handle_get_estimated_range_size(&mut self, begin: &[u8], end: &[u8]) -> Result { let range = rocksdb::Range::new(begin, end); Ok(self diff --git a/packages/common/universaldb/src/error.rs b/packages/common/universaldb/src/error.rs new file mode 100644 index 0000000000..04451239e5 --- /dev/null +++ b/packages/common/universaldb/src/error.rs @@ -0,0 +1,32 @@ +#[derive(thiserror::Error, Debug)] +pub enum DatabaseError { + #[error("transaction not committed due to conflict with another transaction")] + NotCommitted, + + // TODO: Implement in rocksdb and postgres drivers + #[error("transaction is too old to perform reads or be committed")] + TransactionTooOld, + + #[error("max number of transaction retries reached")] + MaxRetriesReached, + + #[error("operation issued while a commit was outstanding")] + UsedDuringCommit, + // #[error(transparent)] + // Custom(Box), +} + +impl DatabaseError { + pub fn is_retryable(&self) -> bool { + use DatabaseError::*; + + match self { + NotCommitted | TransactionTooOld | MaxRetriesReached => true, + _ => false, + } + } + + pub fn is_maybe_committed(&self) -> bool { + false + } +} diff --git a/packages/common/universaldb/src/future.rs b/packages/common/universaldb/src/future.rs deleted file mode 100644 index 56497cd845..0000000000 --- a/packages/common/universaldb/src/future.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::pin::Pin; - -use crate::FdbError; -use futures_util::Stream; - -pub type FdbSlice = Vec; - -#[derive(Debug, Clone)] -pub struct FdbValue(FdbKeyValue); - -impl FdbValue { - pub fn new(key: Vec, value: Vec) -> Self { - FdbValue(FdbKeyValue::new(key, value)) - } - - pub fn from_keyvalue(kv: FdbKeyValue) -> Self { - FdbValue(kv) - } - - pub fn key(&self) -> &[u8] { - self.0.key() - } - - pub fn value(&self) -> &[u8] { - self.0.value() - } - - pub fn into_parts(self) -> (Vec, Vec) { - self.0.into_parts() - } -} - -// FdbValues wraps a Vec to match FoundationDB API -#[derive(Debug, Clone)] -pub struct FdbValues { - values: Vec, - more: bool, -} - -impl FdbValues { - pub fn new(values: Vec) -> Self { - FdbValues { - values, - more: false, - } - } - - pub fn with_more(values: Vec, more: bool) -> Self { - FdbValues { values, more } - } - - pub fn more(&self) -> bool { - self.more - } - - pub fn into_vec(self) -> Vec { - self.values - } - - pub fn len(&self) -> usize { - self.values.len() - } - - pub fn is_empty(&self) -> bool { - self.values.is_empty() - } - - pub fn iter(&self) -> std::slice::Iter<'_, FdbKeyValue> { - self.values.iter() - } - - pub fn into_iter(self) -> std::vec::IntoIter { - self.values.into_iter() - } -} - -// impl Deref for FdbValues { -// type Target = [FdbKeyValue]; -// fn deref(&self) -> &Self::Target { -// &self.values -// } -// } -// impl AsRef<[FdbKeyValue]> for FdbValues { -// fn as_ref(&self) -> &[FdbKeyValue] { -// self.deref() -// } -// } - -// KeyValue type with key() and value() methods -#[derive(Debug, Clone)] -pub struct FdbKeyValue { - key: Vec, - value: Vec, -} - -impl FdbKeyValue { - pub fn new(key: Vec, value: Vec) -> Self { - FdbKeyValue { key, value } - } - - pub fn key(&self) -> &[u8] { - &self.key - } - - pub fn value(&self) -> &[u8] { - 
&self.value - } - - pub fn into_parts(self) -> (Vec, Vec) { - (self.key, self.value) - } - - pub fn to_value(self) -> FdbValue { - FdbValue::from_keyvalue(self) - } - - pub fn value_ref(&self) -> FdbValue { - FdbValue::from_keyvalue(self.clone()) - } -} - -// Stream type for range queries - generic over item type -pub type FdbStream<'a, T = FdbKeyValue> = - Pin> + Send + 'a>>; - -// UNIMPLEMENTED: -pub type FdbAddress = (); -pub type FdbAddresses = (); -pub type FdbValuesIter = (); diff --git a/packages/common/universaldb/src/inherited/README.md b/packages/common/universaldb/src/inherited/README.md deleted file mode 100644 index 8a554cd3c8..0000000000 --- a/packages/common/universaldb/src/inherited/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Inherited source - -Files from this folder were copied from the foundationdbrs crate. Since this repo doesn't directly use FDB, we only copied parts that we needed for the UniversalDB api to work. - -This removes the dependency on foundationdb-sys, allowing static compilation with musl. - -Origin: https://github.com/foundationdb-rs/foundationdb-rs at 34955a582e964c42c68717b03f97fd0ea3b3cc02 \ No newline at end of file diff --git a/packages/common/universaldb/src/inherited/error.rs b/packages/common/universaldb/src/inherited/error.rs deleted file mode 100644 index 8ac63b35e2..0000000000 --- a/packages/common/universaldb/src/inherited/error.rs +++ /dev/null @@ -1,1313 +0,0 @@ -// Copyright 2018 foundationdb-rs developers, https://github.com/Clikengo/foundationdb-rs/graphs/contributors -// Copyright 2013-2018 Apple, Inc and the FoundationDB project authors. -// -// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be -// copied, modified, or distributed except according to those terms. - -//! 
Error types for the Fdb crate - -#![allow(non_upper_case_globals)] - -use crate::options; -use crate::tuple::PackError; -use std::fmt; -use std::fmt::{Debug, Display, Formatter}; - -#[allow(non_camel_case_types)] -pub type fdb_error_t = ::std::os::raw::c_int; -#[allow(non_camel_case_types)] -pub type fdb_bool_t = ::std::os::raw::c_int; - -pub const success: i32 = 0; -pub const end_of_stream: i32 = 1; -pub const operation_failed: i32 = 1000; -pub const wrong_shard_server: i32 = 1001; -pub const operation_obsolete: i32 = 1002; -pub const cold_cache_server: i32 = 1003; -pub const timed_out: i32 = 1004; -pub const coordinated_state_conflict: i32 = 1005; -pub const all_alternatives_failed: i32 = 1006; -pub const transaction_too_old: i32 = 1007; -pub const no_more_servers: i32 = 1008; -pub const future_version: i32 = 1009; -pub const movekeys_conflict: i32 = 1010; -pub const tlog_stopped: i32 = 1011; -pub const server_request_queue_full: i32 = 1012; -pub const not_committed: i32 = 1020; -pub const commit_unknown_result: i32 = 1021; -pub const commit_unknown_result_fatal: i32 = 1022; -pub const transaction_cancelled: i32 = 1025; -pub const connection_failed: i32 = 1026; -pub const coordinators_changed: i32 = 1027; -pub const new_coordinators_timed_out: i32 = 1028; -pub const watch_cancelled: i32 = 1029; -pub const request_maybe_delivered: i32 = 1030; -pub const transaction_timed_out: i32 = 1031; -pub const too_many_watches: i32 = 1032; -pub const locality_information_unavailable: i32 = 1033; -pub const watches_disabled: i32 = 1034; -pub const default_error_or: i32 = 1035; -pub const accessed_unreadable: i32 = 1036; -pub const process_behind: i32 = 1037; -pub const database_locked: i32 = 1038; -pub const cluster_version_changed: i32 = 1039; -pub const external_client_already_loaded: i32 = 1040; -pub const lookup_failed: i32 = 1041; -pub const commit_proxy_memory_limit_exceeded: i32 = 1042; -pub const shutdown_in_progress: i32 = 1043; -pub const serialization_failed: i32 = 1044; -pub const connection_unreferenced: i32 = 1048; -pub const connection_idle: i32 = 1049; -pub const disk_adapter_reset: i32 = 1050; -pub const batch_transaction_throttled: i32 = 1051; -pub const dd_cancelled: i32 = 1052; -pub const dd_not_found: i32 = 1053; -pub const wrong_connection_file: i32 = 1054; -pub const version_already_compacted: i32 = 1055; -pub const local_config_changed: i32 = 1056; -pub const failed_to_reach_quorum: i32 = 1057; -pub const unsupported_format_version: i32 = 1058; -pub const unknown_change_feed: i32 = 1059; -pub const change_feed_not_registered: i32 = 1060; -pub const granule_assignment_conflict: i32 = 1061; -pub const change_feed_cancelled: i32 = 1062; -pub const blob_granule_file_load_error: i32 = 1063; -pub const blob_granule_transaction_too_old: i32 = 1064; -pub const blob_manager_replaced: i32 = 1065; -pub const change_feed_popped: i32 = 1066; -pub const remote_kvs_cancelled: i32 = 1067; -pub const page_header_wrong_page_id: i32 = 1068; -pub const page_header_checksum_failed: i32 = 1069; -pub const page_header_version_not_supported: i32 = 1070; -pub const page_encoding_not_supported: i32 = 1071; -pub const page_decoding_failed: i32 = 1072; -pub const unexpected_encoding_type: i32 = 1073; -pub const encryption_key_not_found: i32 = 1074; -pub const data_move_cancelled: i32 = 1075; -pub const data_move_dest_team_not_found: i32 = 1076; -pub const blob_worker_full: i32 = 1077; -pub const grv_proxy_memory_limit_exceeded: i32 = 1078; -pub const blob_granule_request_failed: i32 = 1079; -pub const 
storage_too_many_feed_streams: i32 = 1080; -pub const storage_engine_not_initialized: i32 = 1081; -pub const unknown_storage_engine: i32 = 1082; -pub const duplicate_snapshot_request: i32 = 1083; -pub const dd_config_changed: i32 = 1084; -pub const consistency_check_urgent_task_failed: i32 = 1085; -pub const data_move_conflict: i32 = 1086; -pub const consistency_check_urgent_duplicate_request: i32 = 1087; -pub const broken_promise: i32 = 1100; -pub const operation_cancelled: i32 = 1101; -pub const future_released: i32 = 1102; -pub const connection_leaked: i32 = 1103; -pub const never_reply: i32 = 1104; -pub const retry: i32 = 1105; -pub const recruitment_failed: i32 = 1200; -pub const move_to_removed_server: i32 = 1201; -pub const worker_removed: i32 = 1202; -pub const cluster_recovery_failed: i32 = 1203; -pub const master_max_versions_in_flight: i32 = 1204; -pub const tlog_failed: i32 = 1205; -pub const worker_recovery_failed: i32 = 1206; -pub const please_reboot: i32 = 1207; -pub const please_reboot_delete: i32 = 1208; -pub const commit_proxy_failed: i32 = 1209; -pub const resolver_failed: i32 = 1210; -pub const server_overloaded: i32 = 1211; -pub const backup_worker_failed: i32 = 1212; -pub const tag_throttled: i32 = 1213; -pub const grv_proxy_failed: i32 = 1214; -pub const dd_tracker_cancelled: i32 = 1215; -pub const failed_to_progress: i32 = 1216; -pub const invalid_cluster_id: i32 = 1217; -pub const restart_cluster_controller: i32 = 1218; -pub const please_reboot_kv_store: i32 = 1219; -pub const incompatible_software_version: i32 = 1220; -pub const audit_storage_failed: i32 = 1221; -pub const audit_storage_exceeded_request_limit: i32 = 1222; -pub const proxy_tag_throttled: i32 = 1223; -pub const key_value_store_deadline_exceeded: i32 = 1224; -pub const storage_quota_exceeded: i32 = 1225; -pub const audit_storage_error: i32 = 1226; -pub const master_failed: i32 = 1227; -pub const test_failed: i32 = 1228; -pub const retry_clean_up_datamove_tombstone_added: i32 = 1229; -pub const persist_new_audit_metadata_error: i32 = 1230; -pub const cancel_audit_storage_failed: i32 = 1231; -pub const audit_storage_cancelled: i32 = 1232; -pub const location_metadata_corruption: i32 = 1233; -pub const audit_storage_task_outdated: i32 = 1234; -pub const transaction_throttled_hot_shard: i32 = 1235; -pub const storage_replica_comparison_error: i32 = 1236; -pub const unreachable_storage_replica: i32 = 1237; -pub const bulkload_task_failed: i32 = 1238; -pub const bulkload_task_outdated: i32 = 1239; -pub const range_lock_failed: i32 = 1241; -pub const transaction_rejected_range_locked: i32 = 1242; -pub const bulkdump_task_failed: i32 = 1243; -pub const bulkdump_task_outdated: i32 = 1244; -pub const bulkload_fileset_invalid_filepath: i32 = 1245; -pub const bulkload_manifest_decode_error: i32 = 1246; -pub const range_lock_reject: i32 = 1247; -pub const range_unlock_reject: i32 = 1248; -pub const bulkload_dataset_not_cover_required_range: i32 = 1249; -pub const platform_error: i32 = 1500; -pub const large_alloc_failed: i32 = 1501; -pub const performance_counter_error: i32 = 1502; -pub const bad_allocator: i32 = 1503; -pub const io_error: i32 = 1510; -pub const file_not_found: i32 = 1511; -pub const bind_failed: i32 = 1512; -pub const file_not_readable: i32 = 1513; -pub const file_not_writable: i32 = 1514; -pub const no_cluster_file_found: i32 = 1515; -pub const file_too_large: i32 = 1516; -pub const non_sequential_op: i32 = 1517; -pub const http_bad_response: i32 = 1518; -pub const http_not_accepted: i32 = 
1519; -pub const checksum_failed: i32 = 1520; -pub const io_timeout: i32 = 1521; -pub const file_corrupt: i32 = 1522; -pub const http_request_failed: i32 = 1523; -pub const http_auth_failed: i32 = 1524; -pub const http_bad_request_id: i32 = 1525; -pub const rest_invalid_uri: i32 = 1526; -pub const rest_invalid_rest_client_knob: i32 = 1527; -pub const rest_connectpool_key_not_found: i32 = 1528; -pub const lock_file_failure: i32 = 1529; -pub const rest_unsupported_protocol: i32 = 1530; -pub const rest_malformed_response: i32 = 1531; -pub const rest_max_base_cipher_len: i32 = 1532; -pub const resource_not_found: i32 = 1533; -pub const client_invalid_operation: i32 = 2000; -pub const commit_read_incomplete: i32 = 2002; -pub const test_specification_invalid: i32 = 2003; -pub const key_outside_legal_range: i32 = 2004; -pub const inverted_range: i32 = 2005; -pub const invalid_option_value: i32 = 2006; -pub const invalid_option: i32 = 2007; -pub const network_not_setup: i32 = 2008; -pub const network_already_setup: i32 = 2009; -pub const read_version_already_set: i32 = 2010; -pub const version_invalid: i32 = 2011; -pub const range_limits_invalid: i32 = 2012; -pub const invalid_database_name: i32 = 2013; -pub const attribute_not_found: i32 = 2014; -pub const future_not_set: i32 = 2015; -pub const future_not_error: i32 = 2016; -pub const used_during_commit: i32 = 2017; -pub const invalid_mutation_type: i32 = 2018; -pub const attribute_too_large: i32 = 2019; -pub const transaction_invalid_version: i32 = 2020; -pub const no_commit_version: i32 = 2021; -pub const environment_variable_network_option_failed: i32 = 2022; -pub const transaction_read_only: i32 = 2023; -pub const invalid_cache_eviction_policy: i32 = 2024; -pub const network_cannot_be_restarted: i32 = 2025; -pub const blocked_from_network_thread: i32 = 2026; -pub const invalid_config_db_range_read: i32 = 2027; -pub const invalid_config_db_key: i32 = 2028; -pub const invalid_config_path: i32 = 2029; -pub const mapper_bad_index: i32 = 2030; -pub const mapper_no_such_key: i32 = 2031; -pub const mapper_bad_range_decriptor: i32 = 2032; -pub const quick_get_key_values_has_more: i32 = 2033; -pub const quick_get_value_miss: i32 = 2034; -pub const quick_get_key_values_miss: i32 = 2035; -pub const blob_granule_no_ryw: i32 = 2036; -pub const blob_granule_not_materialized: i32 = 2037; -pub const get_mapped_key_values_has_more: i32 = 2038; -pub const get_mapped_range_reads_your_writes: i32 = 2039; -pub const checkpoint_not_found: i32 = 2040; -pub const key_not_tuple: i32 = 2041; -pub const value_not_tuple: i32 = 2042; -pub const mapper_not_tuple: i32 = 2043; -pub const invalid_checkpoint_format: i32 = 2044; -pub const invalid_throttle_quota_value: i32 = 2045; -pub const failed_to_create_checkpoint: i32 = 2046; -pub const failed_to_restore_checkpoint: i32 = 2047; -pub const failed_to_create_checkpoint_shard_metadata: i32 = 2048; -pub const address_parse_error: i32 = 2049; -pub const incompatible_protocol_version: i32 = 2100; -pub const transaction_too_large: i32 = 2101; -pub const key_too_large: i32 = 2102; -pub const value_too_large: i32 = 2103; -pub const connection_string_invalid: i32 = 2104; -pub const address_in_use: i32 = 2105; -pub const invalid_local_address: i32 = 2106; -pub const tls_error: i32 = 2107; -pub const unsupported_operation: i32 = 2108; -pub const too_many_tags: i32 = 2109; -pub const tag_too_long: i32 = 2110; -pub const too_many_tag_throttles: i32 = 2111; -pub const special_keys_cross_module_read: i32 = 2112; -pub const 
special_keys_no_module_found: i32 = 2113; -pub const special_keys_write_disabled: i32 = 2114; -pub const special_keys_no_write_module_found: i32 = 2115; -pub const special_keys_cross_module_clear: i32 = 2116; -pub const special_keys_api_failure: i32 = 2117; -pub const client_lib_invalid_metadata: i32 = 2118; -pub const client_lib_already_exists: i32 = 2119; -pub const client_lib_not_found: i32 = 2120; -pub const client_lib_not_available: i32 = 2121; -pub const client_lib_invalid_binary: i32 = 2122; -pub const no_external_client_provided: i32 = 2123; -pub const all_external_clients_failed: i32 = 2124; -pub const incompatible_client: i32 = 2125; -pub const tenant_name_required: i32 = 2130; -pub const tenant_not_found: i32 = 2131; -pub const tenant_already_exists: i32 = 2132; -pub const tenant_not_empty: i32 = 2133; -pub const invalid_tenant_name: i32 = 2134; -pub const tenant_prefix_allocator_conflict: i32 = 2135; -pub const tenants_disabled: i32 = 2136; -pub const illegal_tenant_access: i32 = 2138; -pub const invalid_tenant_group_name: i32 = 2139; -pub const invalid_tenant_configuration: i32 = 2140; -pub const cluster_no_capacity: i32 = 2141; -pub const tenant_removed: i32 = 2142; -pub const invalid_tenant_state: i32 = 2143; -pub const tenant_locked: i32 = 2144; -pub const invalid_cluster_name: i32 = 2160; -pub const invalid_metacluster_operation: i32 = 2161; -pub const cluster_already_exists: i32 = 2162; -pub const cluster_not_found: i32 = 2163; -pub const cluster_not_empty: i32 = 2164; -pub const cluster_already_registered: i32 = 2165; -pub const metacluster_no_capacity: i32 = 2166; -pub const management_cluster_invalid_access: i32 = 2167; -pub const tenant_creation_permanently_failed: i32 = 2168; -pub const cluster_removed: i32 = 2169; -pub const cluster_restoring: i32 = 2170; -pub const invalid_data_cluster: i32 = 2171; -pub const metacluster_mismatch: i32 = 2172; -pub const conflicting_restore: i32 = 2173; -pub const invalid_metacluster_configuration: i32 = 2174; -pub const unsupported_metacluster_version: i32 = 2175; -pub const api_version_unset: i32 = 2200; -pub const api_version_already_set: i32 = 2201; -pub const api_version_invalid: i32 = 2202; -pub const api_version_not_supported: i32 = 2203; -pub const api_function_missing: i32 = 2204; -pub const exact_mode_without_limits: i32 = 2210; -pub const invalid_tuple_data_type: i32 = 2250; -pub const invalid_tuple_index: i32 = 2251; -pub const key_not_in_subspace: i32 = 2252; -pub const manual_prefixes_not_enabled: i32 = 2253; -pub const prefix_in_partition: i32 = 2254; -pub const cannot_open_root_directory: i32 = 2255; -pub const directory_already_exists: i32 = 2256; -pub const directory_does_not_exist: i32 = 2257; -pub const parent_directory_does_not_exist: i32 = 2258; -pub const mismatched_layer: i32 = 2259; -pub const invalid_directory_layer_metadata: i32 = 2260; -pub const cannot_move_directory_between_partitions: i32 = 2261; -pub const cannot_use_partition_as_subspace: i32 = 2262; -pub const incompatible_directory_version: i32 = 2263; -pub const directory_prefix_not_empty: i32 = 2264; -pub const directory_prefix_in_use: i32 = 2265; -pub const invalid_destination_directory: i32 = 2266; -pub const cannot_modify_root_directory: i32 = 2267; -pub const invalid_uuid_size: i32 = 2268; -pub const invalid_versionstamp_size: i32 = 2269; -pub const backup_error: i32 = 2300; -pub const restore_error: i32 = 2301; -pub const backup_duplicate: i32 = 2311; -pub const backup_unneeded: i32 = 2312; -pub const backup_bad_block_size: i32 = 2313; -pub 
const backup_invalid_url: i32 = 2314; -pub const backup_invalid_info: i32 = 2315; -pub const backup_cannot_expire: i32 = 2316; -pub const backup_auth_missing: i32 = 2317; -pub const backup_auth_unreadable: i32 = 2318; -pub const backup_does_not_exist: i32 = 2319; -pub const backup_not_filterable_with_key_ranges: i32 = 2320; -pub const backup_not_overlapped_with_keys_filter: i32 = 2321; -pub const bucket_not_in_url: i32 = 2322; -pub const backup_parse_s3_response_failure: i32 = 2323; -pub const restore_invalid_version: i32 = 2361; -pub const restore_corrupted_data: i32 = 2362; -pub const restore_missing_data: i32 = 2363; -pub const restore_duplicate_tag: i32 = 2364; -pub const restore_unknown_tag: i32 = 2365; -pub const restore_unknown_file_type: i32 = 2366; -pub const restore_unsupported_file_version: i32 = 2367; -pub const restore_bad_read: i32 = 2368; -pub const restore_corrupted_data_padding: i32 = 2369; -pub const restore_destination_not_empty: i32 = 2370; -pub const restore_duplicate_uid: i32 = 2371; -pub const task_invalid_version: i32 = 2381; -pub const task_interrupted: i32 = 2382; -pub const invalid_encryption_key_file: i32 = 2383; -pub const blob_restore_missing_logs: i32 = 2384; -pub const blob_restore_corrupted_logs: i32 = 2385; -pub const blob_restore_invalid_manifest_url: i32 = 2386; -pub const blob_restore_corrupted_manifest: i32 = 2387; -pub const blob_restore_missing_manifest: i32 = 2388; -pub const blob_migrator_replaced: i32 = 2389; -pub const key_not_found: i32 = 2400; -pub const json_malformed: i32 = 2401; -pub const json_eof_expected: i32 = 2402; -pub const snap_disable_tlog_pop_failed: i32 = 2500; -pub const snap_storage_failed: i32 = 2501; -pub const snap_tlog_failed: i32 = 2502; -pub const snap_coord_failed: i32 = 2503; -pub const snap_enable_tlog_pop_failed: i32 = 2504; -pub const snap_path_not_whitelisted: i32 = 2505; -pub const snap_not_fully_recovered_unsupported: i32 = 2506; -pub const snap_log_anti_quorum_unsupported: i32 = 2507; -pub const snap_with_recovery_unsupported: i32 = 2508; -pub const snap_invalid_uid_string: i32 = 2509; -pub const encrypt_ops_error: i32 = 2700; -pub const encrypt_header_metadata_mismatch: i32 = 2701; -pub const encrypt_key_not_found: i32 = 2702; -pub const encrypt_key_ttl_expired: i32 = 2703; -pub const encrypt_header_authtoken_mismatch: i32 = 2704; -pub const encrypt_update_cipher: i32 = 2705; -pub const encrypt_invalid_id: i32 = 2706; -pub const encrypt_keys_fetch_failed: i32 = 2707; -pub const encrypt_invalid_kms_config: i32 = 2708; -pub const encrypt_unsupported: i32 = 2709; -pub const encrypt_mode_mismatch: i32 = 2710; -pub const encrypt_key_check_value_mismatch: i32 = 2711; -pub const encrypt_max_base_cipher_len: i32 = 2712; -pub const unknown_error: i32 = 4000; -pub const internal_error: i32 = 4100; -pub const not_implemented: i32 = 4200; -pub const permission_denied: i32 = 6000; -pub const unauthorized_attempt: i32 = 6001; -pub const digital_signature_ops_error: i32 = 6002; -pub const authorization_token_verify_failed: i32 = 6003; -pub const pkey_decode_error: i32 = 6004; -pub const pkey_encode_error: i32 = 6005; -pub const grpc_error: i32 = 7000; - -pub fn fdb_get_error(code: fdb_error_t) -> &'static str { - if code == success { - "Success" - } else if code == end_of_stream { - "End of stream" - } else if code == operation_failed { - "Operation failed" - } else if code == wrong_shard_server { - "Shard is not available from this server" - } else if code == operation_obsolete { - "Operation result no longer necessary" - } 
else if code == cold_cache_server { - "Cache server is not warm for this range" - } else if code == timed_out { - "Operation timed out" - } else if code == coordinated_state_conflict { - "Conflict occurred while changing coordination information" - } else if code == all_alternatives_failed { - "All alternatives failed" - } else if code == transaction_too_old { - "Transaction is too old to perform reads or be committed" - } else if code == no_more_servers { - "Not enough physical servers available" - } else if code == future_version { - "Request for future version" - } else if code == movekeys_conflict { - "Conflicting attempts to change data distribution" - } else if code == tlog_stopped { - "TLog stopped" - } else if code == server_request_queue_full { - "Server request queue is full" - } else if code == not_committed { - "Transaction not committed due to conflict with another transaction" - } else if code == commit_unknown_result { - "Transaction may or may not have committed" - } else if code == commit_unknown_result_fatal { - "Idempotency id for transaction may have expired, so the commit status of the transaction cannot be determined" - } else if code == transaction_cancelled { - "Operation aborted because the transaction was cancelled" - } else if code == connection_failed { - "Network connection failed" - } else if code == coordinators_changed { - "Coordination servers have changed" - } else if code == new_coordinators_timed_out { - "New coordination servers did not respond in a timely way" - } else if code == watch_cancelled { - "Watch cancelled because storage server watch limit exceeded" - } else if code == request_maybe_delivered { - "Request may or may not have been delivered" - } else if code == transaction_timed_out { - "Operation aborted because the transaction timed out" - } else if code == too_many_watches { - "Too many watches currently set" - } else if code == locality_information_unavailable { - "Locality information not available" - } else if code == watches_disabled { - "Watches cannot be set if read your writes is disabled" - } else if code == default_error_or { - "Default error for an ErrorOr object" - } else if code == accessed_unreadable { - "Read or wrote an unreadable key" - } else if code == process_behind { - "Storage process does not have recent mutations" - } else if code == database_locked { - "Database is locked" - } else if code == cluster_version_changed { - "The protocol version of the cluster has changed" - } else if code == external_client_already_loaded { - "External client has already been loaded" - } else if code == lookup_failed { - "DNS lookup failed" - } else if code == commit_proxy_memory_limit_exceeded { - "CommitProxy commit memory limit exceeded" - } else if code == shutdown_in_progress { - "Operation no longer supported due to shutdown" - } else if code == serialization_failed { - "Failed to deserialize an object" - } else if code == connection_unreferenced { - "No peer references for connection" - } else if code == connection_idle { - "Connection closed after idle timeout" - } else if code == disk_adapter_reset { - "The disk queue adapter reset" - } else if code == batch_transaction_throttled { - "Batch GRV request rate limit exceeded" - } else if code == dd_cancelled { - "Data distribution components cancelled" - } else if code == dd_not_found { - "Data distributor not found" - } else if code == wrong_connection_file { - "Connection file mismatch" - } else if code == version_already_compacted { - "The requested changes have been compacted 
away" - } else if code == local_config_changed { - "Local configuration file has changed. Restart and apply these changes" - } else if code == failed_to_reach_quorum { - "Failed to reach quorum from configuration database nodes. Retry sending these requests" - } else if code == unsupported_format_version { - "Format version not supported" - } else if code == unknown_change_feed { - "Change feed not found" - } else if code == change_feed_not_registered { - "Change feed not registered" - } else if code == granule_assignment_conflict { - "Conflicting attempts to assign blob granules" - } else if code == change_feed_cancelled { - "Change feed was cancelled" - } else if code == blob_granule_file_load_error { - "Error loading a blob file during granule materialization" - } else if code == blob_granule_transaction_too_old { - "Read version is older than blob granule history supports" - } else if code == blob_manager_replaced { - "This blob manager has been replaced." - } else if code == change_feed_popped { - "Tried to read a version older than what has been popped from the change feed" - } else if code == remote_kvs_cancelled { - "The remote key-value store is cancelled" - } else if code == page_header_wrong_page_id { - "Page header does not match location on disk" - } else if code == page_header_checksum_failed { - "Page header checksum failed" - } else if code == page_header_version_not_supported { - "Page header version is not supported" - } else if code == page_encoding_not_supported { - "Page encoding type is not supported or not valid" - } else if code == page_decoding_failed { - "Page content decoding failed" - } else if code == unexpected_encoding_type { - "Page content decoding failed" - } else if code == encryption_key_not_found { - "Encryption key not found" - } else if code == data_move_cancelled { - "Data move was cancelled" - } else if code == data_move_dest_team_not_found { - "Dest team was not found for data move" - } else if code == blob_worker_full { - "Blob worker cannot take on more granule assignments" - } else if code == grv_proxy_memory_limit_exceeded { - "GetReadVersion proxy memory limit exceeded" - } else if code == blob_granule_request_failed { - "BlobGranule request failed" - } else if code == storage_too_many_feed_streams { - "Too many feed streams to a single storage server" - } else if code == storage_engine_not_initialized { - "Storage engine was never successfully initialized." - } else if code == unknown_storage_engine { - "Storage engine type is not recognized." - } else if code == duplicate_snapshot_request { - "A duplicate snapshot request has been sent, the old request is discarded." - } else if code == dd_config_changed { - "DataDistribution configuration changed." 
- } else if code == consistency_check_urgent_task_failed { - "Consistency check urgent task is failed" - } else if code == data_move_conflict { - "Data move conflict in SS" - } else if code == consistency_check_urgent_duplicate_request { - "Consistency check urgent got a duplicate request" - } else if code == broken_promise { - "Broken promise" - } else if code == operation_cancelled { - "Asynchronous operation cancelled" - } else if code == future_released { - "Future has been released" - } else if code == connection_leaked { - "Connection object leaked" - } else if code == never_reply { - "Never reply to the request" - } else if code == retry { - "Retry operation" - } - // Be careful, catching this will delete the data of a storage server or tlog permanently - else if code == recruitment_failed { - "Recruitment of a server failed" - } else if code == move_to_removed_server { - "Attempt to move keys to a storage server that was removed" - } - // Be careful, catching this will delete the data of a storage server or tlog permanently - else if code == worker_removed { - "Normal worker shut down" - } else if code == cluster_recovery_failed { - "Cluster recovery failed" - } else if code == master_max_versions_in_flight { - "Master hit maximum number of versions in flight" - } - // similar to tlog_stopped, but the tlog has actually died - else if code == tlog_failed { - "Cluster recovery terminating because a TLog failed" - } else if code == worker_recovery_failed { - "Recovery of a worker process failed" - } else if code == please_reboot { - "Reboot of server process requested" - } else if code == please_reboot_delete { - "Reboot of server process requested, with deletion of state" - } else if code == commit_proxy_failed { - "Master terminating because a CommitProxy failed" - } else if code == resolver_failed { - "Cluster recovery terminating because a Resolver failed" - } else if code == server_overloaded { - "Server is under too much load and cannot respond" - } else if code == backup_worker_failed { - "Cluster recovery terminating because a backup worker failed" - } else if code == tag_throttled { - "Transaction tag is being throttled" - } else if code == grv_proxy_failed { - "Cluster recovery terminating because a GRVProxy failed" - } else if code == dd_tracker_cancelled { - "The data distribution tracker has been cancelled" - } else if code == failed_to_progress { - "Process has failed to make sufficient progress" - } else if code == invalid_cluster_id { - "Attempted to join cluster with a different cluster ID" - } else if code == restart_cluster_controller { - "Restart cluster controller process" - } else if code == please_reboot_kv_store { - "Need to reboot the storage engine" - } else if code == incompatible_software_version { - "Current software does not support database format" - } else if code == audit_storage_failed { - "Validate storage consistency operation failed" - } else if code == audit_storage_exceeded_request_limit { - "Exceeded the max number of allowed concurrent audit storage requests" - } else if code == proxy_tag_throttled { - "Exceeded maximum proxy tag throttling duration" - } else if code == key_value_store_deadline_exceeded { - "Exceeded maximum time allowed to read or write." - } else if code == storage_quota_exceeded { - "Exceeded the maximum storage quota allocated to the tenant." 
- } else if code == audit_storage_error { - "Found data corruption" - } else if code == master_failed { - "Cluster recovery terminating because master has failed" - } else if code == test_failed { - "Test failed" - } else if code == retry_clean_up_datamove_tombstone_added { - "Need background datamove cleanup" - } else if code == persist_new_audit_metadata_error { - "Persist new audit metadata error" - } else if code == cancel_audit_storage_failed { - "Failed to cancel an audit" - } else if code == audit_storage_cancelled { - "Audit has been cancelled" - } else if code == location_metadata_corruption { - "Found location metadata corruption" - } else if code == audit_storage_task_outdated { - "Audit task is scheduled by an outdated DD" - } else if code == transaction_throttled_hot_shard { - "Transaction throttled due to hot shard" - } else if code == storage_replica_comparison_error { - "Storage replicas not consistent" - } else if code == unreachable_storage_replica { - "Storage replica cannot be reached" - } else if code == bulkload_task_failed { - "Bulk loading task failed" - } else if code == bulkload_task_outdated { - "Bulk loading task outdated" - } else if code == range_lock_failed { - "Lock range failed" - } else if code == transaction_rejected_range_locked { - "Transaction rejected due to range lock" - } else if code == bulkdump_task_failed { - "Bulk dumping task failed" - } else if code == bulkdump_task_outdated { - "Bulk dumping task outdated" - } else if code == bulkload_fileset_invalid_filepath { - "Bulkload fileset provides invalid filepath" - } else if code == bulkload_manifest_decode_error { - "Bulkload manifest string is failed to decode" - } else if code == range_lock_reject { - "Range lock is rejected" - } else if code == range_unlock_reject { - "Range unlock is rejected" - } else if code == bulkload_dataset_not_cover_required_range { - "Bulkload dataset does not cover the required range" - } - // 15xx Platform errors - else if code == platform_error { - "Platform error" - } else if code == large_alloc_failed { - "Large block allocation failed" - } else if code == performance_counter_error { - "QueryPerformanceCounter error" - } else if code == bad_allocator { - "Null allocator was used to allocate memory" - } else if code == io_error { - "Disk i/o operation failed" - } else if code == file_not_found { - "File not found" - } else if code == bind_failed { - "Unable to bind to network" - } else if code == file_not_readable { - "File could not be read" - } else if code == file_not_writable { - "File could not be written" - } else if code == no_cluster_file_found { - "No cluster file found in current directory or default location" - } else if code == file_too_large { - "File too large to be read" - } else if code == non_sequential_op { - "Non sequential file operation not allowed" - } else if code == http_bad_response { - "HTTP response was badly formed" - } else if code == http_not_accepted { - "HTTP request not accepted" - } else if code == checksum_failed { - "A data checksum failed" - } else if code == io_timeout { - "A disk IO operation failed to complete in a timely manner" - } else if code == file_corrupt { - "A structurally corrupt data file was detected" - } else if code == http_request_failed { - "HTTP response code not received or indicated failure" - } else if code == http_auth_failed { - "HTTP request failed due to bad credentials" - } else if code == http_bad_request_id { - "HTTP response contained an unexpected X-Request-ID header" - } else if code == 
rest_invalid_uri { - "Invalid REST URI" - } else if code == rest_invalid_rest_client_knob { - "Invalid RESTClient knob" - } else if code == rest_connectpool_key_not_found { - "ConnectKey not found in connection pool" - } else if code == lock_file_failure { - "Unable to lock the file" - } else if code == rest_unsupported_protocol { - "Unsupported REST protocol" - } else if code == rest_malformed_response { - "Malformed REST response" - } else if code == rest_max_base_cipher_len { - "Max BaseCipher length violation" - } else if code == resource_not_found { - "Requested resource was not found" - } - // 2xxx Attempt (presumably by a _client_) to do something illegal. If an error is known to - // be internally caused, it should be 41xx - else if code == client_invalid_operation { - "Invalid API call" - } else if code == commit_read_incomplete { - "Commit with incomplete read" - } else if code == test_specification_invalid { - "Invalid test specification" - } else if code == key_outside_legal_range { - "Key outside legal range" - } else if code == inverted_range { - "Range begin key larger than end key" - } else if code == invalid_option_value { - "Option set with an invalid value" - } else if code == invalid_option { - "Option not valid in this context" - } else if code == network_not_setup { - "Action not possible before the network is configured" - } else if code == network_already_setup { - "Network can be configured only once" - } else if code == read_version_already_set { - "Transaction already has a read version set" - } else if code == version_invalid { - "Version not valid" - } else if code == range_limits_invalid { - "Range limits not valid" - } else if code == invalid_database_name { - "Database name must be 'DB'" - } else if code == attribute_not_found { - "Attribute not found" - } else if code == future_not_set { - "Future not ready" - } else if code == future_not_error { - "Future not an error" - } else if code == used_during_commit { - "Operation issued while a commit was outstanding" - } else if code == invalid_mutation_type { - "Unrecognized atomic mutation type" - } else if code == attribute_too_large { - "Attribute too large for type int" - } else if code == transaction_invalid_version { - "Transaction does not have a valid commit version" - } else if code == no_commit_version { - "Transaction is read-only and therefore does not have a commit version" - } else if code == environment_variable_network_option_failed { - "Environment variable network option could not be set" - } else if code == transaction_read_only { - "Attempted to commit a transaction specified as read-only" - } else if code == invalid_cache_eviction_policy { - "Invalid cache eviction policy, only random and lru are supported" - } else if code == network_cannot_be_restarted { - "Network can only be started once" - } else if code == blocked_from_network_thread { - "Detected a deadlock in a callback called from the network thread" - } else if code == invalid_config_db_range_read { - "Invalid configuration database range read" - } else if code == invalid_config_db_key { - "Invalid configuration database key provided" - } else if code == invalid_config_path { - "Invalid configuration path" - } else if code == mapper_bad_index { - "The index in K[] or V[] is not a valid number or out of range" - } else if code == mapper_no_such_key { - "A mapped key is not set in database" - } else if code == mapper_bad_range_decriptor { - "\"{...}\" must be the last element of the mapper tuple" - } else if code == 
quick_get_key_values_has_more { - "One of the mapped range queries is too large" - } else if code == quick_get_value_miss { - "Found a mapped key that is not served in the same SS" - } else if code == quick_get_key_values_miss { - "Found a mapped range that is not served in the same SS" - } else if code == blob_granule_no_ryw { - "Blob Granule Read Transactions must be specified as ryw-disabled" - } else if code == blob_granule_not_materialized { - "Blob Granule Read was not materialized" - } else if code == get_mapped_key_values_has_more { - "getMappedRange does not support continuation for now" - } else if code == get_mapped_range_reads_your_writes { - "getMappedRange tries to read data that were previously written in the transaction" - } else if code == checkpoint_not_found { - "Checkpoint not found" - } else if code == key_not_tuple { - "The key cannot be parsed as a tuple" - } else if code == value_not_tuple { - "The value cannot be parsed as a tuple" - } else if code == mapper_not_tuple { - "The mapper cannot be parsed as a tuple" - } else if code == invalid_checkpoint_format { - "Invalid checkpoint format" - } else if code == invalid_throttle_quota_value { - "Invalid quota value. Note that reserved_throughput cannot exceed total_throughput" - } else if code == failed_to_create_checkpoint { - "Failed to create a checkpoint" - } else if code == failed_to_restore_checkpoint { - "Failed to restore a checkpoint" - } else if code == failed_to_create_checkpoint_shard_metadata { - "Failed to dump shard metadata for a checkpoint to a sst file" - } else if code == address_parse_error { - "Failed to parse address" - } else if code == incompatible_protocol_version { - "Incompatible protocol version" - } else if code == transaction_too_large { - "Transaction exceeds byte limit" - } else if code == key_too_large { - "Key length exceeds limit" - } else if code == value_too_large { - "Value length exceeds limit" - } else if code == connection_string_invalid { - "Connection string invalid" - } else if code == address_in_use { - "Local address in use" - } else if code == invalid_local_address { - "Invalid local address" - } else if code == tls_error { - "TLS error" - } else if code == unsupported_operation { - "Operation is not supported" - } else if code == too_many_tags { - "Too many tags set on transaction" - } else if code == tag_too_long { - "Tag set on transaction is too long" - } else if code == too_many_tag_throttles { - "Too many tag throttles have been created" - } else if code == special_keys_cross_module_read { - "Special key space range read crosses modules. Refer to the `special_key_space_relaxed' transaction option for more details." - } else if code == special_keys_no_module_found { - "Special key space range read does not intersect a module. Refer to the `special_key_space_relaxed' transaction option for more details." - } else if code == special_keys_write_disabled { - "Special Key space is not allowed to write by default. Refer to the `special_key_space_enable_writes` transaction option for more details." - } else if code == special_keys_no_write_module_found { - "Special key space key or keyrange in set or clear does not intersect a module" - } else if code == special_keys_cross_module_clear { - "Special key space clear crosses modules" - } else if code == special_keys_api_failure { - "Api call through special keys failed. For more information, call get on special key 0xff0xff/error_message to get a json string of the error message." 
- } else if code == client_lib_invalid_metadata { - "Invalid client library metadata." - } else if code == client_lib_already_exists { - "Client library with same identifier already exists on the cluster." - } else if code == client_lib_not_found { - "Client library for the given identifier not found." - } else if code == client_lib_not_available { - "Client library exists, but is not available for download." - } else if code == client_lib_invalid_binary { - "Invalid client library binary." - } else if code == no_external_client_provided { - "No external client library provided." - } else if code == all_external_clients_failed { - "All external clients have failed." - } else if code == incompatible_client { - "None of the available clients match the protocol version of the cluster." - } else if code == tenant_name_required { - "Tenant name must be specified to access data in the cluster" - } else if code == tenant_not_found { - "Tenant does not exist" - } else if code == tenant_already_exists { - "A tenant with the given name already exists" - } else if code == tenant_not_empty { - "Cannot delete a non-empty tenant" - } else if code == invalid_tenant_name { - "Tenant name cannot begin with \\xff" - } else if code == tenant_prefix_allocator_conflict { - "The database already has keys stored at the prefix allocated for the tenant" - } else if code == tenants_disabled { - "Tenants have been disabled in the cluster" - } else if code == illegal_tenant_access { - "Illegal tenant access" - } else if code == invalid_tenant_group_name { - "Tenant group name cannot begin with \\xff" - } else if code == invalid_tenant_configuration { - "Tenant configuration is invalid" - } else if code == cluster_no_capacity { - "Cluster does not have capacity to perform the specified operation" - } else if code == tenant_removed { - "The tenant was removed" - } else if code == invalid_tenant_state { - "Operation cannot be applied to tenant in its current state" - } else if code == tenant_locked { - "Tenant is locked" - } else if code == invalid_cluster_name { - "Data cluster name cannot begin with \\xff" - } else if code == invalid_metacluster_operation { - "Metacluster operation performed on non-metacluster" - } else if code == cluster_already_exists { - "A data cluster with the given name already exists" - } else if code == cluster_not_found { - "Data cluster does not exist" - } else if code == cluster_not_empty { - "Cluster must be empty" - } else if code == cluster_already_registered { - "Data cluster is already registered with a metacluster" - } else if code == metacluster_no_capacity { - "Metacluster does not have capacity to create new tenants" - } else if code == management_cluster_invalid_access { - "Standard transactions cannot be run against the management cluster" - } else if code == tenant_creation_permanently_failed { - "The tenant creation did not complete in a timely manner and has permanently failed" - } else if code == cluster_removed { - "The cluster is being removed from the metacluster" - } else if code == cluster_restoring { - "The cluster is being restored to the metacluster" - } else if code == invalid_data_cluster { - "The data cluster being restored has no record of its metacluster" - } else if code == metacluster_mismatch { - "The cluster does not have the expected name or is associated with a different metacluster" - } else if code == conflicting_restore { - "Another restore is running for the same data cluster" - } else if code == invalid_metacluster_configuration { - "Metacluster 
configuration is invalid" - } else if code == unsupported_metacluster_version { - "Client is not compatible with the metacluster" - } - // 2200 - errors from bindings and official APIs - else if code == api_version_unset { - "API version is not set" - } else if code == api_version_already_set { - "API version may be set only once" - } else if code == api_version_invalid { - "API version not valid" - } else if code == api_version_not_supported { - "API version not supported" - } else if code == api_function_missing { - "Failed to load a required FDB API function." - } else if code == exact_mode_without_limits { - "EXACT streaming mode requires limits, but none were given" - } else if code == invalid_tuple_data_type { - "Unrecognized data type in packed tuple" - } else if code == invalid_tuple_index { - "Tuple does not have element at specified index" - } else if code == key_not_in_subspace { - "Cannot unpack key that is not in subspace" - } else if code == manual_prefixes_not_enabled { - "Cannot specify a prefix unless manual prefixes are enabled" - } else if code == prefix_in_partition { - "Cannot specify a prefix in a partition" - } else if code == cannot_open_root_directory { - "Root directory cannot be opened" - } else if code == directory_already_exists { - "Directory already exists" - } else if code == directory_does_not_exist { - "Directory does not exist" - } else if code == parent_directory_does_not_exist { - "Directory's parent does not exist" - } else if code == mismatched_layer { - "Directory has already been created with a different layer string" - } else if code == invalid_directory_layer_metadata { - "Invalid directory layer metadata" - } else if code == cannot_move_directory_between_partitions { - "Directory cannot be moved between partitions" - } else if code == cannot_use_partition_as_subspace { - "Directory partition cannot be used as subspace" - } else if code == incompatible_directory_version { - "Directory layer was created with an incompatible version" - } else if code == directory_prefix_not_empty { - "Database has keys stored at the prefix chosen by the automatic prefix allocator" - } else if code == directory_prefix_in_use { - "Directory layer already has a conflicting prefix" - } else if code == invalid_destination_directory { - "Target directory is invalid" - } else if code == cannot_modify_root_directory { - "Root directory cannot be modified" - } else if code == invalid_uuid_size { - "UUID is not sixteen bytes" - } else if code == invalid_versionstamp_size { - "Versionstamp is not exactly twelve bytes" - } - // 2300 - backup and restore errors - else if code == backup_error { - "Backup error" - } else if code == restore_error { - "Restore error" - } else if code == backup_duplicate { - "Backup duplicate request" - } else if code == backup_unneeded { - "Backup unneeded request" - } else if code == backup_bad_block_size { - "Backup file block size too small" - } else if code == backup_invalid_url { - "Backup Container URL invalid" - } else if code == backup_invalid_info { - "Backup Container info invalid" - } else if code == backup_cannot_expire { - "Cannot expire requested data from backup without violating minimum restorability" - } else if code == backup_auth_missing { - "Cannot find authentication details (such as a password or secret key) for the specified Backup Container URL" - } else if code == backup_auth_unreadable { - "Cannot read or parse one or more sources of authentication information for Backup Container URLs" - } else if code == 
backup_does_not_exist { - "Backup does not exist" - } else if code == backup_not_filterable_with_key_ranges { - "Backup before 6.3 cannot be filtered with key ranges" - } else if code == backup_not_overlapped_with_keys_filter { - "Backup key ranges doesn't overlap with key ranges filter" - } else if code == bucket_not_in_url { - "bucket is not in the URL for backup" - } else if code == backup_parse_s3_response_failure { - "cannot parse s3 response properly" - } else if code == restore_invalid_version { - "Invalid restore version" - } else if code == restore_corrupted_data { - "Corrupted backup data" - } else if code == restore_missing_data { - "Missing backup data" - } else if code == restore_duplicate_tag { - "Restore duplicate request" - } else if code == restore_unknown_tag { - "Restore tag does not exist" - } else if code == restore_unknown_file_type { - "Unknown backup/restore file type" - } else if code == restore_unsupported_file_version { - "Unsupported backup file version" - } else if code == restore_bad_read { - "Unexpected number of bytes read" - } else if code == restore_corrupted_data_padding { - "Backup file has unexpected padding bytes" - } else if code == restore_destination_not_empty { - "Attempted to restore into a non-empty destination database" - } else if code == restore_duplicate_uid { - "Attempted to restore using a UID that had been used for an aborted restore" - } else if code == task_invalid_version { - "Invalid task version" - } else if code == task_interrupted { - "Task execution stopped due to timeout, abort, or completion by another worker" - } else if code == invalid_encryption_key_file { - "The provided encryption key file has invalid contents" - } else if code == blob_restore_missing_logs { - "Missing mutation logs" - } else if code == blob_restore_corrupted_logs { - "Corrupted mutation logs" - } else if code == blob_restore_invalid_manifest_url { - "Invalid manifest URL" - } else if code == blob_restore_corrupted_manifest { - "Corrupted manifest" - } else if code == blob_restore_missing_manifest { - "Missing manifest" - } else if code == blob_migrator_replaced { - "Blob migrator is replaced" - } else if code == key_not_found { - "Expected key is missing" - } else if code == json_malformed { - "JSON string was malformed" - } else if code == json_eof_expected { - "JSON string did not terminate where expected" - } - // 2500 - disk snapshot based backup errors - else if code == snap_disable_tlog_pop_failed { - "Failed to disable tlog pops" - } else if code == snap_storage_failed { - "Failed to snapshot storage nodes" - } else if code == snap_tlog_failed { - "Failed to snapshot TLog nodes" - } else if code == snap_coord_failed { - "Failed to snapshot coordinator nodes" - } else if code == snap_enable_tlog_pop_failed { - "Failed to enable tlog pops" - } else if code == snap_path_not_whitelisted { - "Snapshot create binary path not whitelisted" - } else if code == snap_not_fully_recovered_unsupported { - "Unsupported when the cluster is not fully recovered" - } else if code == snap_log_anti_quorum_unsupported { - "Unsupported when log anti quorum is configured" - } else if code == snap_with_recovery_unsupported { - "Cluster recovery during snapshot operation not supported" - } else if code == snap_invalid_uid_string { - "The given uid string is not a 32-length hex string" - } - // 27XX - Encryption operations errors - else if code == encrypt_ops_error { - "Encryption operation error" - } else if code == encrypt_header_metadata_mismatch { - "Encryption header 
metadata mismatch" - } else if code == encrypt_key_not_found { - "Expected encryption key is missing" - } else if code == encrypt_key_ttl_expired { - "Expected encryption key TTL has expired" - } else if code == encrypt_header_authtoken_mismatch { - "Encryption header authentication token mismatch" - } else if code == encrypt_update_cipher { - "Attempt to update encryption cipher key" - } else if code == encrypt_invalid_id { - "Invalid encryption cipher details" - } else if code == encrypt_keys_fetch_failed { - "Encryption keys fetch from external KMS failed" - } else if code == encrypt_invalid_kms_config { - "Invalid encryption/kms configuration: discovery-url, validation-token, endpoint etc." - } else if code == encrypt_unsupported { - "Encryption not supported" - } else if code == encrypt_mode_mismatch { - "Encryption mode mismatch with configuration" - } else if code == encrypt_key_check_value_mismatch { - "Encryption key-check-value mismatch" - } else if code == encrypt_max_base_cipher_len { - "Max BaseCipher buffer length violation" - } - // 4xxx Internal errors (those that should be generated only by bugs) are decimal 4xxx - // C++ exception not of type Error - else if code == unknown_error { - "An unknown error occurred" - } else if code == internal_error { - "An internal error occurred" - } else if code == not_implemented { - "Not implemented yet" - } - // 6xxx Authorization and authentication error codes - else if code == permission_denied { - "Client tried to access unauthorized data" - } else if code == unauthorized_attempt { - "A untrusted client tried to send a message to a private endpoint" - } else if code == digital_signature_ops_error { - "Digital signature operation error" - } else if code == authorization_token_verify_failed { - "Failed to verify authorization token" - } else if code == pkey_decode_error { - "Failed to decode public/private key" - } else if code == pkey_encode_error { - "Failed to encode public/private key" - } - // gRPC error - else if code == grpc_error { - "gRPC Error" - } else { - "Unknown error" - } -} - -pub fn fdb_error_predicate(predicate_test: options::ErrorPredicate, code: fdb_error_t) -> bool { - if predicate_test == options::ErrorPredicate::Retryable { - return fdb_error_predicate(options::ErrorPredicate::MaybeCommitted, code) - || fdb_error_predicate(options::ErrorPredicate::RetryableNotCommitted, code); - } - if predicate_test == options::ErrorPredicate::MaybeCommitted { - return code == commit_unknown_result || code == cluster_version_changed; - } - if predicate_test == options::ErrorPredicate::RetryableNotCommitted { - return code == not_committed - || code == transaction_too_old - || code == future_version - || code == database_locked - || code == grv_proxy_memory_limit_exceeded - || code == commit_proxy_memory_limit_exceeded - || code == transaction_throttled_hot_shard - || code == batch_transaction_throttled - || code == process_behind - || code == tag_throttled - || code == proxy_tag_throttled - || code == transaction_rejected_range_locked; - } - - false -} - -/// Error returned when attempting to access metrics on a transaction that wasn't created with metrics instrumentation. -/// -/// This error occurs when calling methods like `set_custom_metric` or `increment_custom_metric` on a -/// transaction that was created without metrics instrumentation (i.e., using `create_trx` instead of -/// `create_instrumented_trx`). 
-#[derive(Debug)]
-pub struct TransactionMetricsNotFound;
-
-impl std::fmt::Display for TransactionMetricsNotFound {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "Transaction metrics not found")
-    }
-}
-
-impl std::error::Error for TransactionMetricsNotFound {}
-
-/// The Standard Error type of FoundationDB
-#[derive(Debug, Copy, Clone)]
-pub struct FdbError {
-    /// The FoundationDB error code
-    error_code: i32,
-}
-
-impl FdbError {
-    /// Converts from a raw foundationDB error code
-    pub fn from_code(error_code: fdb_error_t) -> Self {
-        Self { error_code }
-    }
-
-    pub fn message(self) -> &'static str {
-        fdb_get_error(self.error_code)
-    }
-
-    fn is_error_predicate(self, predicate: options::ErrorPredicate) -> bool {
-        fdb_error_predicate(predicate, self.error_code)
-    }
-
-    /// Indicates the transaction may have succeeded, though not in a way the system can verify.
-    pub fn is_maybe_committed(self) -> bool {
-        self.is_error_predicate(options::ErrorPredicate::MaybeCommitted)
-    }
-
-    /// Indicates the operations in the transactions should be retried because of transient error.
-    pub fn is_retryable(self) -> bool {
-        self.is_error_predicate(options::ErrorPredicate::Retryable)
-    }
-
-    /// Indicates the transaction has not committed, though in a way that can be retried.
-    pub fn is_retryable_not_committed(self) -> bool {
-        self.is_error_predicate(options::ErrorPredicate::RetryableNotCommitted)
-    }
-
-    /// Raw foundationdb error code
-    pub fn code(self) -> i32 {
-        self.error_code
-    }
-}
-
-impl fmt::Display for FdbError {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        std::fmt::Display::fmt(&self.message(), f)
-    }
-}
-
-impl std::error::Error for FdbError {}
-
-/// Alias for `Result<..., FdbError>`
-pub type FdbResult = Result;
-
-/// This error represent all errors that can be throwed by `db.run`.
-/// Layer developers may use the `CustomError`.
-pub enum FdbBindingError {
-    NonRetryableFdbError(FdbError),
-    PackError(PackError),
-    /// A reference to the `RetryableTransaction` has been kept
-    ReferenceToTransactionKept,
-    /// A custom error that layer developers can use
-    CustomError(Box),
-    /// Error returned when attempting to access metrics on a transaction that wasn't created with metrics instrumentation
-    TransactionMetricsNotFound,
-}
-
-impl FdbBindingError {
-    /// Returns the underlying `FdbError`, if any.
-    pub fn get_fdb_error(&self) -> Option {
-        match *self {
-            Self::NonRetryableFdbError(error) => Some(error),
-            Self::CustomError(ref error) => {
-                if let Some(e) = error.downcast_ref::() {
-                    Some(*e)
-                } else if let Some(e) = error.downcast_ref::() {
-                    e.get_fdb_error()
-                } else {
-                    None
-                }
-            }
-            _ => None,
-        }
-    }
-}
-
-impl From for FdbBindingError {
-    fn from(e: FdbError) -> Self {
-        Self::NonRetryableFdbError(e)
-    }
-}
-
-impl From for FdbBindingError {
-    fn from(_e: TransactionMetricsNotFound) -> Self {
-        Self::TransactionMetricsNotFound
-    }
-}
-
-impl FdbBindingError {
-    /// create a new custom error
-    pub fn new_custom_error(e: Box) -> Self {
-        Self::CustomError(e)
-    }
-}
-
-impl Debug for FdbBindingError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        match self {
-            FdbBindingError::NonRetryableFdbError(err) => write!(f, "{err:?}"),
-            FdbBindingError::PackError(err) => write!(f, "{err:?}"),
-            FdbBindingError::ReferenceToTransactionKept => {
-                write!(f, "Reference to transaction kept")
-            }
-            FdbBindingError::CustomError(err) => write!(f, "{err:?}"),
-            FdbBindingError::TransactionMetricsNotFound => {
-                write!(f, "Transaction metrics not found")
-            }
-        }
-    }
-}
-
-impl Display for FdbBindingError {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-        std::fmt::Debug::fmt(&self, f)
-    }
-}
-
-impl std::error::Error for FdbBindingError {}
diff --git a/packages/common/universaldb/src/inherited/mod.rs b/packages/common/universaldb/src/inherited/mod.rs
deleted file mode 100644
index 81ff533efb..0000000000
--- a/packages/common/universaldb/src/inherited/mod.rs
+++ /dev/null
@@ -1,4 +0,0 @@
-pub mod error;
-pub mod keyselector;
-pub mod options;
-pub mod rangeoption;
diff --git a/packages/common/universaldb/src/inherited/keyselector.rs b/packages/common/universaldb/src/key_selector.rs
similarity index 82%
rename from packages/common/universaldb/src/inherited/keyselector.rs
rename to packages/common/universaldb/src/key_selector.rs
index 4ccb0c468c..18d7fabb45 100644
--- a/packages/common/universaldb/src/inherited/keyselector.rs
+++ b/packages/common/universaldb/src/key_selector.rs
@@ -1,11 +1,3 @@
-// Copyright 2018 foundationdb-rs developers, https://github.com/Clikengo/foundationdb-rs/graphs/contributors
-// Copyright 2013-2018 Apple, Inc and the FoundationDB project authors.
-//
-// Licensed under the Apache License, Version 2.0, or the MIT license , at your option. This file may not be
-// copied, modified, or distributed except according to those terms.
-
 //! A `KeySelector` identifies a particular key in the database.
 
 use crate::tuple::Bytes;
@@ -29,8 +21,6 @@ use std::borrow::Cow;
 /// - `last_less_or_equal`
 /// - `first_greater_than`
 /// - `first_greater_or_equal`
-///
-/// A dedicated [example](https://github.com/foundationdb-rs/foundationdb-rs/blob/main/foundationdb/examples/key_selectors.rs) is available on Github.
#[derive(Clone, Debug)] pub struct KeySelector<'a> { key: Bytes<'a>, diff --git a/packages/common/universaldb/src/lib.rs b/packages/common/universaldb/src/lib.rs index b4dc229153..96260a177d 100644 --- a/packages/common/universaldb/src/lib.rs +++ b/packages/common/universaldb/src/lib.rs @@ -1,27 +1,24 @@ pub(crate) mod atomic; mod database; pub mod driver; -pub mod future; -pub mod inherited; +pub mod error; +pub mod key_selector; +mod metrics; +pub mod options; +pub mod prelude; +pub mod range_option; mod transaction; pub(crate) mod tx_ops; -mod types; pub mod utils; +pub mod value; pub mod versionstamp; -// Export UDB-specific types pub use database::Database; pub use driver::DatabaseDriverHandle; +pub use key_selector::KeySelector; +pub use range_option::RangeOption; +pub use transaction::{RetryableTransaction, Transaction}; +pub use utils::{Subspace, calculate_tx_retry_backoff}; // Re-export FDB types pub use foundationdb_tuple as tuple; -pub use future::{FdbKeyValue, FdbValue}; -pub use inherited::options; -pub use inherited::{ - error::FdbBindingError, error::FdbError, error::FdbResult, keyselector::KeySelector, - rangeoption::RangeOption, -}; -pub use options::DatabaseOption; -pub use transaction::{RetryableTransaction, Transaction}; -pub use types::*; -pub use utils::calculate_tx_retry_backoff; diff --git a/packages/common/udb-util/src/metrics.rs b/packages/common/universaldb/src/metrics.rs similarity index 94% rename from packages/common/udb-util/src/metrics.rs rename to packages/common/universaldb/src/metrics.rs index 95a8f36811..fd9aa61251 100644 --- a/packages/common/udb-util/src/metrics.rs +++ b/packages/common/universaldb/src/metrics.rs @@ -4,7 +4,7 @@ use rivet_metrics::{ }; lazy_static::lazy_static! { - static ref METER: Meter = meter("rivet-udb-util"); + static ref METER: Meter = meter("rivet-universaldb"); /// Has no expected attributes pub static ref PING_DURATION: Histogram = METER.f64_histogram("rivet_udb_ping_duration") diff --git a/packages/common/universaldb/src/inherited/options.rs b/packages/common/universaldb/src/options.rs similarity index 77% rename from packages/common/universaldb/src/inherited/options.rs rename to packages/common/universaldb/src/options.rs index 4d4492227a..64442cd9ea 100644 --- a/packages/common/universaldb/src/inherited/options.rs +++ b/packages/common/universaldb/src/options.rs @@ -1,160 +1,3 @@ -//! IMPORTANT: This file is generated by foundationdb-gen for fdb version 7.3. Anything referencing -// foundationdb_sys was stripped out. - -#[derive(Clone, Debug)] -#[non_exhaustive] -pub enum NetworkOption { - /// IP:PORT - /// - /// Deprecated - LocalAddress(String), - /// path to cluster file - /// - /// Deprecated - ClusterFile(String), - /// path to output directory (or NULL for current working directory) - /// - /// Enables trace output to a file in a directory of the clients choosing - TraceEnable(String), - /// max size of a single trace output file - /// - /// Sets the maximum size in bytes of a single trace output file. This value should be in the range ``[0, INT64_MAX]``. If the value is set to 0, there is no limit on individual file size. The default is a maximum size of 10,485,760 bytes. - TraceRollSize(i32), - /// max total size of trace files - /// - /// Sets the maximum size of all the trace output files put together. This value should be in the range ``[0, INT64_MAX]``. If the value is set to 0, there is no limit on the total size of the files. The default is a maximum size of 104,857,600 bytes. 
If the default roll size is used, this means that a maximum of 10 trace files will be written at a time. - TraceMaxLogsSize(i32), - /// value of the LogGroup attribute - /// - /// Sets the 'LogGroup' attribute with the specified value for all events in the trace output files. The default log group is 'default'. - TraceLogGroup(String), - /// Format of trace files - /// - /// Select the format of the log files. xml (the default) and json are supported. - TraceFormat(String), - /// Trace clock source - /// - /// Select clock source for trace files. now (the default) or realtime are supported. - TraceClockSource(String), - /// The identifier that will be part of all trace file names - /// - /// Once provided, this string will be used to replace the port/PID in the log file names. - TraceFileIdentifier(String), - /// Use the same base trace file name for all client threads as it did before version 7.2. The current default behavior is to use distinct trace file names for client threads by including their version and thread index. - TraceShareAmongClientThreads, - /// Initialize trace files on network setup, determine the local IP later. Otherwise tracing is initialized when opening the first database. - TraceInitializeOnSetup, - /// Append this suffix to partially written log files. When a log file is complete, it is renamed to remove the suffix. No separator is added between the file and the suffix. If you want to add a file extension, you should include the separator - e.g. '.tmp' instead of 'tmp' to add the 'tmp' extension. - /// - /// Set file suffix for partially written log files. - TracePartialFileSuffix(String), - /// knob_name=knob_value - /// - /// Set internal tuning or debugging knobs - Knob(String), - /// file path or linker-resolved name - /// - /// Deprecated - TLSPlugin(String), - /// certificates - /// - /// Set the certificate chain - TLSCertBytes(Vec), - /// file path - /// - /// Set the file from which to load the certificate chain - TLSCertPath(String), - /// key - /// - /// Set the private key corresponding to your own certificate - TLSKeyBytes(Vec), - /// file path - /// - /// Set the file from which to load the private key corresponding to your own certificate - TLSKeyPath(String), - /// verification pattern - /// - /// Set the peer certificate field verification criteria - TLSVerifyPeers(Vec), - BuggifyEnable, - BuggifyDisable, - /// probability expressed as a percentage between 0 and 100 - /// - /// Set the probability of a BUGGIFY section being active for the current execution. Only applies to code paths first traversed AFTER this option is changed. - BuggifySectionActivatedProbability(i32), - /// probability expressed as a percentage between 0 and 100 - /// - /// Set the probability of an active BUGGIFY section being fired - BuggifySectionFiredProbability(i32), - /// ca bundle - /// - /// Set the ca bundle - TLSCaBytes(Vec), - /// file path - /// - /// Set the file from which to load the certificate authority bundle - TLSCaPath(String), - /// key passphrase - /// - /// Set the passphrase for encrypted private key. Password should be set before setting the key for the password to be used. - TLSPassword(String), - /// Disables the multi-version client API and instead uses the local client directly. Must be set before setting up the network. - DisableMultiVersionClientApi, - /// If set, callbacks from external client libraries can be called from threads created by the FoundationDB client library. 
Otherwise, callbacks will be called from either the thread used to add the callback or the network thread. Setting this option can improve performance when connected using an external client, but may not be safe to use in all environments. Must be set before setting up the network. WARNING: This feature is considered experimental at this time. - CallbacksOnExternalThreads, - /// path to client library - /// - /// Adds an external client library for use by the multi-version client API. Must be set before setting up the network. - ExternalClientLibrary(String), - /// path to directory containing client libraries - /// - /// Searches the specified path for dynamic libraries and adds them to the list of client libraries for use by the multi-version client API. Must be set before setting up the network. - ExternalClientDirectory(String), - /// Prevents connections through the local client, allowing only connections through externally loaded client libraries. - DisableLocalClient, - /// Number of client threads to be spawned. Each cluster will be serviced by a single client thread. - /// - /// Spawns multiple worker threads for each version of the client that is loaded. Setting this to a number greater than one implies disable_local_client. - ClientThreadsPerVersion(i32), - /// path to client library - /// - /// Adds an external client library to be used with a future version protocol. This option can be used testing purposes only! - FutureVersionClientLibrary(String), - /// Retain temporary external client library copies that are created for enabling multi-threading. - RetainClientLibraryCopies, - /// Ignore the failure to initialize some of the external clients - IgnoreExternalClientFailures, - /// Fail with an error if there is no client matching the server version the client is connecting to - FailIncompatibleClient, - /// Disables logging of client statistics, such as sampled transaction activity. - DisableClientStatisticsLogging, - /// Deprecated - EnableSlowTaskProfiling, - /// Enables debugging feature to perform run loop profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production. - EnableRunLoopProfiling, - /// Prevents the multi-version client API from being disabled, even if no external clients are configured. This option is required to use GRV caching. - DisableClientBypass, - /// Enable client buggify - will make requests randomly fail (intended for client testing) - ClientBuggifyEnable, - /// Disable client buggify - ClientBuggifyDisable, - /// probability expressed as a percentage between 0 and 100 - /// - /// Set the probability of a CLIENT_BUGGIFY section being active for the current execution. - ClientBuggifySectionActivatedProbability(i32), - /// probability expressed as a percentage between 0 and 100 - /// - /// Set the probability of an active CLIENT_BUGGIFY section being fired. A section will only fire if it was activated - ClientBuggifySectionFiredProbability(i32), - /// Distributed tracer type. Choose from none, log_file, or network_lossy - /// - /// Set a tracer to run on the client. Should be set to the same value as the tracer set on the server. - DistributedClientTracer(String), - /// Client directory for temporary files. - /// - /// Sets the directory for storing temporary files created by FDB client, such as temporary copies of client libraries. 
Defaults to /tmp - ClientTmpDir(String), -} #[derive(Clone, Debug)] #[non_exhaustive] pub enum DatabaseOption { diff --git a/packages/common/universaldb/src/prelude.rs b/packages/common/universaldb/src/prelude.rs new file mode 100644 index 0000000000..a5c1c897fa --- /dev/null +++ b/packages/common/universaldb/src/prelude.rs @@ -0,0 +1,8 @@ +pub use crate::{ + key_selector::KeySelector, + options::StreamingMode, + range_option::RangeOption, + tuple::{PackError, PackResult, TupleDepth, TuplePack, TupleUnpack, VersionstampOffset}, + utils::{FormalChunkedKey, FormalKey, IsolationLevel::*, OptSliceExt, SliceExt, keys::*}, + value::Value, +}; diff --git a/packages/common/universaldb/src/inherited/rangeoption.rs b/packages/common/universaldb/src/range_option.rs similarity index 96% rename from packages/common/universaldb/src/inherited/rangeoption.rs rename to packages/common/universaldb/src/range_option.rs index 83c4fa8023..f30952f321 100644 --- a/packages/common/universaldb/src/inherited/rangeoption.rs +++ b/packages/common/universaldb/src/range_option.rs @@ -15,8 +15,8 @@ use std::{ ops::{Range, RangeInclusive}, }; -use super::{keyselector::*, options}; -use crate::{future::*, tuple::Subspace}; +use super::{key_selector::KeySelector, options}; +use crate::{tuple::Subspace, value::Values}; /// `RangeOption` represents a query parameters for range scan query. #[derive(Debug, Clone)] @@ -47,7 +47,7 @@ impl RangeOption<'_> { self } - pub fn next_range(mut self, kvs: &FdbValues) -> Option { + pub fn next_range(mut self, kvs: &Values) -> Option { if !kvs.more() { return None; } diff --git a/packages/common/universaldb/src/transaction.rs b/packages/common/universaldb/src/transaction.rs index 2774395ff7..991c069db5 100644 --- a/packages/common/universaldb/src/transaction.rs +++ b/packages/common/universaldb/src/transaction.rs @@ -1,62 +1,225 @@ use std::{future::Future, ops::Deref, pin::Pin, sync::Arc}; +use anyhow::{Context, Result}; +use futures_util::StreamExt; + use crate::{ - FdbResult, KeySelector, RangeOption, driver::TransactionDriver, - future::{FdbSlice, FdbValues}, + key_selector::KeySelector, options::{ConflictRangeType, MutationType}, - tuple::Subspace, - types::{TransactionCommitError, TransactionCommitted}, + range_option::RangeOption, + tuple::{self, TuplePack, TupleUnpack}, + utils::{ + CherryPick, FormalKey, IsolationLevel, MaybeCommitted, OptSliceExt, Subspace, + end_of_key_range, + }, + value::{Slice, Value, Values}, }; +#[derive(Clone)] pub struct Transaction { - pub(crate) driver: Box, + pub(crate) driver: Arc, + subspace: Subspace, } impl Transaction { - pub(crate) fn new(driver: Box) -> Self { - Transaction { driver: driver } + pub(crate) fn new(driver: Arc) -> Self { + Transaction { + driver: driver, + subspace: tuple::Subspace::all().into(), + } } - pub fn atomic_op(&self, key: &[u8], param: &[u8], op_type: MutationType) { - self.driver.atomic_op(key, param, op_type) + /// Creates a new transaction instance with the provided subspace. + pub fn with_subspace(&self, subspace: Subspace) -> Self { + Transaction { + driver: self.driver.clone(), + subspace, + } } - // Read operations + pub fn informal(&self) -> InformalTransaction<'_> { + InformalTransaction { inner: self } + } + + pub fn pack(&self, t: &T) -> Vec { + self.subspace.pack(t) + } + + /// Unpacks a key based on the subspace of this transaction. 
+ pub fn unpack<'de, T: TupleUnpack<'de>>(&self, key: &'de [u8]) -> Result { + self.subspace + .unpack(key) + .with_context(|| format!("failed unpacking key of {}", std::any::type_name::())) + } + + pub fn write(&self, key: &T, value: T::Value) -> Result<()> { + self.driver.set( + &self.subspace.pack(key), + &key.serialize(value).with_context(|| { + format!( + "failed serializing key value of {}", + std::any::type_name::(), + ) + })?, + ); + + Ok(()) + } + + pub async fn read<'de, T: FormalKey + TuplePack + TupleUnpack<'de>>( + &self, + key: &'de T, + isolation_level: IsolationLevel, + ) -> Result { + self.driver + .get(&self.subspace.pack(key), isolation_level) + .await? + .read(key) + } + + pub async fn read_opt<'de, T: FormalKey + TuplePack + TupleUnpack<'de>>( + &self, + key: &'de T, + isolation_level: IsolationLevel, + ) -> Result> { + self.driver + .get(&self.subspace.pack(key), isolation_level) + .await? + .read_opt(key) + } + + pub async fn exists( + &self, + key: &T, + isolation_level: IsolationLevel, + ) -> Result { + Ok(self + .driver + .get(&self.subspace.pack(key), isolation_level) + .await? + .is_some()) + } + + pub fn delete(&self, key: &T) { + self.driver.clear(&self.subspace.pack(key)); + } + + pub fn delete_key_subspace(&self, key: &T) { + self.informal() + .clear_subspace_range(&self.subspace.subspace(&self.subspace.pack(key))); + } + + pub fn read_entry TupleUnpack<'de>>( + &self, + entry: &Value, + ) -> Result<(T, T::Value)> { + let key = self.unpack::(entry.key())?; + let value = key.deserialize(entry.value()).with_context(|| { + format!( + "failed deserializing key value of {}", + std::any::type_name::() + ) + })?; + + Ok((key, value)) + } + + pub async fn cherry_pick( + &self, + subspace: impl TuplePack + Send, + isolation_level: IsolationLevel, + ) -> Result { + T::cherry_pick(self, subspace, isolation_level).await + } + + pub fn add_conflict_key( + &self, + key: &T, + conflict_type: ConflictRangeType, + ) -> Result<()> { + let key_buf = self.subspace.pack(key); + + self.driver + .add_conflict_range(&key_buf, &end_of_key_range(&key_buf), conflict_type) + .map_err(Into::into) + } + + pub fn atomic_op<'de, T: FormalKey + TuplePack + TupleUnpack<'de>>( + &self, + key: &'de T, + param: &[u8], + op_type: MutationType, + ) { + self.driver + .atomic_op(&self.subspace.pack(key), param, op_type) + } + + pub fn read_range<'a>( + &'a self, + opt: RangeOption<'a>, + isolation_level: IsolationLevel, + ) -> crate::value::Stream<'a, Value> { + let opt = RangeOption { + begin: KeySelector::new( + [self.subspace.bytes(), opt.begin.key()].concat().into(), + opt.begin.or_equal(), + opt.begin.offset(), + ), + end: KeySelector::new( + [self.subspace.bytes(), opt.end.key()].concat().into(), + opt.end.or_equal(), + opt.end.offset(), + ), + ..opt + }; + self.driver.get_ranges_keyvalues(opt, isolation_level) + } + + pub fn read_entries<'a, T: FormalKey + for<'de> TupleUnpack<'de>>( + &'a self, + opt: RangeOption<'a>, + isolation_level: IsolationLevel, + ) -> impl futures_util::Stream> { + self.driver + .get_ranges_keyvalues(opt, isolation_level) + .map(|res| self.read_entry(&res?)) + } + + // ==== TODO: Remove. 
all of these should only be used via `tx.informal()` ==== pub fn get<'a>( &'a self, key: &[u8], - snapshot: bool, - ) -> impl Future>> + 'a { - self.driver.get(key, snapshot) + isolation_level: IsolationLevel, + ) -> impl Future>> + 'a { + self.driver.get(key, isolation_level) } pub fn get_key<'a>( &'a self, selector: &KeySelector<'a>, - snapshot: bool, - ) -> impl Future> + 'a { - self.driver.get_key(selector, snapshot) + isolation_level: IsolationLevel, + ) -> impl Future> + 'a { + self.driver.get_key(selector, isolation_level) } pub fn get_range<'a>( &'a self, opt: &RangeOption<'a>, iteration: usize, - snapshot: bool, - ) -> impl Future> + 'a { - self.driver.get_range(opt, iteration, snapshot) + isolation_level: IsolationLevel, + ) -> impl Future> + 'a { + self.driver.get_range(opt, iteration, isolation_level) } pub fn get_ranges_keyvalues<'a>( &'a self, opt: RangeOption<'a>, - snapshot: bool, - ) -> crate::future::FdbStream<'a, crate::future::FdbValue> { - self.driver.get_ranges_keyvalues(opt, snapshot) + isolation_level: IsolationLevel, + ) -> crate::value::Stream<'a, Value> { + self.driver.get_ranges_keyvalues(opt, isolation_level) } - // Write operations pub fn set(&self, key: &[u8], value: &[u8]) { self.driver.set(key, value) } @@ -69,17 +232,9 @@ impl Transaction { self.driver.clear_range(begin, end) } - /// Clear all keys in a subspace range pub fn clear_subspace_range(&self, subspace: &Subspace) { let (begin, end) = subspace.range(); - self.clear_range(&begin, &end); - } - - pub fn commit( - self: Box, - ) -> Pin> + Send>> - { - self.driver.commit() + self.driver.clear_range(&begin, &end); } pub fn cancel(&self) { @@ -91,7 +246,7 @@ impl Transaction { begin: &[u8], end: &[u8], conflict_type: ConflictRangeType, - ) -> FdbResult<()> { + ) -> Result<()> { self.driver.add_conflict_range(begin, end, conflict_type) } @@ -99,23 +254,119 @@ impl Transaction { &'a self, begin: &'a [u8], end: &'a [u8], - ) -> Pin> + Send + 'a>> { + ) -> Pin> + Send + 'a>> { self.driver.get_estimated_range_size_bytes(begin, end) } } +pub struct InformalTransaction<'t> { + inner: &'t Transaction, +} + +impl<'t> InformalTransaction<'t> { + pub fn atomic_op(&self, key: &[u8], param: &[u8], op_type: MutationType) { + self.inner.driver.atomic_op(key, param, op_type) + } + + // Read operations + pub fn get<'a>( + &'a self, + key: &[u8], + isolation_level: IsolationLevel, + ) -> impl Future>> + 'a { + self.inner.driver.get(key, isolation_level) + } + + pub fn get_key<'a>( + &'a self, + selector: &KeySelector<'a>, + isolation_level: IsolationLevel, + ) -> impl Future> + 'a { + self.inner.driver.get_key(selector, isolation_level) + } + + pub fn get_range<'a>( + &'a self, + opt: &RangeOption<'a>, + iteration: usize, + isolation_level: IsolationLevel, + ) -> impl Future> + 'a { + self.inner.driver.get_range(opt, iteration, isolation_level) + } + + pub fn get_ranges_keyvalues<'a>( + &'a self, + opt: RangeOption<'a>, + isolation_level: IsolationLevel, + ) -> crate::value::Stream<'a, Value> { + self.inner.driver.get_ranges_keyvalues(opt, isolation_level) + } + + // Write operations + pub fn set(&self, key: &[u8], value: &[u8]) { + self.inner.driver.set(key, value) + } + + pub fn clear(&self, key: &[u8]) { + self.inner.driver.clear(key) + } + + pub fn clear_range(&self, begin: &[u8], end: &[u8]) { + self.inner.driver.clear_range(begin, end) + } + + /// Clear all keys in a subspace range + pub fn clear_subspace_range(&self, subspace: &Subspace) { + let (begin, end) = subspace.range(); + 
self.inner.driver.clear_range(&begin, &end); + } + + // pub fn commit(self: Box) -> Pin> + Send>> { + // self.inner.driver.commit() + // } + + pub fn cancel(&self) { + self.inner.driver.cancel() + } + + pub fn add_conflict_range( + &self, + begin: &[u8], + end: &[u8], + conflict_type: ConflictRangeType, + ) -> Result<()> { + self.inner + .driver + .add_conflict_range(begin, end, conflict_type) + } + + pub fn get_estimated_range_size_bytes<'a>( + &'a self, + begin: &'a [u8], + end: &'a [u8], + ) -> Pin> + Send + 'a>> { + self.inner.driver.get_estimated_range_size_bytes(begin, end) + } +} + /// Retryable transaction wrapper #[derive(Clone)] pub struct RetryableTransaction { - pub(crate) inner: Arc, + pub(crate) inner: Transaction, + pub(crate) maybe_committed: MaybeCommitted, } impl RetryableTransaction { pub fn new(transaction: Transaction) -> Self { RetryableTransaction { - inner: Arc::new(transaction), + inner: transaction, + maybe_committed: MaybeCommitted(false), } } + + pub fn maybe_committed(&self) -> MaybeCommitted { + self.maybe_committed + } } impl Deref for RetryableTransaction { @@ -125,11 +376,3 @@ impl Deref for RetryableTransaction { &self.inner } } - -impl RetryableTransaction { - /// Clear all keys in a subspace range - pub fn clear_subspace_range(&self, subspace: &Subspace) { - let (begin, end) = subspace.range(); - self.inner.clear_range(&begin, &end); - } -} diff --git a/packages/common/universaldb/src/tx_ops.rs b/packages/common/universaldb/src/tx_ops.rs index 52d4108a31..108619932d 100644 --- a/packages/common/universaldb/src/tx_ops.rs +++ b/packages/common/universaldb/src/tx_ops.rs @@ -1,10 +1,14 @@ +use std::collections::BTreeMap; + +use anyhow::Result; + use crate::{ - FdbResult, KeySelector, RangeOption, atomic::apply_atomic_op, - future::{FdbKeyValue, FdbSlice, FdbValues}, + key_selector::KeySelector, options::{ConflictRangeType, MutationType}, + range_option::RangeOption, + value::{KeyValue, Slice, Values}, }; -use std::collections::BTreeMap; #[derive(Debug, Clone)] pub enum Operation { @@ -134,14 +138,14 @@ impl TransactionOperations { &self, key: &[u8], get_from_db: F, - ) -> FdbResult> + ) -> Result> where F: FnOnce() -> Fut, - Fut: std::future::Future>>, + Fut: std::future::Future>>, { // Check local operations first match self.get(key) { - GetOutput::Value(value) => Ok(Some(value)), + GetOutput::Value(value) => Ok(Some(value.into())), GetOutput::Cleared => Ok(None), GetOutput::None => { // Fall back to database @@ -154,7 +158,12 @@ impl TransactionOperations { // Apply all atomic operations in order for (param, op_type) in atomic_ops { - result_value = apply_atomic_op(result_value.as_deref(), ¶m, op_type); + result_value = apply_atomic_op( + result_value.as_ref().map(|x| x.as_slice()), + ¶m, + op_type, + ) + .map(Into::into); } Ok(result_value) @@ -162,14 +171,10 @@ impl TransactionOperations { } } - pub async fn get_key( - &self, - selector: &KeySelector<'_>, - get_from_db: F, - ) -> FdbResult + pub async fn get_key(&self, selector: &KeySelector<'_>, get_from_db: F) -> Result where F: FnOnce() -> Fut, - Fut: std::future::Future>, + Fut: std::future::Future>, { // Get the database result first let db_key = get_from_db().await?; @@ -252,31 +257,27 @@ impl TransactionOperations { if db_key.as_slice() < local.as_slice() { Ok(db_key) } else { - Ok(local) + Ok(local.into()) } } else { // Return the larger key if db_key.as_slice() > local.as_slice() { Ok(db_key) } else { - Ok(local) + Ok(local.into()) } } } - (Some(local), _) => Ok(local), + (Some(local), _) => 
Ok(local.into()), (None, false) => Ok(db_key), - (None, true) => Ok(vec![]), + (None, true) => Ok(vec![].into()), } } - pub async fn get_range( - &self, - opt: &RangeOption<'_>, - get_from_db: F, - ) -> FdbResult + pub async fn get_range(&self, opt: &RangeOption<'_>, get_from_db: F) -> Result where F: FnOnce() -> Fut, - Fut: std::future::Future>, + Fut: std::future::Future>, { // Get database results let db_values = get_from_db().await?; @@ -349,10 +350,10 @@ impl TransactionOperations { let limit = opt.limit.unwrap_or(usize::MAX); for (key, value) in result_map.into_iter().take(limit) { - keyvalues.push(FdbKeyValue::new(key, value)); + keyvalues.push(KeyValue::new(key, value)); } - Ok(FdbValues::new(keyvalues)) + Ok(Values::new(keyvalues)) } pub fn clear_all(&mut self) { diff --git a/packages/common/universaldb/src/types.rs b/packages/common/universaldb/src/types.rs deleted file mode 100644 index b2b3dff74a..0000000000 --- a/packages/common/universaldb/src/types.rs +++ /dev/null @@ -1,49 +0,0 @@ -use std::{fmt, ops::Deref}; - -use crate::FdbError; - -pub struct TransactionCommitError { - pub err: FdbError, -} - -impl TransactionCommitError { - pub fn new(err: FdbError) -> Self { - Self { err } - } - - pub fn code(&self) -> i32 { - self.err.code() - } -} - -impl Deref for TransactionCommitError { - type Target = FdbError; - fn deref(&self) -> &FdbError { - &self.err - } -} - -impl From for FdbError { - fn from(tce: TransactionCommitError) -> FdbError { - tce.err - } -} - -impl fmt::Debug for TransactionCommitError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TransactionCommitError({})", self.err) - } -} - -impl fmt::Display for TransactionCommitError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - self.err.fmt(f) - } -} - -pub type TransactionCommitted = (); -pub type TransactionCancelled = (); - -/// Indicates the transaction might have committed -#[derive(Debug, Clone, Copy)] -pub struct MaybeCommitted(pub bool); diff --git a/packages/common/universaldb/src/utils.rs b/packages/common/universaldb/src/utils.rs deleted file mode 100644 index 811a0d1668..0000000000 --- a/packages/common/universaldb/src/utils.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::{KeySelector, RangeOption, future::FdbValues}; - -pub fn calculate_tx_retry_backoff(attempt: usize) -> u64 { - // TODO: Update this to mirror fdb 1:1: - // https://github.com/apple/foundationdb/blob/21407341d9b49e1d343514a7a5f395bd5f232079/fdbclient/NativeAPI.actor.cpp#L3162 - - let base_backoff_ms = 2_u64.pow((attempt as u32).min(10)) * 10; - - let jitter_ms = rand::random::() % 100; - - base_backoff_ms + jitter_ms -} - -pub fn next_range<'a>(mut range: RangeOption<'a>, kvs: &'a FdbValues) -> Option> { - if !kvs.more() { - return None; - } - - let last = kvs.iter().last()?; - let last_key = last.key(); - - if let Some(limit) = range.limit.as_mut() { - *limit = limit.saturating_sub(kvs.len()); - if *limit == 0 { - return None; - } - } - - if range.reverse { - range.end = KeySelector::first_greater_or_equal(last_key); - } else { - range.begin = KeySelector::first_greater_than(last_key); - } - - Some(range) -} diff --git a/packages/common/universaldb/src/utils/cherry_pick.rs b/packages/common/universaldb/src/utils/cherry_pick.rs new file mode 100644 index 0000000000..51a6262fbc --- /dev/null +++ b/packages/common/universaldb/src/utils/cherry_pick.rs @@ -0,0 +1,88 @@ +use anyhow::{Context, Result, ensure}; +use futures_util::TryStreamExt; + +use crate::{ + options::StreamingMode, + 
transaction::Transaction, + tuple::{TuplePack, TupleUnpack}, + utils::{FormalKey, IsolationLevel, Subspace}, +}; + +#[async_trait::async_trait] +pub trait CherryPick { + type Output; + + async fn cherry_pick( + tx: &Transaction, + subspace: S, + isolation_level: IsolationLevel, + ) -> Result; +} + +// Implements `CherryPick` for any tuple size +macro_rules! impl_tuple { + ($($args:ident),*) => { + #[async_trait::async_trait] + impl<$($args: FormalKey + for<'de> TupleUnpack<'de>),*> CherryPick for ($($args),*) + where + $($args::Value: Send),* + { + type Output = ($($args::Value),*); + + async fn cherry_pick( + tx: &Transaction, + subspace: S, + isolation_level: IsolationLevel, + ) -> Result { + let tx = tx.with_subspace(Subspace::new(&subspace)); + + let mut stream = tx.read_range( + $crate::range_option::RangeOption { + mode: StreamingMode::WantAll, + ..(&Subspace::all()).into() + }, + isolation_level, + ); + + $( + #[allow(non_snake_case)] + let mut $args = None; + )* + + loop { + let Some(entry) = stream.try_next().await? else { + break; + }; + + $( + if let Ok(key) = tx.unpack::<$args>(entry.key()) { + ensure!($args.is_none(), "{} already picked", std::any::type_name::<$args>()); + + let value = key.deserialize(entry.value())?; + $args = Some(value); + continue; + } + )* + } + + Ok(( + $( + $args.with_context(|| { + format!("key not found in cherry pick: {}", std::any::type_name::<$args>()) + })?, + )* + )) + } + } + } +} + +impl_tuple!(A, B); +impl_tuple!(A, B, C); +impl_tuple!(A, B, C, D); +impl_tuple!(A, B, C, D, E); +impl_tuple!(A, B, C, D, E, F); +impl_tuple!(A, B, C, D, E, F, G); +impl_tuple!(A, B, C, D, E, F, G, H); +impl_tuple!(A, B, C, D, E, F, G, H, I); +impl_tuple!(A, B, C, D, E, F, G, H, I, J); diff --git a/packages/common/udb-util/src/codes.rs b/packages/common/universaldb/src/utils/codes.rs similarity index 100% rename from packages/common/udb-util/src/codes.rs rename to packages/common/universaldb/src/utils/codes.rs diff --git a/packages/common/universaldb/src/utils/ext.rs b/packages/common/universaldb/src/utils/ext.rs new file mode 100644 index 0000000000..c18074c7d0 --- /dev/null +++ b/packages/common/universaldb/src/utils/ext.rs @@ -0,0 +1,58 @@ +use anyhow::{Context, Result}; + +use crate::{tuple::TupleUnpack, utils::FormalKey}; + +pub trait SliceExt { + fn read<'de, T: FormalKey + TupleUnpack<'de>>(&self, key: &'de T) -> Result; +} + +pub trait OptSliceExt { + fn read<'de, T: FormalKey + TupleUnpack<'de>>(&self, key: &'de T) -> Result; + fn read_opt<'de, T: FormalKey + TupleUnpack<'de>>( + &self, + key: &'de T, + ) -> Result>; +} + +impl SliceExt for crate::value::Slice { + fn read<'de, T: FormalKey + TupleUnpack<'de>>(&self, key: &'de T) -> Result { + key.deserialize(self).with_context(|| { + format!( + "failed deserializing key value of {}", + std::any::type_name::(), + ) + }) + } +} + +impl OptSliceExt for Option { + fn read<'de, T: FormalKey + TupleUnpack<'de>>(&self, key: &'de T) -> Result { + key.deserialize( + &self + .as_ref() + .with_context(|| format!("key should exist: {}", std::any::type_name::()))?, + ) + .with_context(|| { + format!( + "failed deserializing key value of {}", + std::any::type_name::(), + ) + }) + } + + fn read_opt<'de, T: FormalKey + TupleUnpack<'de>>( + &self, + key: &'de T, + ) -> Result> { + if let Some(data) = self { + key.deserialize(data).map(Some).with_context(|| { + format!( + "failed deserializing key value of {}", + std::any::type_name::(), + ) + }) + } else { + Ok(None) + } + } +} diff --git 
a/packages/common/udb-util/src/formal_key.rs b/packages/common/universaldb/src/utils/formal_key.rs similarity index 54% rename from packages/common/udb-util/src/formal_key.rs rename to packages/common/universaldb/src/utils/formal_key.rs index d3b00f36b0..39551d497b 100644 --- a/packages/common/udb-util/src/formal_key.rs +++ b/packages/common/universaldb/src/utils/formal_key.rs @@ -1,5 +1,6 @@ -use anyhow::*; -use universaldb::{self as udb, future::FdbValue}; +use anyhow::Result; + +use crate::value::Value; pub trait FormalKey { type Value; @@ -7,11 +8,6 @@ pub trait FormalKey { fn deserialize(&self, raw: &[u8]) -> Result; fn serialize(&self, value: Self::Value) -> Result>; - - fn read(&self, value: &[u8]) -> std::result::Result { - self.deserialize(value) - .map_err(|x| udb::FdbBindingError::CustomError(x.into())) - } } pub trait FormalChunkedKey { @@ -21,7 +17,7 @@ pub trait FormalChunkedKey { fn chunk(&self, chunk: usize) -> Self::ChunkKey; /// Assumes chunks are in order. - fn combine(&self, chunks: Vec) -> Result; + fn combine(&self, chunks: Vec) -> Result; fn split(&self, value: Self::Value) -> Result>>; } diff --git a/packages/common/udb-util/src/keys.rs b/packages/common/universaldb/src/utils/keys.rs similarity index 100% rename from packages/common/udb-util/src/keys.rs rename to packages/common/universaldb/src/utils/keys.rs diff --git a/packages/common/udb-util/src/lib.rs b/packages/common/universaldb/src/utils/mod.rs similarity index 69% rename from packages/common/udb-util/src/lib.rs rename to packages/common/universaldb/src/utils/mod.rs index c1036d82dd..8b6d673c87 100644 --- a/packages/common/udb-util/src/lib.rs +++ b/packages/common/universaldb/src/utils/mod.rs @@ -1,34 +1,38 @@ -use std::result::Result::Ok; - -use universaldb::tuple::{PackError, PackResult}; +use crate::tuple::{PackError, PackResult}; +mod cherry_pick; pub mod codes; mod ext; mod formal_key; pub mod keys; -mod metrics; mod subspace; +pub use cherry_pick::*; pub use ext::*; pub use formal_key::*; pub use subspace::Subspace; -/// Makes the code blatantly obvious if its using a snapshot read. 
-pub const SNAPSHOT: bool = true; -pub const SERIALIZABLE: bool = false; pub const CHUNK_SIZE: usize = 10_000; // 10 KB, not KiB, see https://apple.github.io/foundationdb/blob.html -pub mod prelude { - pub use universaldb::{ - FdbBindingError, KeySelector, RangeOption, - future::FdbValue, - options::StreamingMode, - tuple::{PackError, PackResult, TupleDepth, TuplePack, TupleUnpack, VersionstampOffset}, - }; +#[derive(Debug, Clone, Copy)] +pub enum IsolationLevel { + Serializable, + Snapshot, +} + +/// Indicates the transaction might have committed +#[derive(Debug, Clone, Copy)] +pub struct MaybeCommitted(pub bool); + +pub fn calculate_tx_retry_backoff(attempt: usize) -> u64 { + // TODO: Update this to mirror fdb 1:1: + // https://github.com/apple/foundationdb/blob/21407341d9b49e1d343514a7a5f395bd5f232079/fdbclient/NativeAPI.actor.cpp#L3162 + + let base_backoff_ms = 2_u64.pow((attempt as u32).min(10)) * 10; - pub use super::{FormalChunkedKey, FormalKey, OptSliceExt, SliceExt, TxnExt, keys::*}; + let jitter_ms = rand::random::() % 100; - pub use crate::{SERIALIZABLE, SNAPSHOT}; + base_backoff_ms + jitter_ms } /// When using `add_conflict_range` to add a conflict for a single key, you cannot set both the start and end diff --git a/packages/common/udb-util/src/subspace.rs b/packages/common/universaldb/src/utils/subspace.rs similarity index 88% rename from packages/common/udb-util/src/subspace.rs rename to packages/common/universaldb/src/utils/subspace.rs index 13a27f79b3..1c743d0fda 100644 --- a/packages/common/udb-util/src/subspace.rs +++ b/packages/common/universaldb/src/utils/subspace.rs @@ -1,10 +1,11 @@ use std::{borrow::Cow, ops::Deref}; -use rivet_metrics::KeyValue; -use universaldb::{ - KeySelector, RangeOption, +use crate::{ + key_selector::KeySelector, + range_option::RangeOption, tuple::{self, PackResult, TuplePack, TupleUnpack}, }; +use rivet_metrics::KeyValue; use crate::metrics; @@ -22,6 +23,12 @@ impl Subspace { } } + pub fn all() -> Self { + Self { + inner: tuple::Subspace::all(), + } + } + /// Returns a new Subspace whose prefix extends this Subspace with a given tuple encodable. 
pub fn subspace(&self, t: &T) -> Self { Self { @@ -63,6 +70,12 @@ impl Deref for Subspace { } } +impl From for Subspace { + fn from(value: tuple::Subspace) -> Self { + Subspace { inner: value } + } +} + impl<'a> From<&'a Subspace> for RangeOption<'static> { fn from(subspace: &Subspace) -> Self { let (begin, end) = subspace.range(); diff --git a/packages/common/universaldb/src/value.rs b/packages/common/universaldb/src/value.rs new file mode 100644 index 0000000000..8e556e7bb9 --- /dev/null +++ b/packages/common/universaldb/src/value.rs @@ -0,0 +1,159 @@ +use std::{ + ops::{Deref, DerefMut}, + pin::Pin, +}; + +use anyhow::Result; + +#[derive(Debug, PartialEq, Eq)] +pub struct Slice(Vec); + +impl Slice { + pub fn new() -> Self { + Slice(Vec::new()) + } +} + +impl Deref for Slice { + type Target = Vec; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl DerefMut for Slice { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} + +impl From> for Slice { + fn from(value: Vec) -> Self { + Slice(value) + } +} + +impl From for Vec { + fn from(value: Slice) -> Self { + value.0 + } +} + +#[derive(Debug, Clone)] +pub struct Value(KeyValue); + +impl Value { + pub fn new(key: Vec, value: Vec) -> Self { + Value(KeyValue::new(key, value)) + } + + pub fn from_keyvalue(kv: KeyValue) -> Self { + Value(kv) + } + + pub fn key(&self) -> &[u8] { + self.0.key() + } + + pub fn value(&self) -> &[u8] { + self.0.value() + } + + pub fn into_parts(self) -> (Vec, Vec) { + self.0.into_parts() + } +} + +// Values wraps a Vec to match FoundationDB API +#[derive(Debug, Clone)] +pub struct Values { + values: Vec, + more: bool, +} + +impl Values { + pub fn new(values: Vec) -> Self { + Values { + values, + more: false, + } + } + + pub fn with_more(values: Vec, more: bool) -> Self { + Values { values, more } + } + + pub fn more(&self) -> bool { + self.more + } + + pub fn into_vec(self) -> Vec { + self.values + } + + pub fn len(&self) -> usize { + self.values.len() + } + + pub fn is_empty(&self) -> bool { + self.values.is_empty() + } + + pub fn iter(&self) -> std::slice::Iter<'_, KeyValue> { + self.values.iter() + } + + pub fn into_iter(self) -> std::vec::IntoIter { + self.values.into_iter() + } +} + +// impl Deref for Values { +// type Target = [KeyValue]; +// fn deref(&self) -> &Self::Target { +// &self.values +// } +// } +// impl AsRef<[KeyValue]> for Values { +// fn as_ref(&self) -> &[KeyValue] { +// self.deref() +// } +// } + +// KeyValue type with key() and value() methods +#[derive(Debug, Clone)] +pub struct KeyValue { + key: Vec, + value: Vec, +} + +impl KeyValue { + pub fn new(key: Vec, value: Vec) -> Self { + KeyValue { key, value } + } + + pub fn key(&self) -> &[u8] { + &self.key + } + + pub fn value(&self) -> &[u8] { + &self.value + } + + pub fn into_parts(self) -> (Vec, Vec) { + (self.key, self.value) + } + + pub fn to_value(self) -> Value { + Value::from_keyvalue(self) + } + + pub fn value_ref(&self) -> Value { + Value::from_keyvalue(self.clone()) + } +} + +// Stream type for range queries - generic over item type +pub type Stream<'a, T = KeyValue> = + Pin> + Send + 'a>>; diff --git a/packages/common/universaldb/tests/integration.rs b/packages/common/universaldb/tests/integration.rs index a20f44f248..f066da4747 100644 --- a/packages/common/universaldb/tests/integration.rs +++ b/packages/common/universaldb/tests/integration.rs @@ -1,9 +1,13 @@ +use anyhow::Result; use rivet_test_deps_docker::TestDatabase; use std::{borrow::Cow, sync::Arc}; use universaldb::{ - Database, FdbBindingError, 
KeySelector, RangeOption, + Database, + key_selector::KeySelector, options::{ConflictRangeType, StreamingMode}, + range_option::RangeOption, tuple::{Element, Subspace, Versionstamp, pack_with_versionstamp}, + utils::IsolationLevel::*, versionstamp::generate_versionstamp, }; use uuid::Uuid; @@ -123,7 +127,7 @@ async fn run_all_tests(db: universaldb::Database) { async fn test_database_options(db: &Database) { use std::sync::Arc; use std::sync::atomic::{AtomicU32, Ordering}; - use universaldb::FdbError; + use universaldb::error::DatabaseError; use universaldb::options::DatabaseOption; // Test setting transaction retry limit @@ -135,7 +139,7 @@ async fn test_database_options(db: &Database) { let counter_clone = conflict_counter.clone(); let result = db - .run(|tx, _maybe_committed| { + .run(|tx| { let counter = counter_clone.clone(); async move { // Increment counter to track retry attempts @@ -143,7 +147,7 @@ async fn test_database_options(db: &Database) { // Force a retry on first few attempts by returning a retryable error if attempts < 3 { - return Err(FdbBindingError::from(FdbError::from_code(1020))); // not_committed + return Err(DatabaseError::NotCommitted.into()); } // Should succeed on the third attempt @@ -166,7 +170,7 @@ async fn test_database_options(db: &Database) { let counter_clone2 = conflict_counter2.clone(); let result = db - .run(|_tx, _maybe_committed| { + .run(|_tx| { let counter = counter_clone2.clone(); async move { // Increment counter to track retry attempts @@ -174,7 +178,7 @@ async fn test_database_options(db: &Database) { // Always force a retry if attempts < 10 { - return Err(FdbBindingError::from(FdbError::from_code(1020))); // not_committed + return Err(DatabaseError::NotCommitted.into()); } Ok(()) @@ -195,8 +199,8 @@ async fn test_database_options(db: &Database) { .unwrap(); } -async fn clear_test_namespace(db: &Database) -> Result<(), FdbBindingError> { - db.run(|tx, _maybe_committed| async move { +async fn clear_test_namespace(db: &Database) -> Result<()> { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let (begin, end) = test_subspace.range(); tx.clear_range(&begin, &end); @@ -207,7 +211,7 @@ async fn clear_test_namespace(db: &Database) -> Result<(), FdbBindingError> { async fn test_basic_operations(db: &Database) { // Test set and get using subspace and tuple syntax - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("key1",)); tx.set(&key, b"value1"); @@ -217,22 +221,22 @@ async fn test_basic_operations(db: &Database) { .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - assert_eq!(value, Some(b"value1".to_vec())); + assert_eq!(value, Some(b"value1".to_vec().into())); // Test get non-existent key let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -240,7 +244,7 @@ async fn test_basic_operations(db: &Database) { assert_eq!(value, None); // Test clear - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key 
= test_subspace.pack(&("key1",)); tx.clear(&key); @@ -250,10 +254,10 @@ async fn test_basic_operations(db: &Database) { .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -263,7 +267,7 @@ async fn test_basic_operations(db: &Database) { async fn test_range_operations(db: &Database) { // Setup test data using subspace keys - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key_a = test_subspace.pack(&("a",)); let key_b = test_subspace.pack(&("b",)); @@ -281,7 +285,7 @@ async fn test_range_operations(db: &Database) { // Test get_range using subspace range for keys "b" through "d" (exclusive) let results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key_b = test_subspace.pack(&("b",)); let key_d = test_subspace.pack(&("d",)); @@ -296,7 +300,7 @@ async fn test_range_operations(db: &Database) { ..RangeOption::default() }; - let vals = tx.get_range(&range, 1, false).await?; + let vals = tx.get_range(&range, 1, Serializable).await?; Ok(vals) }) .await @@ -316,7 +320,7 @@ async fn test_range_operations(db: &Database) { assert_eq!(values[1].value(), b"3"); // Test clear_range using subspace keys - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key_b = test_subspace.pack(&("b",)); let key_d = test_subspace.pack(&("d",)); @@ -328,7 +332,7 @@ async fn test_range_operations(db: &Database) { // Verify range was cleared let results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key_b = test_subspace.pack(&("b",)); let key_d = test_subspace.pack(&("d",)); @@ -343,7 +347,7 @@ async fn test_range_operations(db: &Database) { ..RangeOption::default() }; - let vals = tx.get_range(&range, 1, false).await?; + let vals = tx.get_range(&range, 1, Serializable).await?; Ok(vals) }) .await @@ -352,31 +356,31 @@ async fn test_range_operations(db: &Database) { // Verify keys outside range still exist let value_a = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("a",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - assert_eq!(value_a, Some(b"1".to_vec())); + assert_eq!(value_a, Some(b"1".to_vec().into())); let value_d = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("d",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - assert_eq!(value_d, Some(b"4".to_vec())); + assert_eq!(value_d, Some(b"4".to_vec().into())); } async fn test_transaction_isolation(db: &Database) { // Set initial value using subspace - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("counter",)); tx.set(&key, b"0"); @@ -387,18 +391,18 @@ async fn test_transaction_isolation(db: &Database) { // Test that each transaction sees consistent state let val1 = db - .run(|tx, _maybe_committed| async move { + .run(|tx| 
async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("counter",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - assert_eq!(val1, Some(b"0".to_vec())); + assert_eq!(val1, Some(b"0".to_vec().into())); // Set value in one transaction - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("counter",)); tx.set(&key, b"1"); @@ -409,20 +413,20 @@ async fn test_transaction_isolation(db: &Database) { // Verify the change is visible in new transaction let val3 = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("counter",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - assert_eq!(val3, Some(b"1".to_vec())); + assert_eq!(val3, Some(b"1".to_vec().into())); } async fn test_conflict_ranges(db: &Database) { // Test 1: Basic conflict range with read type - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("conflict",)); tx.set(&key, b"initial"); @@ -435,7 +439,7 @@ async fn test_conflict_ranges(db: &Database) { .unwrap(); // Test 2: Conflict range with write type - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("range_test1",)); let key2 = test_subspace.pack(&("range_test2",)); @@ -452,7 +456,7 @@ async fn test_conflict_ranges(db: &Database) { .unwrap(); // Test 3: Multiple conflict ranges - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); // Add multiple conflict ranges @@ -472,7 +476,7 @@ async fn test_conflict_ranges(db: &Database) { async fn test_get_key(db: &Database) { // Setup test data using subspace keys - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("k1",)); let key2 = test_subspace.pack(&("k2",)); @@ -488,11 +492,11 @@ async fn test_get_key(db: &Database) { // Test first_greater_or_equal let key = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let search_key = test_subspace.pack(&("k2",)); let selector = KeySelector::first_greater_or_equal(Cow::Owned(search_key)); - let k = tx.get_key(&selector, false).await?; + let k = tx.get_key(&selector, Serializable).await?; Ok(k) }) .await @@ -500,27 +504,27 @@ async fn test_get_key(db: &Database) { let test_subspace = Subspace::from("test"); let expected_key = test_subspace.pack(&("k2",)); - assert_eq!(key, expected_key); + assert_eq!(key, expected_key.into()); // Test with first_greater_than let key = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let search_key = test_subspace.pack(&("k1",)); let selector = KeySelector::first_greater_than(Cow::Owned(search_key)); - let k = tx.get_key(&selector, false).await?; + let k = tx.get_key(&selector, Serializable).await?; Ok(k) }) .await .unwrap(); let expected_key = test_subspace.pack(&("k2",)); - assert_eq!(key, expected_key); + assert_eq!(key, expected_key.into()); } async fn test_range_options(db: &Database) { // Setup test data - db.run(|tx, _maybe_committed| 
async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key_a = test_subspace.pack(&("range_a",)); let key_b = test_subspace.pack(&("range_b",)); @@ -540,7 +544,7 @@ async fn test_range_options(db: &Database) { // Test 1: first_greater_or_equal on both bounds (inclusive range [b, d)) let results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key_b = test_subspace.pack(&("range_b",)); let key_d = test_subspace.pack(&("range_d",)); @@ -555,7 +559,7 @@ async fn test_range_options(db: &Database) { ..RangeOption::default() }; - let vals = tx.get_range(&range, 1, false).await?; + let vals = tx.get_range(&range, 1, Serializable).await?; Ok(vals.into_vec()) }) .await @@ -575,7 +579,7 @@ async fn test_range_options(db: &Database) { // Test 2: first_greater_than on lower, first_greater_or_equal on upper (b, d) // Note: Some drivers may not correctly implement first_greater_than and include the boundary key let results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key_b = test_subspace.pack(&("range_b",)); let key_d = test_subspace.pack(&("range_d",)); @@ -590,7 +594,7 @@ async fn test_range_options(db: &Database) { ..RangeOption::default() }; - let vals = tx.get_range(&range, 1, false).await?; + let vals = tx.get_range(&range, 1, Serializable).await?; Ok(vals.into_vec()) }) .await @@ -617,7 +621,7 @@ async fn test_range_options(db: &Database) { // Test 3: first_greater_or_equal on lower, first_greater_than on upper [b, d] let results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key_b = test_subspace.pack(&("range_b",)); let key_d = test_subspace.pack(&("range_d",)); @@ -632,7 +636,7 @@ async fn test_range_options(db: &Database) { ..RangeOption::default() }; - let vals = tx.get_range(&range, 1, false).await?; + let vals = tx.get_range(&range, 1, Serializable).await?; Ok(vals.into_vec()) }) .await @@ -652,7 +656,7 @@ async fn test_range_options(db: &Database) { // Test 4: first_greater_than on both bounds (b, e) let results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key_b = test_subspace.pack(&("range_b",)); let key_e = test_subspace.pack(&("range_e",)); @@ -667,7 +671,7 @@ async fn test_range_options(db: &Database) { ..RangeOption::default() }; - let vals = tx.get_range(&range, 1, false).await?; + let vals = tx.get_range(&range, 1, Serializable).await?; Ok(vals.into_vec()) }) .await @@ -686,7 +690,7 @@ async fn test_range_options(db: &Database) { assert_eq!(results[2].value(), b"val_e"); // Clear test data - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let (begin, end) = test_subspace.range(); tx.clear_range(&begin, &end); @@ -698,7 +702,7 @@ async fn test_range_options(db: &Database) { async fn test_read_after_write(db: &Database) { // Test 1: Basic set and get within same transaction - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("raw_key1",)); @@ -706,12 +710,12 @@ async fn test_read_after_write(db: &Database) { tx.set(&key1, b"value1"); // Read it back immediately (read-after-write) - let value = tx.get(&key1, false).await?; - assert_eq!(value, Some(b"value1".to_vec())); + let value = tx.get(&key1, 
Serializable).await?; + assert_eq!(value, Some(b"value1".to_vec().into())); // Read a non-existent key let key2 = test_subspace.pack(&("raw_key2",)); - let value = tx.get(&key2, false).await?; + let value = tx.get(&key2, Serializable).await?; assert_eq!(value, None); Ok(()) @@ -720,19 +724,19 @@ async fn test_read_after_write(db: &Database) { .unwrap(); // Test 2: Clear and get - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("raw_key1",)); // First verify the key exists from previous test - let value = tx.get(&key1, false).await?; - assert_eq!(value, Some(b"value1".to_vec())); + let value = tx.get(&key1, Serializable).await?; + assert_eq!(value, Some(b"value1".to_vec().into())); // Clear it tx.clear(&key1); // Read should return None - let value = tx.get(&key1, false).await?; + let value = tx.get(&key1, Serializable).await?; assert_eq!(value, None); Ok(()) @@ -741,7 +745,7 @@ async fn test_read_after_write(db: &Database) { .unwrap(); // Test 3: Clear range and get - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key_a = test_subspace.pack(&("raw_a",)); let key_b = test_subspace.pack(&("raw_b",)); @@ -758,10 +762,16 @@ async fn test_read_after_write(db: &Database) { tx.clear_range(&key_b, &key_d); // Check values - assert_eq!(tx.get(&key_a, false).await?, Some(b"value_a".to_vec())); - assert_eq!(tx.get(&key_b, false).await?, None); - assert_eq!(tx.get(&key_c, false).await?, None); - assert_eq!(tx.get(&key_d, false).await?, Some(b"value_d".to_vec())); + assert_eq!( + tx.get(&key_a, Serializable).await?, + Some(b"value_a".to_vec().into()) + ); + assert_eq!(tx.get(&key_b, Serializable).await?, None); + assert_eq!(tx.get(&key_c, Serializable).await?, None); + assert_eq!( + tx.get(&key_d, Serializable).await?, + Some(b"value_d".to_vec().into()) + ); Ok(()) }) @@ -769,7 +779,7 @@ async fn test_read_after_write(db: &Database) { .unwrap(); // Test 4: Get range with local modifications - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let range_key1 = test_subspace.pack(&("range_key1",)); let range_key3 = test_subspace.pack(&("range_key3",)); @@ -786,7 +796,7 @@ async fn test_read_after_write(db: &Database) { end: KeySelector::first_greater_or_equal(Cow::Owned(end)), ..RangeOption::default() }; - let values = tx.get_range(&range_opt, 1, false).await?; + let values = tx.get_range(&range_opt, 1, Serializable).await?; let mut keys = Vec::new(); for kv in values.into_iter() { @@ -803,18 +813,27 @@ async fn test_read_after_write(db: &Database) { .unwrap(); // Test 5: Overwrite value multiple times - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("overwrite_key",)); tx.set(&key, b"value1"); - assert_eq!(tx.get(&key, false).await?, Some(b"value1".to_vec())); + assert_eq!( + tx.get(&key, Serializable).await?, + Some(b"value1".to_vec().into()) + ); tx.set(&key, b"value2"); - assert_eq!(tx.get(&key, false).await?, Some(b"value2".to_vec())); + assert_eq!( + tx.get(&key, Serializable).await?, + Some(b"value2".to_vec().into()) + ); tx.set(&key, b"value3"); - assert_eq!(tx.get(&key, false).await?, Some(b"value3".to_vec())); + assert_eq!( + tx.get(&key, Serializable).await?, + Some(b"value3".to_vec().into()) + ); Ok(()) }) @@ -824,7 +843,7 @@ async fn test_read_after_write(db: 
&Database) { async fn test_set_clear_set(db: &Database) { // Test the bug where set → clear → set sequence doesn't work correctly - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bug_key",)); @@ -838,10 +857,10 @@ async fn test_set_clear_set(db: &Database) { tx.set(&key, b"value2"); // This should return the latest value "value2", not None or Cleared - let value = tx.get(&key, false).await?; + let value = tx.get(&key, Serializable).await?; assert_eq!( value, - Some(b"value2".to_vec()), + Some(b"value2".to_vec().into()), "Expected to get the latest set value after set-clear-set sequence" ); @@ -853,7 +872,7 @@ async fn test_set_clear_set(db: &Database) { async fn test_get_key_with_local_writes(db: &Database) { // Setup: Store keys with values 2 and 10 in the database - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key2 = test_subspace.pack(&(2,)); let key10 = test_subspace.pack(&(10,)); @@ -866,7 +885,7 @@ async fn test_get_key_with_local_writes(db: &Database) { // Test: Write a key with value 5 in the transaction, then get_key with >= 3 let result_key = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); // Write key 5 in the transaction @@ -876,7 +895,7 @@ async fn test_get_key_with_local_writes(db: &Database) { // Use get_key with >= 3 selector let search_key = test_subspace.pack(&(3,)); let selector = KeySelector::first_greater_or_equal(Cow::Owned(search_key)); - let k = tx.get_key(&selector, false).await?; + let k = tx.get_key(&selector, Serializable).await?; Ok(k) }) .await @@ -886,13 +905,14 @@ async fn test_get_key_with_local_writes(db: &Database) { let test_subspace = Subspace::from("test"); let expected_key5 = test_subspace.pack(&(5,)); assert_eq!( - result_key, expected_key5, + result_key, + expected_key5.clone().into(), "get_key should return key 5 from local writes, not key 10 from database" ); // Test with first_greater_than let result_key = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); // Write key 5 in the transaction @@ -902,7 +922,7 @@ async fn test_get_key_with_local_writes(db: &Database) { // Use get_key with > 4 selector let search_key = test_subspace.pack(&(4,)); let selector = KeySelector::first_greater_than(Cow::Owned(search_key)); - let k = tx.get_key(&selector, false).await?; + let k = tx.get_key(&selector, Serializable).await?; Ok(k) }) .await @@ -910,14 +930,15 @@ async fn test_get_key_with_local_writes(db: &Database) { // Should return key5, not key10 assert_eq!( - result_key, expected_key5, + result_key, + expected_key5.into(), "get_key with > selector should return key 5 from local writes" ); } async fn test_snapshot_reads(db: &Database) { // Setup: Store initial data in the database - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("snap_key1",)); let key2 = test_subspace.pack(&("snap_key2",)); @@ -932,7 +953,7 @@ async fn test_snapshot_reads(db: &Database) { .unwrap(); // Test 1: Just snapshot reads - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("snap_key1",)); let key2 = test_subspace.pack(&("snap_key2",)); @@ -940,10 +961,10 @@ async fn test_snapshot_reads(db: 
&Database) { let key4 = test_subspace.pack(&("snap_key4",)); // Snapshot read should see database value - let snapshot_value = tx.get(&key1, true).await?; + let snapshot_value = tx.get(&key1, Snapshot).await?; assert_eq!( snapshot_value, - Some(b"db_value1".to_vec()), + Some(b"db_value1".to_vec().into()), "Snapshot read should see database value" ); @@ -957,7 +978,7 @@ async fn test_snapshot_reads(db: &Database) { }; // Snapshot range read - let snapshot_values = tx.get_range(&range_opt, 1, true).await?; + let snapshot_values = tx.get_range(&range_opt, 1, Snapshot).await?; let mut snapshot_keys = Vec::new(); for kv in snapshot_values.into_iter() { snapshot_keys.push((kv.key().to_vec(), kv.value().to_vec())); @@ -987,7 +1008,7 @@ async fn test_snapshot_reads(db: &Database) { .unwrap(); // Test 2: Snapshot read should skip local set operations within a transaction - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("snap_key1",)); @@ -995,18 +1016,18 @@ async fn test_snapshot_reads(db: &Database) { tx.set(&key1, b"local_value1"); // Non-snapshot read should see local value - let value = tx.get(&key1, false).await?; + let value = tx.get(&key1, Serializable).await?; assert_eq!( value, - Some(b"local_value1".to_vec()), + Some(b"local_value1".to_vec().into()), "Non-snapshot read should see local write" ); // Snapshot read should see local write - let snapshot_value = tx.get(&key1, true).await?; + let snapshot_value = tx.get(&key1, Snapshot).await?; assert_eq!( snapshot_value, - Some(b"local_value1".to_vec()), + Some(b"local_value1".to_vec().into()), "Snapshot read should see local write" ); @@ -1018,7 +1039,7 @@ async fn test_snapshot_reads(db: &Database) { // Reset state { clear_test_namespace(&db).await.unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("snap_key1",)); let key2 = test_subspace.pack(&("snap_key2",)); @@ -1034,7 +1055,7 @@ async fn test_snapshot_reads(db: &Database) { } // Test 3: Snapshot read should skip local clear operations - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key2 = test_subspace.pack(&("snap_key2",)); @@ -1042,11 +1063,11 @@ async fn test_snapshot_reads(db: &Database) { tx.clear(&key2); // Non-snapshot read should see None (cleared) - let value = tx.get(&key2, false).await?; + let value = tx.get(&key2, Serializable).await?; assert_eq!(value, None, "Non-snapshot read should see cleared value"); // Snapshot read should still see database value - let snapshot_value = tx.get(&key2, true).await?; + let snapshot_value = tx.get(&key2, Snapshot).await?; assert_eq!(snapshot_value, None, "Snapshot read should see local clear"); Ok(()) @@ -1057,7 +1078,7 @@ async fn test_snapshot_reads(db: &Database) { // Reset state { clear_test_namespace(&db).await.unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("snap_key1",)); let key2 = test_subspace.pack(&("snap_key2",)); @@ -1073,7 +1094,7 @@ async fn test_snapshot_reads(db: &Database) { } // Test 4: Snapshot get_range should skip local operations - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("snap_key1",)); let key2 = test_subspace.pack(&("snap_key2",)); 
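Editor's note on the hunks above and below: every change in this test file applies the same mechanical migration of the universaldb transaction API, so the pattern is easier to read once, condensed, than hunk by hunk. The sketch that follows is not part of the patch; it only reuses calls that already appear in these hunks (`db.run(|tx| ...)`, the `Serializable`/`Snapshot` isolation levels, `tx.informal().atomic_op(...)`, `DatabaseError::NotCommitted`), while the `migration_sketch` helper, its key names, and the guard around the retry error are assumptions made purely for illustration.

// Old API, as removed by this patch:
//     db.run(|tx, _maybe_committed| async move {
//         let val = tx.get(&key, false).await?;                        // bool snapshot flag
//         tx.atomic_op(&key, &1i64.to_le_bytes(), MutationType::Add);
//         return Err(FdbBindingError::from(FdbError::from_code(1020))); // not_committed
//     })
//
// New API, as introduced by this patch (illustrative sketch only, not part of the diff):
async fn migration_sketch(db: &universaldb::Database) {
	use universaldb::error::DatabaseError;
	use universaldb::options::MutationType;
	use universaldb::tuple::Subspace;
	use universaldb::utils::IsolationLevel::*;

	db.run(|tx| async move {
		let subspace = Subspace::from("test");
		let key = subspace.pack(&("sketch_key",));
		let counter = subspace.pack(&("sketch_counter",));

		// Writes are unchanged by the migration.
		tx.set(&key, b"value1");

		// Reads take an IsolationLevel in place of the old `snapshot: bool`.
		let serializable = tx.get(&key, Serializable).await?;
		let snapshot = tx.get(&key, Snapshot).await?;

		// Values come back as a byte container rather than Vec<u8>, hence the
		// `.into()` / `.as_slice()` conversions used throughout these hunks.
		assert_eq!(serializable, Some(b"value1".to_vec().into()));
		assert_eq!(snapshot.unwrap().as_slice(), b"value1");

		// Atomic mutations now go through `tx.informal()`.
		tx.informal()
			.atomic_op(&counter, &1i64.to_le_bytes(), MutationType::Add);

		// Retryable conflicts are reported with DatabaseError instead of
		// FdbError::from_code(1020); the guard keeps this sketch from retrying.
		if serializable.is_none() {
			return Err(DatabaseError::NotCommitted.into());
		}

		Ok(())
	})
	.await
	.unwrap();
}

The same substitutions account for the remaining hunks in this file, as well as the gasoline integration test and the `udb_util` → `universaldb::utils` path changes further below.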
@@ -1095,7 +1116,7 @@ async fn test_snapshot_reads(db: &Database) { }; // Non-snapshot range read - let values = tx.get_range(&range_opt, 1, false).await?; + let values = tx.get_range(&range_opt, 1, Serializable).await?; let mut non_snapshot_keys = Vec::new(); for kv in values.into_iter() { non_snapshot_keys.push((kv.key().to_vec(), kv.value().to_vec())); @@ -1121,7 +1142,7 @@ async fn test_snapshot_reads(db: &Database) { ); // Snapshot range read - let snapshot_values = tx.get_range(&range_opt, 1, true).await?; + let snapshot_values = tx.get_range(&range_opt, 1, Snapshot).await?; let mut snapshot_keys = Vec::new(); for kv in snapshot_values.into_iter() { snapshot_keys.push((kv.key().to_vec(), kv.value().to_vec())); @@ -1154,7 +1175,7 @@ async fn test_snapshot_reads(db: &Database) { // Reset state { clear_test_namespace(&db).await.unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key1 = test_subspace.pack(&("snap_key1",)); let key2 = test_subspace.pack(&("snap_key2",)); @@ -1170,7 +1191,7 @@ async fn test_snapshot_reads(db: &Database) { } // Test 5: Snapshot get_key should skip local operations - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); // Add a local key between existing database keys @@ -1180,13 +1201,18 @@ async fn test_snapshot_reads(db: &Database) { // Non-snapshot get_key >= "snap_key14" should find the local key15 let search_key = test_subspace.pack(&("snap_key14",)); let selector = KeySelector::first_greater_or_equal(Cow::Owned(search_key)); - let result = tx.get_key(&selector, false).await?; - assert_eq!(result, key15, "Non-snapshot get_key should find local key"); + let result = tx.get_key(&selector, Serializable).await?; + assert_eq!( + result, + key15.clone().into(), + "Non-snapshot get_key should find local key" + ); // Snapshot get_key >= "snap_key14" should find the local key15 - let snapshot_result = tx.get_key(&selector, true).await?; + let snapshot_result = tx.get_key(&selector, Snapshot).await?; assert_eq!( - snapshot_result, key15, + snapshot_result, + key15.into(), "Snapshot get_key should find local key" ); @@ -1234,12 +1260,13 @@ async fn test_atomic_add(db: &Database) { use universaldb::options::MutationType; // Test 1: Add to non-existent key (should treat as 0) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_key1",)); // Add 42 to non-existent key - tx.atomic_op(&key, &42i64.to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &42i64.to_le_bytes(), MutationType::Add); Ok(()) }) .await @@ -1247,73 +1274,75 @@ async fn test_atomic_add(db: &Database) { // Verify the result let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!( result, 42, "Add to non-existent key should equal the parameter" ); // Test 2: Add to existing value - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_key1",)); // Add 10 to existing 
value (42) - tx.atomic_op(&key, &10i64.to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &10i64.to_le_bytes(), MutationType::Add); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 52, "42 + 10 should equal 52"); // Test 3: Add negative number - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_key1",)); // Add -20 to existing value (52) - tx.atomic_op(&key, &(-20i64).to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &(-20i64).to_le_bytes(), MutationType::Add); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 32, "52 + (-20) should equal 32"); // Test 4: Test wrapping behavior with overflow - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_overflow",)); @@ -1324,28 +1353,29 @@ async fn test_atomic_add(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_overflow",)); // Add 1 to max i64 (should wrap) - tx.atomic_op(&key, &1i64.to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &1i64.to_le_bytes(), MutationType::Add); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("add_overflow",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, i64::MIN, "Max i64 + 1 should wrap to min i64"); } @@ -1353,7 +1383,7 @@ async fn test_atomic_bitwise(db: &Database) { use universaldb::options::MutationType; // Test BitAnd operation - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_and",)); @@ -1364,22 +1394,23 @@ async fn test_atomic_bitwise(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_and",)); // AND with 0b10101010 (170) - tx.atomic_op(&key, &[0b10101010], MutationType::BitAnd); + tx.informal() + .atomic_op(&key, &[0b10101010], MutationType::BitAnd); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| 
async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_and",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -1392,7 +1423,7 @@ async fn test_atomic_bitwise(db: &Database) { ); // Test BitOr operation - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_or",)); @@ -1403,22 +1434,23 @@ async fn test_atomic_bitwise(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_or",)); // OR with 0b00001111 (15) - tx.atomic_op(&key, &[0b00001111], MutationType::BitOr); + tx.informal() + .atomic_op(&key, &[0b00001111], MutationType::BitOr); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_or",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -1431,7 +1463,7 @@ async fn test_atomic_bitwise(db: &Database) { ); // Test BitXor operation - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_xor",)); @@ -1442,22 +1474,23 @@ async fn test_atomic_bitwise(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_xor",)); // XOR with 0b10101010 (170) - tx.atomic_op(&key, &[0b10101010], MutationType::BitXor); + tx.informal() + .atomic_op(&key, &[0b10101010], MutationType::BitXor); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_xor",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -1470,7 +1503,7 @@ async fn test_atomic_bitwise(db: &Database) { ); // Test bitwise operations with different lengths - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_len",)); @@ -1481,22 +1514,23 @@ async fn test_atomic_bitwise(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_len",)); // AND with 1-byte value (should extend current to match param length) - tx.atomic_op(&key, &[0b10101010], MutationType::BitAnd); + tx.informal() + .atomic_op(&key, &[0b10101010], MutationType::BitAnd); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("bit_len",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -1518,61 +1552,63 @@ async fn test_atomic_append_if_fits(db: &Database) { use universaldb::options::MutationType; // Test 1: Append to non-existent key - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("append_key1",)); - tx.atomic_op(&key, b"hello", 
MutationType::AppendIfFits); + tx.informal() + .atomic_op(&key, b"hello", MutationType::AppendIfFits); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("append_key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"hello", "Append to non-existent key should create the key" ); // Test 2: Append to existing key - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("append_key1",)); - tx.atomic_op(&key, b" world", MutationType::AppendIfFits); + tx.informal() + .atomic_op(&key, b" world", MutationType::AppendIfFits); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("append_key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"hello world", "Append should concatenate values" ); // Test 3: Append that would exceed size limit (should not append) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("append_large",)); @@ -1584,23 +1620,24 @@ async fn test_atomic_append_if_fits(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("append_large",)); // Try to append 2KB more (should not fit) let append_value = vec![b'y'; 2000]; - tx.atomic_op(&key, &append_value, MutationType::AppendIfFits); + tx.informal() + .atomic_op(&key, &append_value, MutationType::AppendIfFits); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("append_large",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -1622,7 +1659,7 @@ async fn test_atomic_min_max(db: &Database) { use universaldb::options::MutationType; // Test Max operation - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("max_key",)); @@ -1634,55 +1671,57 @@ async fn test_atomic_min_max(db: &Database) { .unwrap(); // Max with larger value (should replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("max_key",)); - tx.atomic_op(&key, &20i64.to_le_bytes(), MutationType::Max); + tx.informal() + .atomic_op(&key, &20i64.to_le_bytes(), MutationType::Max); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("max_key",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 20, "Max 
should select the larger value"); // Max with smaller value (should not replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("max_key",)); - tx.atomic_op(&key, &15i64.to_le_bytes(), MutationType::Max); + tx.informal() + .atomic_op(&key, &15i64.to_le_bytes(), MutationType::Max); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("max_key",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 20, "Max should keep the larger value"); // Test Min operation - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("min_key",)); @@ -1694,75 +1733,78 @@ async fn test_atomic_min_max(db: &Database) { .unwrap(); // Min with smaller value (should replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("min_key",)); - tx.atomic_op(&key, &5i64.to_le_bytes(), MutationType::Min); + tx.informal() + .atomic_op(&key, &5i64.to_le_bytes(), MutationType::Min); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("min_key",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 5, "Min should select the smaller value"); // Min with larger value (should not replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("min_key",)); - tx.atomic_op(&key, &15i64.to_le_bytes(), MutationType::Min); + tx.informal() + .atomic_op(&key, &15i64.to_le_bytes(), MutationType::Min); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("min_key",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 5, "Min should keep the smaller value"); // Test Max/Min with non-existent key - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("max_nonexistent",)); - tx.atomic_op(&key, &42i64.to_le_bytes(), MutationType::Max); + tx.informal() + .atomic_op(&key, &42i64.to_le_bytes(), MutationType::Max); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("max_nonexistent",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, 
Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 42, "Max on non-existent key should set the value"); } @@ -1770,7 +1812,7 @@ async fn test_atomic_byte_min_max(db: &Database) { use universaldb::options::MutationType; // Test ByteMax operation (lexicographic comparison) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_max",)); @@ -1782,61 +1824,63 @@ async fn test_atomic_byte_min_max(db: &Database) { .unwrap(); // ByteMax with lexicographically larger string (should replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_max",)); - tx.atomic_op(&key, b"cherry", MutationType::ByteMax); + tx.informal() + .atomic_op(&key, b"cherry", MutationType::ByteMax); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_max",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"cherry", "ByteMax should select lexicographically larger value" ); // ByteMax with lexicographically smaller string (should not replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_max",)); - tx.atomic_op(&key, b"apple", MutationType::ByteMax); + tx.informal() + .atomic_op(&key, b"apple", MutationType::ByteMax); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_max",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"cherry", "ByteMax should keep lexicographically larger value" ); // Test ByteMin operation - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_min",)); @@ -1848,82 +1892,85 @@ async fn test_atomic_byte_min_max(db: &Database) { .unwrap(); // ByteMin with lexicographically smaller string (should replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_min",)); - tx.atomic_op(&key, b"apple", MutationType::ByteMin); + tx.informal() + .atomic_op(&key, b"apple", MutationType::ByteMin); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_min",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"apple", "ByteMin should select lexicographically smaller value" ); // ByteMin with lexicographically larger string (should not replace) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = 
test_subspace.pack(&("byte_min",)); - tx.atomic_op(&key, b"cherry", MutationType::ByteMin); + tx.informal() + .atomic_op(&key, b"cherry", MutationType::ByteMin); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_min",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"apple", "ByteMin should keep lexicographically smaller value" ); // Test ByteMin/ByteMax with non-existent key - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_nonexistent",)); - tx.atomic_op(&key, b"first", MutationType::ByteMin); + tx.informal() + .atomic_op(&key, b"first", MutationType::ByteMin); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("byte_nonexistent",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"first", "ByteMin on non-existent key should set the value" ); @@ -1933,7 +1980,7 @@ async fn test_atomic_compare_and_clear(db: &Database) { use universaldb::options::MutationType; // Test 1: Compare and clear with matching value - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_key1",)); @@ -1944,22 +1991,23 @@ async fn test_atomic_compare_and_clear(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_key1",)); // Compare and clear with matching value - tx.atomic_op(&key, b"target_value", MutationType::CompareAndClear); + tx.informal() + .atomic_op(&key, b"target_value", MutationType::CompareAndClear); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_key1",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -1968,7 +2016,7 @@ async fn test_atomic_compare_and_clear(db: &Database) { assert_eq!(value, None, "Key should be cleared when values match"); // Test 2: Compare and clear with non-matching value - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_key2",)); @@ -1979,50 +2027,52 @@ async fn test_atomic_compare_and_clear(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_key2",)); // Compare and clear with non-matching value - tx.atomic_op(&key, b"different_value", MutationType::CompareAndClear); + tx.informal() + .atomic_op(&key, b"different_value", MutationType::CompareAndClear); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_key2",)); - let val = tx.get(&key, false).await?; + 
let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"keep_this", "Key should remain unchanged when values don't match" ); // Test 3: Compare and clear on non-existent key - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_nonexistent",)); // Compare and clear on non-existent key (treated as empty value) - tx.atomic_op(&key, b"", MutationType::CompareAndClear); + tx.informal() + .atomic_op(&key, b"", MutationType::CompareAndClear); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_nonexistent",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -2034,7 +2084,7 @@ async fn test_atomic_compare_and_clear(db: &Database) { ); // Test 4: Compare and clear with empty value on existing key - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_empty",)); @@ -2045,22 +2095,23 @@ async fn test_atomic_compare_and_clear(db: &Database) { .await .unwrap(); - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_empty",)); // Compare and clear with empty value - tx.atomic_op(&key, b"", MutationType::CompareAndClear); + tx.informal() + .atomic_op(&key, b"", MutationType::CompareAndClear); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("cac_empty",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -2076,7 +2127,7 @@ async fn test_atomic_transaction_isolation(db: &Database) { use universaldb::options::MutationType; // Test that atomic operations within a transaction are visible to subsequent reads - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("isolation_key",)); @@ -2084,11 +2135,12 @@ async fn test_atomic_transaction_isolation(db: &Database) { tx.set(&key, &10i64.to_le_bytes()); // Perform atomic add - tx.atomic_op(&key, &5i64.to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &5i64.to_le_bytes(), MutationType::Add); // Read the value within the same transaction - let value = tx.get(&key, false).await?; - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let value = tx.get(&key, Serializable).await?; + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!( result, 15, @@ -2096,11 +2148,12 @@ async fn test_atomic_transaction_isolation(db: &Database) { ); // Perform another atomic operation - tx.atomic_op(&key, &3i64.to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &3i64.to_le_bytes(), MutationType::Add); // Read again - let value = tx.get(&key, false).await?; - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let value = tx.get(&key, Serializable).await?; + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!( result, 18, @@ -2114,7 +2167,7 @@ async fn test_atomic_transaction_isolation(db: 
&Database) { // Test that atomic operations are isolated between transactions // Set initial value in one transaction - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("isolation_key2",)); @@ -2125,11 +2178,12 @@ async fn test_atomic_transaction_isolation(db: &Database) { .unwrap(); // Perform atomic operation in another transaction - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("isolation_key2",)); - tx.atomic_op(&key, &50i64.to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &50i64.to_le_bytes(), MutationType::Add); Ok(()) }) .await @@ -2137,16 +2191,16 @@ async fn test_atomic_transaction_isolation(db: &Database) { // Verify the result in a third transaction let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("isolation_key2",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!( result, 150, "Atomic operation should be committed and visible in new transaction" @@ -2159,45 +2213,47 @@ async fn test_atomic_nonexistent_keys(db: &Database) { // Test atomic operations on non-existent keys behave correctly // Test Add (should treat as 0) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_add",)); - tx.atomic_op(&key, &42i64.to_le_bytes(), MutationType::Add); + tx.informal() + .atomic_op(&key, &42i64.to_le_bytes(), MutationType::Add); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_add",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!(result, 42, "Add on non-existent key should treat as 0"); // Test BitOr (should treat as 0) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_or",)); - tx.atomic_op(&key, &[0b11110000], MutationType::BitOr); + tx.informal() + .atomic_op(&key, &[0b11110000], MutationType::BitOr); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_or",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -2210,75 +2266,78 @@ async fn test_atomic_nonexistent_keys(db: &Database) { ); // Test Max (should set the parameter value) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_max",)); - tx.atomic_op(&key, &123i64.to_le_bytes(), MutationType::Max); + tx.informal() + .atomic_op(&key, &123i64.to_le_bytes(), MutationType::Max); Ok(()) }) 
.await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_max",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); - let result = i64::from_le_bytes(value.unwrap().try_into().unwrap()); + let result = i64::from_le_bytes(Vec::from(value.unwrap()).try_into().unwrap()); assert_eq!( result, 123, "Max on non-existent key should set the parameter value" ); // Test ByteMin (should set the parameter value) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_bytemin",)); - tx.atomic_op(&key, b"hello", MutationType::ByteMin); + tx.informal() + .atomic_op(&key, b"hello", MutationType::ByteMin); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_bytemin",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await .unwrap(); assert_eq!( - value.unwrap(), + value.unwrap().as_slice(), b"hello", "ByteMin on non-existent key should set the parameter value" ); // Test CompareAndClear with empty comparison (should clear since non-existent = empty) - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_cac",)); - tx.atomic_op(&key, b"", MutationType::CompareAndClear); + tx.informal() + .atomic_op(&key, b"", MutationType::CompareAndClear); Ok(()) }) .await .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test"); let key = test_subspace.pack(&("nonexistent_cac",)); - let val = tx.get(&key, false).await?; + let val = tx.get(&key, Serializable).await?; Ok(val) }) .await @@ -2292,7 +2351,7 @@ async fn test_atomic_nonexistent_keys(db: &Database) { async fn test_versionstamps(db: &Database) { // Test 1: Basic versionstamp insertion and ordering within a single transaction - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test_vs"); // Create multiple values with incomplete versionstamps in the same transaction @@ -2317,7 +2376,7 @@ async fn test_versionstamps(db: &Database) { // Verify that versionstamps were substituted and have the same transaction version // but different user versions (counter values) let results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test_vs"); let (begin, end) = test_subspace.range(); @@ -2327,7 +2386,7 @@ async fn test_versionstamps(db: &Database) { ..RangeOption::default() }; - let values = tx.get_range(&range_opt, 1, false).await?; + let values = tx.get_range(&range_opt, 1, Serializable).await?; let mut results = Vec::new(); for kv in values.into_iter() { @@ -2375,7 +2434,7 @@ async fn test_versionstamps(db: &Database) { for i in 0..3 { let vs = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test_vs"); let incomplete = Versionstamp::from([0xff; 12]); @@ -2401,7 +2460,7 @@ async fn test_versionstamps(db: &Database) { // Read back and verify ordering let multi_tx_results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| 
async move { let test_subspace = Subspace::from("test_vs"); let begin = test_subspace.pack(&("tx_",)); let end = test_subspace.pack(&("tx_z",)); @@ -2412,7 +2471,7 @@ async fn test_versionstamps(db: &Database) { ..RangeOption::default() }; - let values = tx.get_range(&range_opt, 1, false).await?; + let values = tx.get_range(&range_opt, 1, Serializable).await?; let mut results = Vec::new(); for kv in values.into_iter() { @@ -2444,7 +2503,7 @@ async fn test_versionstamps(db: &Database) { } // Test 3: Already complete versionstamps should not be modified - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test_vs"); // Create a complete versionstamp manually @@ -2468,10 +2527,10 @@ async fn test_versionstamps(db: &Database) { // Read back and verify the versionstamp remains unchanged let complete_result = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test_vs"); let key = test_subspace.pack(&("complete_entry",)); - let value = tx.get(&key, false).await?.unwrap(); + let value = tx.get(&key, Serializable).await?.unwrap(); let unpacked: Vec = universaldb::tuple::unpack(&value).unwrap(); if let Element::Versionstamp(vs) = &unpacked[1] { @@ -2488,7 +2547,7 @@ async fn test_versionstamps(db: &Database) { // Test 4: Verify correct count and order within a transaction // Insert 10 entries in one transaction and verify they have sequential counters - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test_vs"); for i in 0..10 { @@ -2510,7 +2569,7 @@ async fn test_versionstamps(db: &Database) { // Read back and verify count and ordering let count_results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test_vs"); let begin = test_subspace.pack(&("count_",)); let end = test_subspace.pack(&("count_z",)); @@ -2521,7 +2580,7 @@ async fn test_versionstamps(db: &Database) { ..RangeOption::default() }; - let values = tx.get_range(&range_opt, 1, false).await?; + let values = tx.get_range(&range_opt, 1, Serializable).await?; let mut results = Vec::new(); for kv in values.into_iter() { @@ -2566,7 +2625,7 @@ async fn test_versionstamps(db: &Database) { } // Test 5: Mixed incomplete and complete versionstamps in same transaction - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { let test_subspace = Subspace::from("test_vs"); // Insert an incomplete versionstamp @@ -2598,7 +2657,7 @@ async fn test_versionstamps(db: &Database) { // Verify both were stored correctly let mixed_results = db - .run(|tx, _maybe_committed| async move { + .run(|tx| async move { let test_subspace = Subspace::from("test_vs"); let begin = test_subspace.pack(&("mixed_",)); let end = test_subspace.pack(&("mixed_z",)); @@ -2609,7 +2668,7 @@ async fn test_versionstamps(db: &Database) { ..RangeOption::default() }; - let values = tx.get_range(&range_opt, 1, false).await?; + let values = tx.get_range(&range_opt, 1, Serializable).await?; let mut results = Vec::new(); for kv in values.into_iter() { diff --git a/packages/common/universaldb/tests/integration_gas.rs b/packages/common/universaldb/tests/integration_gas.rs index c3b48b77aa..26aec44a9d 100644 --- a/packages/common/universaldb/tests/integration_gas.rs +++ b/packages/common/universaldb/tests/integration_gas.rs @@ -1,7 +1,9 @@ use futures_util::TryStreamExt; use rivet_test_deps_docker::TestDatabase; use std::sync::Arc; -use universaldb::{Database, 
RangeOption, options::StreamingMode, tuple::Subspace}; +use universaldb::{ + Database, RangeOption, options::StreamingMode, tuple::Subspace, utils::IsolationLevel::*, +}; use uuid::Uuid; #[tokio::test] @@ -51,7 +53,7 @@ pub async fn test_gasoline_operations(db: &Database) { println!("Running simple write/read test..."); // Simple test: write a single key and read it back - db.run(|tx, _maybe_committed| async move { + db.run(|tx| async move { tx.set(b"simple_test_key", b"simple_test_value"); Ok(()) }) @@ -59,8 +61,8 @@ pub async fn test_gasoline_operations(db: &Database) { .unwrap(); let value = db - .run(|tx, _maybe_committed| async move { - let val = tx.get(b"simple_test_key", false).await?; + .run(|tx| async move { + let val = tx.get(b"simple_test_key", Serializable).await?; println!( "Simple test read result: {:?}", val.as_ref() @@ -87,7 +89,7 @@ pub async fn test_gasoline_operations(db: &Database) { let workflow_id = Uuid::new_v4(); // Test 1: Write workflow data like gasoline does - db.run(|tx, _maybe_committed| { + db.run(|tx| { let workflow_subspace = workflow_subspace.clone(); async move { // Write create timestamp (similar to CreateTsKey) @@ -128,7 +130,7 @@ pub async fn test_gasoline_operations(db: &Database) { // Test 2: Read workflow data back like gasoline does let (input_found, state_found, wake_found) = db - .run(|tx, _maybe_committed| { + .run(|tx| { let workflow_subspace = workflow_subspace.clone(); async move { // Read input chunks using range query @@ -141,7 +143,7 @@ pub async fn test_gasoline_operations(db: &Database) { mode: StreamingMode::WantAll, ..(&input_subspace).into() }, - false, + Serializable, ) .try_collect::>() .await?; @@ -156,7 +158,7 @@ pub async fn test_gasoline_operations(db: &Database) { mode: StreamingMode::WantAll, ..(&state_subspace).into() }, - false, + Serializable, ) .try_collect::>() .await?; @@ -164,7 +166,7 @@ pub async fn test_gasoline_operations(db: &Database) { // Read wake condition let wake_condition_key = workflow_subspace.pack(&("workflow", "has_wake_condition", workflow_id)); - let wake_condition = tx.get(&wake_condition_key, false).await?; + let wake_condition = tx.get(&wake_condition_key, Serializable).await?; println!("Input chunks found: {}", input_chunks.len()); println!("State chunks found: {}", state_chunks.len()); @@ -185,7 +187,7 @@ pub async fn test_gasoline_operations(db: &Database) { assert!(wake_found, "Should find wake condition"); // Test 3: Test the exact pattern gasoline uses with subspace operations - db.run(|tx, _maybe_committed| { + db.run(|tx| { let workflow_subspace = workflow_subspace.clone(); async move { // Create a new workflow ID @@ -208,7 +210,7 @@ pub async fn test_gasoline_operations(db: &Database) { // Test 4: Verify the data was written correctly let workflow_id2 = db - .run(|tx, _maybe_committed| { + .run(|tx| { let workflow_subspace = workflow_subspace.clone(); async move { // Generate the same workflow_id2 again (for test purposes, we'll store it) @@ -223,10 +225,10 @@ pub async fn test_gasoline_operations(db: &Database) { tx.set(&chunk_key, b"test_data"); // Read it back in the same transaction - let value = tx.get(&chunk_key, false).await?; + let value = tx.get(&chunk_key, Serializable).await?; assert_eq!( value, - Some(b"test_data".to_vec()), + Some(b"test_data".to_vec().into()), "Should read back the same data" ); @@ -237,7 +239,7 @@ pub async fn test_gasoline_operations(db: &Database) { .unwrap(); // Test 5: Read in a separate transaction (like gasoline does) - db.run(|tx, _maybe_committed| { + 
db.run(|tx| { let workflow_subspace = workflow_subspace.clone(); async move { let input_key_base = workflow_subspace.pack(&("workflow", "input", workflow_id2)); @@ -245,7 +247,7 @@ pub async fn test_gasoline_operations(db: &Database) { // Read using range query like gasoline let input_chunks = tx - .get_ranges_keyvalues((&input_subspace).into(), false) + .get_ranges_keyvalues((&input_subspace).into(), Serializable) .try_collect::>() .await?; diff --git a/packages/common/util/id/Cargo.toml b/packages/common/util/id/Cargo.toml index 227be8b251..b2e5988d06 100644 --- a/packages/common/util/id/Cargo.toml +++ b/packages/common/util/id/Cargo.toml @@ -6,8 +6,8 @@ license.workspace = true edition.workspace = true [dependencies] -udb-util.workspace = true serde.workspace = true thiserror.workspace = true -uuid.workspace = true +universaldb.workspace = true utoipa.workspace = true +uuid.workspace = true diff --git a/packages/common/util/id/src/lib.rs b/packages/common/util/id/src/lib.rs index e10fef2781..c6c4c9338f 100644 --- a/packages/common/util/id/src/lib.rs +++ b/packages/common/util/id/src/lib.rs @@ -1,7 +1,7 @@ use std::{fmt, str::FromStr}; use thiserror::Error; -use udb_util::prelude::*; +use universaldb::prelude::*; use uuid::Uuid; #[derive(Debug, Error)] @@ -226,7 +226,7 @@ impl TuplePack for Id { ) -> std::io::Result { let mut offset = VersionstampOffset::None { size: 0 }; - w.write_all(&[udb_util::codes::ID])?; + w.write_all(&[universaldb::utils::codes::ID])?; let bytes = self.as_bytes(); @@ -242,14 +242,14 @@ impl TuplePack for Id { impl<'de> TupleUnpack<'de> for Id { fn unpack(input: &[u8], _tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> { - let input = udb_util::parse_code(input, udb_util::codes::ID)?; - let (_, version) = udb_util::parse_byte(input)?; + let input = universaldb::utils::parse_code(input, universaldb::utils::codes::ID)?; + let (_, version) = universaldb::utils::parse_byte(input)?; let (input, slice) = if version == 1 { // Parse 19 bytes including version - udb_util::parse_bytes(input, 19)? + universaldb::utils::parse_bytes(input, 19)? } else { - udb_util::parse_bytes(input, 1)? + universaldb::utils::parse_bytes(input, 1)? 
}; let v = Id::from_slice(slice) diff --git a/packages/core/actor-kv/Cargo.toml b/packages/core/actor-kv/Cargo.toml index f641fc2d05..8c92c45a1f 100644 --- a/packages/core/actor-kv/Cargo.toml +++ b/packages/core/actor-kv/Cargo.toml @@ -8,6 +8,7 @@ edition.workspace = true [dependencies] anyhow.workspace = true futures-util.workspace = true +rivet-runner-protocol.workspace = true rivet-util-id.workspace = true serde_bare.workspace = true serde.workspace = true @@ -15,8 +16,6 @@ tokio.workspace = true tracing-logfmt.workspace = true tracing-subscriber.workspace = true tracing.workspace = true -udb-util.workspace = true universaldb.workspace = true -rivet-runner-protocol.workspace = true pegboard.workspace = true diff --git a/packages/core/actor-kv/src/entry.rs b/packages/core/actor-kv/src/entry.rs index 143348be2f..81a57d0d96 100644 --- a/packages/core/actor-kv/src/entry.rs +++ b/packages/core/actor-kv/src/entry.rs @@ -1,7 +1,7 @@ use std::result::Result::Ok; use anyhow::*; -use udb_util::prelude::*; +use universaldb::prelude::*; use rivet_runner_protocol as rp; diff --git a/packages/core/actor-kv/src/key.rs b/packages/core/actor-kv/src/key.rs index a186caed6f..a10bbd2e44 100644 --- a/packages/core/actor-kv/src/key.rs +++ b/packages/core/actor-kv/src/key.rs @@ -20,12 +20,12 @@ impl TuplePack for KeyWrapper { ) -> std::io::Result { let mut offset = VersionstampOffset::None { size: 0 }; - w.write_all(&[udb_util::codes::NESTED])?; + w.write_all(&[universaldb::utils::codes::NESTED])?; offset += 1; offset += self.0.pack(w, tuple_depth.increment())?; - w.write_all(&[udb_util::codes::NIL])?; + w.write_all(&[universaldb::utils::codes::NIL])?; offset += 1; Ok(offset) @@ -34,11 +34,11 @@ impl TuplePack for KeyWrapper { impl<'de> TupleUnpack<'de> for KeyWrapper { fn unpack(input: &[u8], tuple_depth: TupleDepth) -> PackResult<(&[u8], Self)> { - let input = udb_util::parse_code(input, udb_util::codes::NESTED)?; + let input = universaldb::utils::parse_code(input, universaldb::utils::codes::NESTED)?; let (input, inner) = Bytes::unpack(input, tuple_depth.increment())?; - let input = udb_util::parse_code(input, udb_util::codes::NIL)?; + let input = universaldb::utils::parse_code(input, universaldb::utils::codes::NIL)?; Ok((input, KeyWrapper(inner.into_owned()))) } @@ -55,7 +55,7 @@ impl TuplePack for ListKeyWrapper { ) -> std::io::Result { let mut offset = VersionstampOffset::None { size: 0 }; - w.write_all(&[udb_util::codes::NESTED])?; + w.write_all(&[universaldb::utils::codes::NESTED])?; offset += 1; offset += self.0.pack(w, tuple_depth.increment())?; diff --git a/packages/core/actor-kv/src/lib.rs b/packages/core/actor-kv/src/lib.rs index 253e36c458..f42eb14280 100644 --- a/packages/core/actor-kv/src/lib.rs +++ b/packages/core/actor-kv/src/lib.rs @@ -6,8 +6,8 @@ use futures_util::{StreamExt, TryStreamExt}; use key::{KeyWrapper, ListKeyWrapper}; use rivet_runner_protocol as rp; use rivet_util_id::Id; -use udb_util::prelude::*; -use universaldb::{self as udb, tuple::Subspace}; +use universaldb::prelude::*; +use universaldb::tuple::Subspace; use utils::{validate_entries, validate_keys}; mod entry; @@ -22,12 +22,12 @@ const MAX_PUT_PAYLOAD_SIZE: usize = 976 * 1024; const MAX_STORAGE_SIZE: usize = 1024 * 1024 * 1024; // 1 GiB const VALUE_CHUNK_SIZE: usize = 10_000; // 10 KB, not KiB, see https://apple.github.io/foundationdb/blob.html -fn subspace(actor_id: Id) -> udb_util::Subspace { +fn subspace(actor_id: Id) -> universaldb::utils::Subspace { pegboard::keys::actor_kv_subspace().subspace(&actor_id) } /// 
Returns estimated size of the given subspace. -pub async fn get_subspace_size(db: &udb::Database, subspace: &Subspace) -> Result { +pub async fn get_subspace_size(db: &universaldb::Database, subspace: &Subspace) -> Result { let (start, end) = subspace.range(); // This txn does not have to be committed because we are not modifying any data @@ -39,30 +39,30 @@ pub async fn get_subspace_size(db: &udb::Database, subspace: &Subspace) -> Resul /// Gets keys from the KV store. pub async fn get( - db: &udb::Database, + db: &universaldb::Database, actor_id: Id, keys: Vec, ) -> Result<(Vec, Vec, Vec)> { validate_keys(&keys)?; - db.run(|tx, _mc| { + db.run(|tx| { let keys = keys.clone(); async move { - let txs = tx.subspace(subspace(actor_id)); + let tx = tx.with_subspace(subspace(actor_id)); let size_estimate = keys.len().min(1024); let mut stream = futures_util::stream::iter(keys) .map(|key| { - let key_subspace = txs.subspace(&KeyWrapper(key)); + let key_subspace = subspace(actor_id).subspace(&KeyWrapper(key)); // Get all sub keys in the key subspace - txs.get_ranges_keyvalues( - udb::RangeOption { - mode: udb::options::StreamingMode::WantAll, + tx.get_ranges_keyvalues( + universaldb::RangeOption { + mode: universaldb::options::StreamingMode::WantAll, ..key_subspace.range().into() }, - false, + Serializable, ) }) // Should remain in order @@ -79,13 +79,12 @@ pub async fn get( break; }; - let key = txs.unpack::(&entry.key())?.key; + let key = tx.unpack::(&entry.key())?.key; let current_entry = if let Some(inner) = &mut current_entry { if inner.key != key { - let (key, value, meta) = std::mem::replace(inner, EntryBuilder::new(key)) - .build() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let (key, value, meta) = + std::mem::replace(inner, EntryBuilder::new(key)).build()?; keys.push(key); values.push(value); @@ -99,25 +98,19 @@ pub async fn get( current_entry.as_mut().expect("must be set") }; - if let Ok(chunk_key) = txs.unpack::(&entry.key()) { + if let Ok(chunk_key) = tx.unpack::(&entry.key()) { current_entry.append_chunk(chunk_key.chunk, entry.value()); - } else if let Ok(metadata_key) = txs.unpack::(&entry.key()) { - let value = metadata_key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + } else if let Ok(metadata_key) = tx.unpack::(&entry.key()) { + let value = metadata_key.deserialize(entry.value())?; current_entry.append_metadata(value); } else { - return Err(udb::FdbBindingError::CustomError( - "unexpected sub key".into(), - )); + bail!("unexpected sub key"); } } if let Some(inner) = current_entry { - let (key, value, meta) = inner - .build() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let (key, value, meta) = inner.build()?; keys.push(key); values.push(value); @@ -133,7 +126,7 @@ pub async fn get( /// Gets keys from the KV store. 
pub async fn list( - db: &udb::Database, + db: &universaldb::Database, actor_id: Id, query: rp::KvListQuery, reverse: bool, @@ -145,20 +138,20 @@ pub async fn list( let subspace = subspace(actor_id); let list_range = list_query_range(query, &subspace); - db.run(|tx, _mc| { + db.run(|tx| { let list_range = list_range.clone(); let subspace = subspace.clone(); async move { - let txs = tx.subspace(subspace); + let tx = tx.with_subspace(subspace); - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { - mode: udb::options::StreamingMode::Iterator, + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { + mode: universaldb::options::StreamingMode::Iterator, reverse, ..list_range.into() }, - false, + Serializable, ); let mut keys = Vec::new(); @@ -171,13 +164,12 @@ pub async fn list( break; }; - let key = txs.unpack::(&entry.key())?.key; + let key = tx.unpack::(&entry.key())?.key; let curr = if let Some(inner) = &mut current_entry { if inner.key != key { - let (key, value, meta) = std::mem::replace(inner, EntryBuilder::new(key)) - .build() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let (key, value, meta) = + std::mem::replace(inner, EntryBuilder::new(key)).build()?; keys.push(key); values.push(value); @@ -196,25 +188,19 @@ pub async fn list( current_entry.as_mut().expect("must be set") }; - if let Ok(chunk_key) = txs.unpack::(&entry.key()) { + if let Ok(chunk_key) = tx.unpack::(&entry.key()) { curr.append_chunk(chunk_key.chunk, entry.value()); - } else if let Ok(metadata_key) = txs.unpack::(&entry.key()) { - let value = metadata_key - .deserialize(entry.value()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + } else if let Ok(metadata_key) = tx.unpack::(&entry.key()) { + let value = metadata_key.deserialize(entry.value())?; curr.append_metadata(value); } else { - return Err(udb::FdbBindingError::CustomError( - "unexpected sub key".into(), - )); + bail!("unexpected sub key"); } } if let Some(inner) = current_entry { - let (key, value, meta) = inner - .build() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let (key, value, meta) = inner.build()?; keys.push(key); values.push(value); @@ -230,7 +216,7 @@ pub async fn list( /// Puts keys into the KV store. 
pub async fn put( - db: &udb::Database, + db: &universaldb::Database, actor_id: Id, keys: Vec, values: Vec, @@ -240,27 +226,27 @@ pub async fn put( validate_entries(&keys, &values, total_size)?; - db.run(|tx, _mc| { + db.run(|tx| { // TODO: Costly clone let keys = keys.clone(); let values = values.clone(); let subspace = subspace.clone(); async move { - let txs = tx.subspace(subspace.clone()); + let tx = tx.with_subspace(subspace.clone()); futures_util::stream::iter(keys.into_iter().zip(values.into_iter())) .map(|(key, value)| { - let txs = txs.clone(); + let tx = tx.clone(); let key = KeyWrapper(key.clone()); let subspace = subspace.clone(); async move { // Clear previous key data before setting - txs.clear_subspace_range(&subspace.subspace(&key)); + tx.clear_subspace_range(&subspace.subspace(&key)); // Set metadata - txs.write( + tx.write( &EntryMetadataKey::new(key.clone()), rp::KvMetadata { version: VERSION.as_bytes().to_vec(), @@ -273,12 +259,9 @@ pub async fn put( let idx = start / VALUE_CHUNK_SIZE; let end = (start + VALUE_CHUNK_SIZE).min(value.len()); - txs.set( + tx.set( &subspace.pack(&EntryValueChunkKey::new(key.clone(), idx)), - &value - .get(start..end) - .context("bad slice") - .map_err(|err| udb::FdbBindingError::CustomError(err.into()))?, + &value.get(start..end).context("bad slice")?, ); } @@ -295,10 +278,10 @@ pub async fn put( } /// Deletes keys from the KV store. Cannot be undone. -pub async fn delete(db: &udb::Database, actor_id: Id, keys: Vec) -> Result<()> { +pub async fn delete(db: &universaldb::Database, actor_id: Id, keys: Vec) -> Result<()> { validate_keys(&keys)?; - db.run(|tx, _mc| { + db.run(|tx| { let keys = keys.clone(); async move { for key in keys { @@ -315,8 +298,8 @@ pub async fn delete(db: &udb::Database, actor_id: Id, keys: Vec) -> R } /// Deletes all keys from the KV store. Cannot be undone. 
-pub async fn delete_all(db: &udb::Database, actor_id: Id) -> Result<()> { - db.run(|tx, _mc| async move { +pub async fn delete_all(db: &universaldb::Database, actor_id: Id) -> Result<()> { + db.run(|tx| async move { tx.clear_subspace_range(&subspace(actor_id)); Ok(()) }) diff --git a/packages/core/guard/server/Cargo.toml b/packages/core/guard/server/Cargo.toml index ebf82c812f..0b9944e328 100644 --- a/packages/core/guard/server/Cargo.toml +++ b/packages/core/guard/server/Cargo.toml @@ -18,25 +18,22 @@ http-body.workspace = true http-body-util.workspace = true hyper-tungstenite.workspace = true tower.workspace = true -udb-util.workspace = true -universaldb.workspace = true -universalpubsub.workspace = true futures.workspace = true # TODO: Make this use workspace version hyper = "1.6.0" indoc.workspace = true once_cell.workspace = true -pegboard.workspace = true pegboard-gateway.workspace = true pegboard-tunnel.workspace = true +pegboard.workspace = true regex.workspace = true rivet-api-peer.workspace = true rivet-api-public.workspace = true rivet-cache.workspace = true rivet-config.workspace = true +rivet-data.workspace = true rivet-error.workspace = true rivet-guard-core.workspace = true -rivet-data.workspace = true rivet-logs.workspace = true rivet-metrics.workspace = true rivet-pools.workspace = true @@ -47,6 +44,8 @@ serde_json.workspace = true serde.workspace = true tokio.workspace = true tracing.workspace = true +universaldb.workspace = true +universalpubsub.workspace = true url.workspace = true uuid.workspace = true diff --git a/packages/core/guard/server/src/routing/pegboard_gateway.rs b/packages/core/guard/server/src/routing/pegboard_gateway.rs index 7015a8a686..4a142d7526 100644 --- a/packages/core/guard/server/src/routing/pegboard_gateway.rs +++ b/packages/core/guard/server/src/routing/pegboard_gateway.rs @@ -4,7 +4,7 @@ use anyhow::Result; use gas::prelude::*; use hyper::header::HeaderName; use rivet_guard_core::proxy_service::{RouteConfig, RouteTarget, RoutingOutput, RoutingTimeout}; -use udb_util::{SERIALIZABLE, TxnExt}; +use universaldb::utils::IsolationLevel::*; use crate::{errors, shared_state::SharedState}; @@ -98,17 +98,17 @@ async fn find_actor( let actor_res = tokio::time::timeout( Duration::from_secs(5), ctx.udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(pegboard::keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(pegboard::keys::subspace()); let workflow_id_key = pegboard::keys::actor::WorkflowIdKey::new(actor_id); let sleep_ts_key = pegboard::keys::actor::SleepTsKey::new(actor_id); let destroy_ts_key = pegboard::keys::actor::DestroyTsKey::new(actor_id); let (workflow_id_entry, sleeping, destroyed) = tokio::try_join!( - txs.read_opt(&workflow_id_key, SERIALIZABLE), - txs.exists(&sleep_ts_key, SERIALIZABLE), - txs.exists(&destroy_ts_key, SERIALIZABLE), + tx.read_opt(&workflow_id_key, Serializable), + tx.exists(&sleep_ts_key, Serializable), + tx.exists(&destroy_ts_key, Serializable), )?; let Some(workflow_id) = workflow_id_entry else { @@ -177,10 +177,13 @@ async fn find_actor( // Get runner key from runner_id let runner_key = ctx .udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(pegboard::keys::subspace()); - let key_key = pegboard::keys::runner::KeyKey::new(runner_id); - txs.read_opt(&key_key, SERIALIZABLE).await + .run(|tx| async move { + let tx = tx.with_subspace(pegboard::keys::subspace()); + tx.read_opt( + &pegboard::keys::runner::KeyKey::new(runner_id), + Serializable, + ) + .await }) .await? 
.context("runner key not found")?; diff --git a/packages/core/guard/server/src/shared_state.rs b/packages/core/guard/server/src/shared_state.rs index 71bc25939e..0462fad5c8 100644 --- a/packages/core/guard/server/src/shared_state.rs +++ b/packages/core/guard/server/src/shared_state.rs @@ -1,5 +1,4 @@ use anyhow::*; -use gas::prelude::*; use std::{ops::Deref, sync::Arc}; use universalpubsub::PubSub; diff --git a/packages/core/pegboard-gateway/Cargo.toml b/packages/core/pegboard-gateway/Cargo.toml index 6a3ddc446a..bf4eba0740 100644 --- a/packages/core/pegboard-gateway/Cargo.toml +++ b/packages/core/pegboard-gateway/Cargo.toml @@ -20,8 +20,8 @@ rivet-error.workspace = true rivet-guard-core.workspace = true rivet-tunnel-protocol.workspace = true rivet-util.workspace = true +thiserror.workspace = true tokio-tungstenite.workspace = true tokio.workspace = true universalpubsub.workspace = true versioned-data-util.workspace = true -thiserror.workspace = true diff --git a/packages/core/pegboard-serverless/Cargo.toml b/packages/core/pegboard-serverless/Cargo.toml index 78eaca5978..44b97cbea1 100644 --- a/packages/core/pegboard-serverless/Cargo.toml +++ b/packages/core/pegboard-serverless/Cargo.toml @@ -14,7 +14,6 @@ rivet-config.workspace = true rivet-runner-protocol.workspace = true rivet-types.workspace = true tracing.workspace = true -udb-util.workspace = true universaldb.workspace = true namespace.workspace = true diff --git a/packages/core/pegboard-serverless/src/lib.rs b/packages/core/pegboard-serverless/src/lib.rs index 9d6ff472bf..c069dc050e 100644 --- a/packages/core/pegboard-serverless/src/lib.rs +++ b/packages/core/pegboard-serverless/src/lib.rs @@ -14,8 +14,8 @@ use pegboard::keys; use reqwest_eventsource as sse; use rivet_runner_protocol::protocol; use tokio::{sync::oneshot, task::JoinHandle, time::Duration}; -use udb_util::{SNAPSHOT, TxnExt}; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::IsolationLevel::*; struct OutboundConnection { handle: JoinHandle<()>, @@ -54,25 +54,25 @@ async fn tick( ) -> Result<()> { let serverless_data = ctx .udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); - let serverless_desired_subspace = txs.subspace( + let serverless_desired_subspace = keys::subspace().subspace( &rivet_types::keys::pegboard::ns::ServerlessDesiredSlotsKey::entire_subspace(), ); - txs.get_ranges_keyvalues( - udb::RangeOption { + tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&serverless_desired_subspace).into() }, // NOTE: This is a snapshot to prevent conflict with updates to this subspace - SNAPSHOT, + Snapshot, ) .map(|res| match res { Ok(entry) => { let (key, desired_slots) = - txs.read_entry::(&entry)?; + tx.read_entry::(&entry)?; Ok((key.namespace_id, key.runner_name, desired_slots)) } diff --git a/packages/infra/engine/Cargo.toml b/packages/infra/engine/Cargo.toml index f141806f6c..b654a96003 100644 --- a/packages/infra/engine/Cargo.toml +++ b/packages/infra/engine/Cargo.toml @@ -46,7 +46,6 @@ tempfile.workspace = true thiserror.workspace = true tokio.workspace = true tracing.workspace = true -udb-util.workspace = true universaldb.workspace = true url.workspace = true uuid.workspace = true diff --git a/packages/infra/engine/src/commands/udb/cli.rs b/packages/infra/engine/src/commands/udb/cli.rs index 0149ea5678..b4926f9275 100644 --- a/packages/infra/engine/src/commands/udb/cli.rs +++ b/packages/infra/engine/src/commands/udb/cli.rs @@ -7,7 +7,7 @@ use clap::{Parser, ValueEnum}; use futures_util::TryStreamExt; use rivet_pools::UdbPool; use rivet_term::console::style; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::{options::StreamingMode, utils::IsolationLevel::*}; use crate::util::{ format::indent_string, @@ -123,11 +123,11 @@ impl SubCommand { return CommandResult::Error; } - let fut = pool.run(|tx, _mc| { + let fut = pool.run(|tx| { let current_tuple = current_tuple.clone(); async move { - let key = udb::tuple::pack(¤t_tuple); - let entry = tx.get(&key, true).await?; + let key = universaldb::tuple::pack(¤t_tuple); + let entry = tx.get(&key, Snapshot).await?; Ok(entry) } }); @@ -166,18 +166,18 @@ impl SubCommand { return CommandResult::Error; } - let subspace = udb::tuple::Subspace::all().subspace(¤t_tuple); + let subspace = universaldb::tuple::Subspace::all().subspace(¤t_tuple); - let fut = pool.run(|tx, _mc| { + let fut = pool.run(|tx| { let subspace = subspace.clone(); async move { let entries = tx .get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&subspace).into() }, - true, + Snapshot, ) .try_collect::>() .await?; @@ -355,21 +355,23 @@ impl SubCommand { return CommandResult::Error; } - let fut = pool.run(|tx, _mc| { + let fut = pool.run(|tx| { let old_tuple = old_tuple.clone(); let new_tuple = new_tuple.clone(); async move { if recursive { - let old_subspace = udb::tuple::Subspace::all().subspace(&old_tuple); - let new_subspace = udb::tuple::Subspace::all().subspace(&new_tuple); + let old_subspace = + universaldb::tuple::Subspace::all().subspace(&old_tuple); + let new_subspace = + universaldb::tuple::Subspace::all().subspace(&new_tuple); // Get all key-value pairs from the old subspace let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&old_subspace).into() }, - true, + Snapshot, ); let mut keys_moved = 0; @@ -393,10 +395,10 @@ impl SubCommand { Ok(keys_moved) } else { - let old_key = udb::tuple::pack(&old_tuple); - let 
new_key = udb::tuple::pack(&new_tuple); + let old_key = universaldb::tuple::pack(&old_tuple); + let new_key = universaldb::tuple::pack(&new_tuple); - let Some(value) = tx.get(&old_key, true).await? else { + let Some(value) = tx.get(&old_key, Snapshot).await? else { return Ok(0); }; @@ -451,14 +453,12 @@ impl SubCommand { } }; - let fut = pool.run(|tx, _mc| { + let fut = pool.run(|tx| { let current_tuple = current_tuple.clone(); let value = parsed_value.clone(); async move { - let key = udb::tuple::pack(¤t_tuple); - let value = value - .serialize() - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let key = universaldb::tuple::pack(¤t_tuple); + let value = value.serialize()?; tx.set(&key, &value); Ok(()) @@ -495,14 +495,15 @@ impl SubCommand { } } - let fut = pool.run(|tx, _mc| { + let fut = pool.run(|tx| { let current_tuple = current_tuple.clone(); async move { if clear_range { - let subspace = udb::tuple::Subspace::all().subspace(¤t_tuple); + let subspace = + universaldb::utils::Subspace::all().subspace(¤t_tuple); tx.clear_subspace_range(&subspace); } else { - let key = udb::tuple::pack(¤t_tuple); + let key = universaldb::tuple::pack(¤t_tuple); tx.clear(&key); } diff --git a/packages/infra/engine/src/util/udb.rs b/packages/infra/engine/src/util/udb.rs index 7165a891e5..0f1c3efd1b 100644 --- a/packages/infra/engine/src/util/udb.rs +++ b/packages/infra/engine/src/util/udb.rs @@ -43,7 +43,9 @@ impl SimpleTupleValue { SimpleTupleValue::Uuid(v) } else if let Ok(v) = rivet_util::Id::from_str(value) { SimpleTupleValue::Id(v) - } else if let (true, Some(v)) = (convert_keys, udb_util::prelude::key_from_str(value)) { + } else if let (true, Some(v)) = + (convert_keys, universaldb::utils::keys::key_from_str(value)) + { SimpleTupleValue::U64(v as u64) } else if nested && value.trim().starts_with('[') && value.trim().ends_with(']') { let mut items = Vec::new(); @@ -227,7 +229,9 @@ impl SimpleTupleValue { match &self { SimpleTupleValue::U64(v) => { if let Ok(v) = (*v).try_into() { - if let (true, Some(key)) = (convert_keys, udb_util::prelude::str_from_key(v)) { + if let (true, Some(key)) = + (convert_keys, universaldb::utils::keys::str_from_key(v)) + { write!( f, "{} {}", diff --git a/packages/services/epoxy/Cargo.toml b/packages/services/epoxy/Cargo.toml index c3c5564eeb..b20f72c94d 100644 --- a/packages/services/epoxy/Cargo.toml +++ b/packages/services/epoxy/Cargo.toml @@ -31,7 +31,6 @@ slog.workspace = true tokio.workspace = true tracing-slog.workspace = true tracing.workspace = true -udb-util.workspace = true universaldb.workspace = true url.workspace = true uuid.workspace = true diff --git a/packages/services/epoxy/src/keys/keys.rs b/packages/services/epoxy/src/keys/keys.rs index 0ad3cf0c38..bbe22ba2d3 100644 --- a/packages/services/epoxy/src/keys/keys.rs +++ b/packages/services/epoxy/src/keys/keys.rs @@ -1,7 +1,7 @@ use anyhow::*; use epoxy_protocol::protocol::ReplicaId; use std::result::Result::Ok; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug, Clone)] pub struct KvValueKey { diff --git a/packages/services/epoxy/src/keys/mod.rs b/packages/services/epoxy/src/keys/mod.rs index 8a582bbd83..68c8010140 100644 --- a/packages/services/epoxy/src/keys/mod.rs +++ b/packages/services/epoxy/src/keys/mod.rs @@ -1,9 +1,9 @@ use epoxy_protocol::protocol::ReplicaId; -use udb_util::prelude::*; +use universaldb::prelude::*; pub mod keys; pub mod replica; -pub fn subspace(replica_id: ReplicaId) -> udb_util::Subspace { - udb_util::Subspace::new(&(RIVET, EPOXY, REPLICA, 
replica_id)) +pub fn subspace(replica_id: ReplicaId) -> universaldb::utils::Subspace { + universaldb::utils::Subspace::new(&(RIVET, EPOXY, REPLICA, replica_id)) } diff --git a/packages/services/epoxy/src/keys/replica.rs b/packages/services/epoxy/src/keys/replica.rs index 7a8f83eaea..809abb5fa4 100644 --- a/packages/services/epoxy/src/keys/replica.rs +++ b/packages/services/epoxy/src/keys/replica.rs @@ -1,7 +1,7 @@ use anyhow::*; use epoxy_protocol::protocol::{ReplicaId, SlotId}; -use udb_util::prelude::*; -use versioned_data_util::OwnedVersionedData as _; +use universaldb::prelude::*; +use versioned_data_util::OwnedVersionedData; #[derive(Debug)] pub struct InstanceNumberKey; diff --git a/packages/services/epoxy/src/ops/explicit_prepare.rs b/packages/services/epoxy/src/ops/explicit_prepare.rs index 97a2d85133..9fef6278be 100644 --- a/packages/services/epoxy/src/ops/explicit_prepare.rs +++ b/packages/services/epoxy/src/ops/explicit_prepare.rs @@ -1,7 +1,6 @@ use anyhow::*; use epoxy_protocol::protocol::{self, ReplicaId}; use gas::prelude::*; -use universaldb::FdbBindingError; use crate::{http_client, replica, types, utils}; @@ -33,21 +32,13 @@ pub async fn explicit_prepare(ctx: &OperationCtx, input: &Input) -> Result Result { let value = ctx .udb()? - .run(|tx, _| { + .run(|tx| { let packed_key = packed_key.clone(); let kv_key = kv_key.clone(); async move { (async move { - let value = tx.get(&packed_key, false).await?; + let value = tx.get(&packed_key, Serializable).await?; if let Some(v) = value { Ok(Some(kv_key.deserialize(&v)?)) } else { @@ -39,7 +39,6 @@ pub async fn get_local(ctx: &OperationCtx, input: &Input) -> Result { } }) .await - .map_err(|e: anyhow::Error| universaldb::FdbBindingError::CustomError(e.into())) } }) .await?; diff --git a/packages/services/epoxy/src/ops/kv/get_optimistic.rs b/packages/services/epoxy/src/ops/kv/get_optimistic.rs index 4a00dadef2..1f9802e68a 100644 --- a/packages/services/epoxy/src/ops/kv/get_optimistic.rs +++ b/packages/services/epoxy/src/ops/kv/get_optimistic.rs @@ -1,7 +1,7 @@ use anyhow::*; use epoxy_protocol::protocol::{self, ReplicaId}; use gas::prelude::*; -use udb_util::FormalKey; +use universaldb::utils::{FormalKey, IsolationLevel::*}; use crate::{http_client, keys, utils}; @@ -45,7 +45,7 @@ pub async fn get_optimistic(ctx: &OperationCtx, input: &Input) -> Result let value = ctx .udb()? - .run(|tx, _| { + .run(|tx| { let packed_key = packed_key.clone(); let packed_cache_key = packed_cache_key.clone(); let kv_key = kv_key.clone(); @@ -54,7 +54,7 @@ pub async fn get_optimistic(ctx: &OperationCtx, input: &Input) -> Result (async move { let (value, cache_value) = tokio::try_join!( async { - let v = tx.get(&packed_key, false).await?; + let v = tx.get(&packed_key, Serializable).await?; if let Some(ref bytes) = v { Ok(Some(kv_key.deserialize(bytes)?)) } else { @@ -62,7 +62,7 @@ pub async fn get_optimistic(ctx: &OperationCtx, input: &Input) -> Result } }, async { - let v = tx.get(&packed_cache_key, false).await?; + let v = tx.get(&packed_cache_key, Serializable).await?; if let Some(ref bytes) = v { Ok(Some(cache_key.deserialize(bytes)?)) } else { @@ -74,7 +74,6 @@ pub async fn get_optimistic(ctx: &OperationCtx, input: &Input) -> Result Ok(value.or(cache_value)) }) .await - .map_err(|e: anyhow::Error| universaldb::FdbBindingError::CustomError(e.into())) } }) .await?; @@ -129,7 +128,7 @@ pub async fn get_optimistic(ctx: &OperationCtx, input: &Input) -> Result if let Some(value) = response { // Cache value ctx.udb()? 
- .run(|tx, _| { + .run(|tx| { let packed_cache_key = packed_cache_key.clone(); let cache_key = cache_key.clone(); let value_to_cache = value.clone(); @@ -140,9 +139,6 @@ pub async fn get_optimistic(ctx: &OperationCtx, input: &Input) -> Result Ok(()) }) .await - .map_err(|e: anyhow::Error| { - universaldb::FdbBindingError::CustomError(e.into()) - }) } }) .await?; diff --git a/packages/services/epoxy/src/ops/propose.rs b/packages/services/epoxy/src/ops/propose.rs index dafacc6d32..6f9b05f7dc 100644 --- a/packages/services/epoxy/src/ops/propose.rs +++ b/packages/services/epoxy/src/ops/propose.rs @@ -3,7 +3,6 @@ use epoxy_protocol::protocol::{self, Path, Payload, ReplicaId}; use gas::prelude::*; use rivet_api_builder::prelude::*; use rivet_config::Config; -use universaldb::FdbBindingError; use crate::{http_client, replica, utils}; @@ -34,23 +33,15 @@ pub async fn propose(ctx: &OperationCtx, input: &Input) -> Result Result Result { let config = ctx .udb()? - .run(|tx, _| { + .run(|tx| { let replica_id = input.replica_id; - async move { - utils::read_config(&tx, replica_id) - .await - .map_err(|e: anyhow::Error| universaldb::FdbBindingError::CustomError(e.into())) - } + async move { utils::read_config(&tx, replica_id).await } }) .await?; diff --git a/packages/services/epoxy/src/replica/ballot.rs b/packages/services/epoxy/src/replica/ballot.rs index f81aa92d6c..ae52749299 100644 --- a/packages/services/epoxy/src/replica/ballot.rs +++ b/packages/services/epoxy/src/replica/ballot.rs @@ -1,6 +1,7 @@ +use anyhow::Result; use epoxy_protocol::protocol; -use udb_util::FormalKey; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; +use universaldb::utils::{FormalKey, IsolationLevel::*}; use crate::keys; @@ -8,16 +9,14 @@ use crate::keys; pub async fn get_ballot( tx: &Transaction, replica_id: protocol::ReplicaId, -) -> Result { +) -> Result { let ballot_key = keys::replica::CurrentBallotKey; let subspace = keys::subspace(replica_id); let packed_key = subspace.pack(&ballot_key); - match tx.get(&packed_key, false).await? { + match tx.get(&packed_key, Serializable).await? { Some(bytes) => { - let ballot = ballot_key - .deserialize(&bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let ballot = ballot_key.deserialize(&bytes)?; Ok(ballot) } None => { @@ -35,7 +34,7 @@ pub async fn get_ballot( pub async fn increment_ballot( tx: &Transaction, replica_id: protocol::ReplicaId, -) -> Result { +) -> Result { let mut current_ballot = get_ballot(tx, replica_id).await?; // Increment ballot number @@ -45,9 +44,7 @@ pub async fn increment_ballot( let ballot_key = keys::replica::CurrentBallotKey; let subspace = keys::subspace(replica_id); let packed_key = subspace.pack(&ballot_key); - let serialized = ballot_key - .serialize(current_ballot.clone()) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let serialized = ballot_key.serialize(current_ballot.clone())?; tx.set(&packed_key, &serialized); @@ -75,18 +72,16 @@ pub async fn validate_and_update_ballot_for_instance( replica_id: protocol::ReplicaId, ballot: &protocol::Ballot, instance: &protocol::Instance, -) -> Result { +) -> Result { let instance_ballot_key = keys::replica::InstanceBallotKey::new(instance.replica_id, instance.slot_id); let subspace = keys::subspace(replica_id); let packed_key = subspace.pack(&instance_ballot_key); // Get the highest ballot seen for this instance - let highest_ballot = match tx.get(&packed_key, false).await? { + let highest_ballot = match tx.get(&packed_key, Serializable).await? 
{ Some(bytes) => { - let stored_ballot = instance_ballot_key - .deserialize(&bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let stored_ballot = instance_ballot_key.deserialize(&bytes)?; stored_ballot } None => { @@ -107,9 +102,7 @@ pub async fn validate_and_update_ballot_for_instance( // If the incoming ballot is higher, update our stored highest if compare_ballots(ballot, &highest_ballot) == std::cmp::Ordering::Greater { - let serialized = instance_ballot_key - .serialize(ballot.clone()) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let serialized = instance_ballot_key.serialize(ballot.clone())?; tx.set(&packed_key, &serialized); tracing::debug!(?ballot, ?instance, "updated highest ballot for instance"); diff --git a/packages/services/epoxy/src/replica/commit_kv.rs b/packages/services/epoxy/src/replica/commit_kv.rs index d44315a6e0..7a9fcfa450 100644 --- a/packages/services/epoxy/src/replica/commit_kv.rs +++ b/packages/services/epoxy/src/replica/commit_kv.rs @@ -1,7 +1,7 @@ use anyhow::*; use epoxy_protocol::protocol::{self, ReplicaId}; -use udb_util::prelude::*; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; +use universaldb::prelude::*; use crate::{keys, ops::propose::CommandError, replica::utils}; @@ -10,7 +10,7 @@ pub async fn commit_kv( tx: &Transaction, replica_id: ReplicaId, commands: &[protocol::Command], -) -> Result, FdbBindingError> { +) -> Result> { let subspace = keys::subspace(replica_id); for command in commands.iter() { @@ -23,12 +23,8 @@ pub async fn commit_kv( // Read current value let kv_key = keys::keys::KvValueKey::new(cmd.key.clone()); let packed_key = subspace.pack(&kv_key); - let current_value = if let Some(bytes) = tx.get(&packed_key, false).await? { - Some( - kv_key - .deserialize(&bytes) - .map_err(|x| FdbBindingError::CustomError(x.into()))?, - ) + let current_value = if let Some(bytes) = tx.get(&packed_key, Serializable).await? { + Some(kv_key.deserialize(&bytes)?) 
} else { None }; @@ -67,9 +63,7 @@ pub async fn commit_kv( // Update the value if let Some(value) = new_value { - let serialized = kv_key - .serialize(value.clone()) - .map_err(|x| FdbBindingError::CustomError(x.into()))?; + let serialized = kv_key.serialize(value.clone())?; tx.set(&packed_key, &serialized); } else { tx.clear(&packed_key); diff --git a/packages/services/epoxy/src/replica/decide_path.rs b/packages/services/epoxy/src/replica/decide_path.rs index af1345fa38..1b7022728f 100644 --- a/packages/services/epoxy/src/replica/decide_path.rs +++ b/packages/services/epoxy/src/replica/decide_path.rs @@ -1,5 +1,6 @@ +use anyhow::Result; use epoxy_protocol::protocol; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; use crate::replica::utils; @@ -7,7 +8,7 @@ pub fn decide_path( _tx: &Transaction, pre_accept_oks: Vec, payload: &protocol::Payload, -) -> Result { +) -> Result { tracing::info!(instance=?payload.instance, "deciding path"); let mut new_payload = payload.clone(); diff --git a/packages/services/epoxy/src/replica/lead_consensus.rs b/packages/services/epoxy/src/replica/lead_consensus.rs index 1549c0dc60..96de6e75a3 100644 --- a/packages/services/epoxy/src/replica/lead_consensus.rs +++ b/packages/services/epoxy/src/replica/lead_consensus.rs @@ -1,6 +1,7 @@ +use anyhow::Result; use epoxy_protocol::protocol; -use udb_util::FormalKey as _; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; +use universaldb::utils::{FormalKey, IsolationLevel::*}; use crate::keys; use crate::replica::{ballot, messages, utils}; @@ -9,18 +10,16 @@ pub async fn lead_consensus( tx: &Transaction, replica_id: protocol::ReplicaId, proposal: protocol::Proposal, -) -> Result { +) -> Result { tracing::info!(?replica_id, "leading consensus"); // EPaxos Step 1 let instance_num_key = keys::replica::InstanceNumberKey; let packed_key = keys::subspace(replica_id).pack(&instance_num_key); - let value = tx.get(&packed_key, false).await?; + let value = tx.get(&packed_key, Serializable).await?; let current_slot = if let Some(ref bytes) = value { - let current = instance_num_key - .deserialize(bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let current = instance_num_key.deserialize(bytes)?; current } else { 0 @@ -28,12 +27,7 @@ pub async fn lead_consensus( // Increment and store the new instance number let slot_id = current_slot + 1; - tx.set( - &packed_key, - &instance_num_key - .serialize(slot_id) - .map_err(|e| FdbBindingError::CustomError(e.into()))?, - ); + tx.set(&packed_key, &instance_num_key.serialize(slot_id)?); // Find interference for this key let interf = utils::find_interference(tx, replica_id, &proposal.commands).await?; diff --git a/packages/services/epoxy/src/replica/log.rs b/packages/services/epoxy/src/replica/log.rs index a2f4dd2da8..ab9ba95d13 100644 --- a/packages/services/epoxy/src/replica/log.rs +++ b/packages/services/epoxy/src/replica/log.rs @@ -1,6 +1,7 @@ +use anyhow::{Result, ensure}; use epoxy_protocol::protocol::{self, ReplicaId}; -use udb_util::FormalKey; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; +use universaldb::utils::{FormalKey, IsolationLevel::*}; use crate::{keys, replica::utils}; @@ -19,7 +20,7 @@ pub async fn update_log( replica_id: ReplicaId, log_entry: protocol::LogEntry, instance: &protocol::Instance, -) -> Result<(), FdbBindingError> { +) -> Result<()> { tracing::debug!(?replica_id, ?instance, ?log_entry.state, "updating log"); let subspace = 
keys::subspace(replica_id); @@ -27,12 +28,8 @@ pub async fn update_log( let packed_key = subspace.pack(&log_key); // Read existing log entry to validate state progression - let current_entry = match tx.get(&packed_key, false).await? { - Some(bytes) => Some( - log_key - .deserialize(&bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?, - ), + let current_entry = match tx.get(&packed_key, Serializable).await? { + Some(bytes) => Some(log_key.deserialize(&bytes)?), None => None, }; @@ -41,18 +38,14 @@ pub async fn update_log( let current_order = state_order(¤t.state); let new_order = state_order(&log_entry.state); - if new_order <= current_order { - return Err(FdbBindingError::CustomError( - anyhow::anyhow!( - "invalid state transition: cannot transition from {:?} to {:?} (order {} to {})", - current.state, - log_entry.state, - current_order, - new_order - ) - .into(), - )); - } + ensure!( + new_order > current_order, + "invalid state transition: cannot transition from {:?} to {:?} (order {} to {})", + current.state, + log_entry.state, + current_order, + new_order, + ); tracing::debug!( ?current.state, @@ -66,9 +59,7 @@ pub async fn update_log( } // Store log entry in UDB - let value = log_key - .serialize(log_entry.clone()) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let value = log_key.serialize(log_entry.clone())?; tx.set(&packed_key, &value); // Store in keys for interference diff --git a/packages/services/epoxy/src/replica/message_request.rs b/packages/services/epoxy/src/replica/message_request.rs index fe81278c70..9dd0a3655d 100644 --- a/packages/services/epoxy/src/replica/message_request.rs +++ b/packages/services/epoxy/src/replica/message_request.rs @@ -20,12 +20,9 @@ pub async fn message_request( // Store the configuration ctx.udb()? - .run(move |tx, _| { + .run(move |tx| { let req = req.clone(); - async move { - replica::update_config::update_config(&*tx, replica_id, req) - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into())) - } + async move { replica::update_config::update_config(&*tx, replica_id, req) } }) .await?; @@ -34,13 +31,9 @@ pub async fn message_request( protocol::RequestKind::PreAcceptRequest(req) => { let response = ctx .udb()? - .run(move |tx, _| { + .run(move |tx| { let req = req.clone(); - async move { - replica::messages::pre_accept(&*tx, replica_id, req) - .await - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into())) - } + async move { replica::messages::pre_accept(&*tx, replica_id, req).await } }) .await?; protocol::ResponseKind::PreAcceptResponse(response) @@ -48,13 +41,9 @@ pub async fn message_request( protocol::RequestKind::AcceptRequest(req) => { let response = ctx .udb()? - .run(move |tx, _| { + .run(move |tx| { let req = req.clone(); - async move { - replica::messages::accept(&*tx, replica_id, req) - .await - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into())) - } + async move { replica::messages::accept(&*tx, replica_id, req).await } }) .await?; protocol::ResponseKind::AcceptResponse(response) @@ -62,13 +51,10 @@ pub async fn message_request( protocol::RequestKind::CommitRequest(req) => { // Commit and update KV store ctx.udb()? 
- .run(move |tx, _| { + .run(move |tx| { let req = req.clone(); async move { - replica::messages::commit(&*tx, replica_id, req, true) - .await - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into()))?; - + replica::messages::commit(&*tx, replica_id, req, true).await?; Result::Ok(()) } }) @@ -79,28 +65,20 @@ pub async fn message_request( protocol::RequestKind::PrepareRequest(req) => { let response = ctx .udb()? - .run(move |tx, _| { + .run(move |tx| { let req = req.clone(); - async move { - replica::messages::prepare(&*tx, replica_id, req) - .await - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into())) - } + async move { replica::messages::prepare(&*tx, replica_id, req).await } }) .await?; protocol::ResponseKind::PrepareResponse(response) } protocol::RequestKind::DownloadInstancesRequest(req) => { - // Handle download instances request - read from FDB and return instances + // Handle download instances request - read from UDB and return instances let instances = ctx .udb()? - .run(move |tx, _| { + .run(move |tx| { let req = req.clone(); - async move { - replica::messages::download_instances(&*tx, replica_id, req) - .await - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into())) - } + async move { replica::messages::download_instances(&*tx, replica_id, req).await } }) .await?; diff --git a/packages/services/epoxy/src/replica/messages/accept.rs b/packages/services/epoxy/src/replica/messages/accept.rs index 63295464b1..0f570e892e 100644 --- a/packages/services/epoxy/src/replica/messages/accept.rs +++ b/packages/services/epoxy/src/replica/messages/accept.rs @@ -1,5 +1,6 @@ +use anyhow::{Result, ensure}; use epoxy_protocol::protocol; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; use crate::replica::{ballot, messages}; @@ -7,7 +8,7 @@ pub async fn accept( tx: &Transaction, replica_id: protocol::ReplicaId, accept_req: protocol::AcceptRequest, -) -> Result { +) -> Result { let protocol::Payload { proposal, seq, @@ -22,11 +23,7 @@ pub async fn accept( let is_valid = ballot::validate_and_update_ballot_for_instance(tx, replica_id, ¤t_ballot, &instance) .await?; - if !is_valid { - return Err(FdbBindingError::CustomError( - anyhow::anyhow!("ballot validation failed for pre_accept").into(), - )); - } + ensure!(is_valid, "ballot validation failed for pre_accept"); // EPaxos Step 18 let log_entry = protocol::LogEntry { diff --git a/packages/services/epoxy/src/replica/messages/accepted.rs b/packages/services/epoxy/src/replica/messages/accepted.rs index ae765fbaa0..5314ef88c2 100644 --- a/packages/services/epoxy/src/replica/messages/accepted.rs +++ b/packages/services/epoxy/src/replica/messages/accepted.rs @@ -1,5 +1,6 @@ +use anyhow::Result; use epoxy_protocol::protocol; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; use crate::replica::{ballot, messages, utils}; @@ -8,7 +9,7 @@ pub async fn accepted( tx: &Transaction, replica_id: protocol::ReplicaId, payload: protocol::Payload, -) -> Result<(), FdbBindingError> { +) -> Result<()> { let protocol::Payload { proposal, seq, diff --git a/packages/services/epoxy/src/replica/messages/commit.rs b/packages/services/epoxy/src/replica/messages/commit.rs index 60197c86ff..fd52d64833 100644 --- a/packages/services/epoxy/src/replica/messages/commit.rs +++ b/packages/services/epoxy/src/replica/messages/commit.rs @@ -1,5 +1,6 @@ +use anyhow::Result; use epoxy_protocol::protocol; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; use 
crate::replica::ballot; @@ -9,7 +10,7 @@ pub async fn commit( replica_id: protocol::ReplicaId, commit_req: protocol::CommitRequest, commit_to_kv: bool, -) -> Result<(), FdbBindingError> { +) -> Result<()> { let protocol::Payload { proposal, seq, diff --git a/packages/services/epoxy/src/replica/messages/committed.rs b/packages/services/epoxy/src/replica/messages/committed.rs index 0694a9c584..444c7681d9 100644 --- a/packages/services/epoxy/src/replica/messages/committed.rs +++ b/packages/services/epoxy/src/replica/messages/committed.rs @@ -1,5 +1,6 @@ +use anyhow::Result; use epoxy_protocol::protocol; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; use crate::replica::ballot; @@ -8,7 +9,7 @@ pub async fn committed( tx: &Transaction, replica_id: protocol::ReplicaId, payload: &protocol::Payload, -) -> Result, FdbBindingError> { +) -> Result> { let protocol::Payload { proposal, seq, diff --git a/packages/services/epoxy/src/replica/messages/download_instances.rs b/packages/services/epoxy/src/replica/messages/download_instances.rs index bb92103d78..aa997f1d04 100644 --- a/packages/services/epoxy/src/replica/messages/download_instances.rs +++ b/packages/services/epoxy/src/replica/messages/download_instances.rs @@ -1,7 +1,8 @@ +use anyhow::Result; use epoxy_protocol::protocol::{self, ReplicaId}; use futures_util::TryStreamExt; -use udb_util::prelude::*; -use universaldb::{FdbBindingError, KeySelector, RangeOption, Transaction, options::StreamingMode}; +use universaldb::prelude::*; +use universaldb::{KeySelector, RangeOption, Transaction, options::StreamingMode}; use crate::keys; @@ -9,7 +10,7 @@ pub async fn download_instances( tx: &Transaction, replica_id: ReplicaId, req: protocol::DownloadInstancesRequest, -) -> Result, FdbBindingError> { +) -> Result> { tracing::info!(?replica_id, "handling download instances message"); let mut entries = Vec::new(); @@ -25,13 +26,13 @@ pub async fn download_instances( } else { // TODO: Use ::subspace() // Start from the beginning of the log - let prefix = subspace.pack(&(udb_util::keys::LOG,)); + let prefix = subspace.pack(&(universaldb::utils::keys::LOG,)); KeySelector::first_greater_or_equal(prefix) }; // TODO: Is there a cleaner way to do this // End key is after all log entries - let end_prefix = subspace.pack(&(udb_util::keys::LOG + 1,)); + let end_prefix = subspace.pack(&(universaldb::utils::keys::LOG + 1,)); let end_key = KeySelector::first_greater_or_equal(end_prefix); let range = RangeOption { @@ -43,20 +44,14 @@ pub async fn download_instances( }; // Query the range - let mut stream = tx.get_ranges_keyvalues(range, SERIALIZABLE); + let mut stream = tx.get_ranges_keyvalues(range, Serializable); while let Some(kv) = stream.try_next().await? 
{ // Parse the key to extract instance info let key_bytes = kv.key(); - let log_key = subspace - .unpack::(key_bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; - + let log_key = subspace.unpack::(key_bytes)?; // Deserialize the log entry - let log_entry = log_key - .deserialize(kv.value()) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; - + let log_entry = log_key.deserialize(kv.value())?; // Create the instance from the key let instance = protocol::Instance { replica_id: log_key.instance_replica_id, diff --git a/packages/services/epoxy/src/replica/messages/pre_accept.rs b/packages/services/epoxy/src/replica/messages/pre_accept.rs index 3801bfc439..23f33755a6 100644 --- a/packages/services/epoxy/src/replica/messages/pre_accept.rs +++ b/packages/services/epoxy/src/replica/messages/pre_accept.rs @@ -1,6 +1,7 @@ +use anyhow::{Result, ensure}; use epoxy_protocol::protocol; use std::cmp; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; use crate::replica::{ballot, messages, utils}; @@ -8,7 +9,7 @@ pub async fn pre_accept( tx: &Transaction, replica_id: protocol::ReplicaId, pre_accept_req: protocol::PreAcceptRequest, -) -> Result { +) -> Result { tracing::info!(?replica_id, "handling pre-accept message"); let protocol::Payload { @@ -25,11 +26,7 @@ pub async fn pre_accept( let is_valid = ballot::validate_and_update_ballot_for_instance(tx, replica_id, ¤t_ballot, &instance) .await?; - if !is_valid { - return Err(FdbBindingError::CustomError( - anyhow::anyhow!("ballot validation failed for pre_accept").into(), - )); - } + ensure!(is_valid, "ballot validation failed for pre_accept"); // Find interference for this key let interf = utils::find_interference(tx, replica_id, &proposal.commands).await?; diff --git a/packages/services/epoxy/src/replica/messages/prepare.rs b/packages/services/epoxy/src/replica/messages/prepare.rs index 296f3c4d68..df3581eb3b 100644 --- a/packages/services/epoxy/src/replica/messages/prepare.rs +++ b/packages/services/epoxy/src/replica/messages/prepare.rs @@ -1,6 +1,7 @@ +use anyhow::Result; use epoxy_protocol::protocol; -use udb_util::FormalKey; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; +use universaldb::utils::{FormalKey, IsolationLevel::*}; use crate::{keys, replica::ballot}; @@ -8,7 +9,7 @@ pub async fn prepare( tx: &Transaction, replica_id: protocol::ReplicaId, prepare_req: protocol::PrepareRequest, -) -> Result { +) -> Result { tracing::info!(?replica_id, "handling prepare message"); let protocol::PrepareRequest { ballot, instance } = prepare_req; @@ -19,14 +20,10 @@ pub async fn prepare( let subspace = keys::subspace(replica_id); let log_key = keys::replica::LogEntryKey::new(instance.replica_id, instance.slot_id); - let current_entry = match tx.get(&subspace.pack(&log_key), false).await? { + let current_entry = match tx.get(&subspace.pack(&log_key), Serializable).await? { Some(bytes) => { // Deserialize the existing log entry - Some( - log_key - .deserialize(&bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?, - ) + Some(log_key.deserialize(&bytes)?) } None => None, }; @@ -72,10 +69,8 @@ pub async fn prepare( let subspace = keys::subspace(replica_id); let packed_key = subspace.pack(&instance_ballot_key); - let highest_ballot = match tx.get(&packed_key, false).await? { - Some(bytes) => instance_ballot_key - .deserialize(&bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?, + let highest_ballot = match tx.get(&packed_key, Serializable).await? 
{ + Some(bytes) => instance_ballot_key.deserialize(&bytes)?, None => { // Default ballot for the original replica protocol::Ballot { diff --git a/packages/services/epoxy/src/replica/update_config.rs b/packages/services/epoxy/src/replica/update_config.rs index b44987a4f6..c54a26ee2e 100644 --- a/packages/services/epoxy/src/replica/update_config.rs +++ b/packages/services/epoxy/src/replica/update_config.rs @@ -1,6 +1,7 @@ +use anyhow::Result; use epoxy_protocol::protocol::{self, ReplicaId}; -use udb_util::FormalKey; -use universaldb::{FdbBindingError, Transaction}; +use universaldb::Transaction; +use universaldb::utils::FormalKey; use crate::keys; @@ -8,16 +9,14 @@ pub fn update_config( tx: &Transaction, replica_id: ReplicaId, update_config_req: protocol::UpdateConfigRequest, -) -> Result<(), FdbBindingError> { +) -> Result<()> { tracing::debug!("updating config"); // Store config in UDB let config_key = keys::replica::ConfigKey; let subspace = keys::subspace(replica_id); let packed_key = subspace.pack(&config_key); - let value = config_key - .serialize(update_config_req.config) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let value = config_key.serialize(update_config_req.config)?; tx.set(&packed_key, &value); diff --git a/packages/services/epoxy/src/replica/utils.rs b/packages/services/epoxy/src/replica/utils.rs index 2efa1bea3d..870d3db7ef 100644 --- a/packages/services/epoxy/src/replica/utils.rs +++ b/packages/services/epoxy/src/replica/utils.rs @@ -1,8 +1,9 @@ +use anyhow::Result; use epoxy_protocol::protocol::{self, ReplicaId}; use futures_util::TryStreamExt; use std::{cmp::Ordering, collections::HashSet}; -use udb_util::prelude::*; -use universaldb::{FdbBindingError, KeySelector, RangeOption, Transaction, options::StreamingMode}; +use universaldb::prelude::*; +use universaldb::{KeySelector, RangeOption, Transaction, options::StreamingMode}; use crate::keys; @@ -11,7 +12,7 @@ pub async fn find_interference( tx: &Transaction, replica_id: ReplicaId, commands: &Vec, -) -> Result, FdbBindingError> { +) -> Result> { let mut interf = Vec::new(); // Get deduplicated keys @@ -34,14 +35,12 @@ pub async fn find_interference( ..Default::default() }; - let mut stream = tx.get_ranges_keyvalues(range, SERIALIZABLE); + let mut stream = tx.get_ranges_keyvalues(range, Serializable); while let Some(kv) = stream.try_next().await? 
{ // Parse the key to extract replica_id and slot_id let key_bytes = kv.key(); - let key = subspace - .unpack::(key_bytes) - .map_err(|x| FdbBindingError::CustomError(x.into()))?; + let key = subspace.unpack::(key_bytes)?; interf.push(protocol::Instance { replica_id: key.instance_replica_id, @@ -59,7 +58,7 @@ pub async fn find_max_seq( tx: &Transaction, replica_id: protocol::ReplicaId, interf: &Vec, -) -> Result { +) -> Result { let mut seq = 0; for instance in interf { @@ -67,11 +66,9 @@ pub async fn find_max_seq( let key = keys::replica::LogEntryKey::new(instance.replica_id, instance.slot_id); let subspace = keys::subspace(replica_id); - let value = tx.get(&subspace.pack(&key), false).await?; + let value = tx.get(&subspace.pack(&key), Serializable).await?; if let Some(ref bytes) = value { - let log_entry: protocol::LogEntry = key - .deserialize(bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + let log_entry: protocol::LogEntry = key.deserialize(bytes)?; if log_entry.seq > seq { seq = log_entry.seq; } diff --git a/packages/services/epoxy/src/utils.rs b/packages/services/epoxy/src/utils.rs index b6896f1a08..88e6fbee1d 100644 --- a/packages/services/epoxy/src/utils.rs +++ b/packages/services/epoxy/src/utils.rs @@ -1,6 +1,6 @@ use anyhow::*; use epoxy_protocol::protocol::{self, ReplicaId}; -use universaldb::Transaction; +use universaldb::{Transaction, utils::IsolationLevel::*}; #[derive(Clone, Copy, Debug)] pub enum QuorumType { @@ -44,13 +44,13 @@ pub async fn read_config( tx: &Transaction, replica_id: ReplicaId, ) -> Result { - use udb_util::FormalKey; + use universaldb::utils::FormalKey; let config_key = crate::keys::replica::ConfigKey; let subspace = crate::keys::subspace(replica_id); let packed_key = subspace.pack(&config_key); - match tx.get(&packed_key, false).await? { + match tx.get(&packed_key, Serializable).await? { Some(value) => { let config = config_key.deserialize(&value)?; Ok(config) diff --git a/packages/services/epoxy/src/workflows/replica/setup.rs b/packages/services/epoxy/src/workflows/replica/setup.rs index 50ff1929ac..278c77269d 100644 --- a/packages/services/epoxy/src/workflows/replica/setup.rs +++ b/packages/services/epoxy/src/workflows/replica/setup.rs @@ -4,7 +4,7 @@ use futures_util::{FutureExt, TryStreamExt}; use gas::prelude::*; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, VecDeque}; -use udb_util::prelude::*; +use universaldb::prelude::*; use universaldb::{KeySelector, RangeOption, options::StreamingMode}; use crate::types; @@ -245,7 +245,7 @@ async fn apply_log_entry( // Replay the log entry ctx.udb()? - .run(move |tx, _| { + .run(move |tx| { let log_entry = log_entry.clone(); let instance = instance.clone(); @@ -256,10 +256,8 @@ async fn apply_log_entry( let packed_key = subspace.pack(&log_key); // Read existing entry to determine if we need to replay this log entry - if let Some(bytes) = tx.get(&packed_key, SERIALIZABLE).await? { - let existing = log_key - .deserialize(&bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + if let Some(bytes) = tx.get(&packed_key, Serializable).await? 
{ + let existing = log_key.deserialize(&bytes)?; let existing_order = crate::replica::log::state_order(&existing.state); let new_order = crate::replica::log::state_order(&log_entry.state); @@ -286,21 +284,15 @@ async fn apply_log_entry( match log_entry.state { protocol::State::PreAccepted => { let request = protocol::PreAcceptRequest { payload }; - crate::replica::messages::pre_accept(&*tx, replica_id, request) - .await - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + crate::replica::messages::pre_accept(&*tx, replica_id, request).await?; } protocol::State::Accepted => { let request = protocol::AcceptRequest { payload }; - crate::replica::messages::accept(&*tx, replica_id, request) - .await - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + crate::replica::messages::accept(&*tx, replica_id, request).await?; } protocol::State::Committed => { let request = protocol::CommitRequest { payload }; - crate::replica::messages::commit(&*tx, replica_id, request, false) - .await - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + crate::replica::messages::commit(&*tx, replica_id, request, false).await?; } } @@ -370,7 +362,7 @@ pub async fn recover_keys_chunk( let (last_key, recovered_count) = ctx .udb()? - .run(move |tx, _| { + .run(move |tx| { let after_key = input.after_key.clone(); let count = input.count; @@ -409,7 +401,7 @@ pub async fn recover_keys_chunk( ..Default::default() }; - let mut stream = tx.get_ranges_keyvalues(range_option, SERIALIZABLE); + let mut stream = tx.get_ranges_keyvalues(range_option, Serializable); // Iterate over stream and aggregate data for each key let mut current_key: Option> = None; @@ -423,10 +415,8 @@ pub async fn recover_keys_chunk( scanned_count += 1; // Parse the key instance entry to extract the key and instance info - let key_instance = subspace - .unpack::(kv.key()) - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into()))?; - + let key_instance = + subspace.unpack::(kv.key())?; let key = key_instance.key; let instance = ( key_instance.instance_replica_id, @@ -483,13 +473,9 @@ pub async fn recover_keys_chunk( // it means a single key has too many instances (i.e. 
larger than // the range limit) if recovered_count == 0 && scanned_count >= count { - return Err(universaldb::FdbBindingError::CustomError( - anyhow!( - "single key has more than {} instances, cannot process in one chunk", - count - ) - .into(), - )); + bail!( + "single key has more than {count} instances, cannot process in one chunk", + ); } tracing::info!( @@ -640,7 +626,7 @@ async fn recover_key_value_with_instances( replica_id: protocol::ReplicaId, key: &[u8], instances: &[(protocol::ReplicaId, protocol::SlotId)], -) -> Result<(), universaldb::FdbBindingError> { +) -> Result<()> { let subspace = crate::keys::subspace(replica_id); tracing::debug!( @@ -659,7 +645,7 @@ async fn recover_key_value_with_instances( let log_key = crate::keys::replica::LogEntryKey::new(instance_replica_id, instance_slot_id); let packed_key = subspace.pack(&log_key); - futures.push(tx.get(&packed_key, SERIALIZABLE)); + futures.push(tx.get(&packed_key, Serializable)); batch_keys.push((packed_key, log_key, instance_replica_id, instance_slot_id)); } let batch_results = futures_util::future::try_join_all(futures).await?; @@ -669,21 +655,15 @@ async fn recover_key_value_with_instances( batch_results.into_iter().zip(batch_keys.iter()) { // Missing log entry indicates data corruption - let bytes = bytes.ok_or_else(|| { - universaldb::FdbBindingError::CustomError( - anyhow!( - "missing log entry for instance ({}, {}), data corruption detected", - instance_replica_id, - instance_slot_id - ) - .into(), + let bytes = bytes.with_context(|| { + format!( + "missing log entry for instance ({}, {}), data corruption detected", + instance_replica_id, instance_slot_id ) })?; // Collect committed entries - let entry = log_key - .deserialize(&bytes) - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into()))?; + let entry = log_key.deserialize(&bytes)?; if matches!(entry.state, protocol::State::Committed) { committed_entries.push(CommittedEntry { instance: (*instance_replica_id, *instance_slot_id), @@ -702,9 +682,7 @@ async fn recover_key_value_with_instances( // Sort entries topologically to respect dependencies // This ensures that operations are applied in the correct order, // particularly important for dependent operations like check-and-set - let sorted_entries = topological_sort_entries(&committed_entries) - .map_err(|e| universaldb::FdbBindingError::CustomError(e.into()))?; - + let sorted_entries = topological_sort_entries(&committed_entries)?; tracing::debug!( key_len = key.len(), sorted_count = sorted_entries.len(), diff --git a/packages/services/epoxy/tests/reconfigure.rs b/packages/services/epoxy/tests/reconfigure.rs index b54ef7464f..948e358b29 100644 --- a/packages/services/epoxy/tests/reconfigure.rs +++ b/packages/services/epoxy/tests/reconfigure.rs @@ -7,8 +7,8 @@ use futures_util::TryStreamExt; use gas::prelude::*; use serde_json::json; use std::collections::HashSet; -use udb_util::prelude::*; -use universaldb::{FdbBindingError, KeySelector, RangeOption, options::StreamingMode}; +use universaldb::prelude::*; +use universaldb::{KeySelector, RangeOption, options::StreamingMode}; mod common; @@ -456,7 +456,7 @@ async fn verify_log_entries_match( // Read log entries from replica 1 let log_entries_1 = ctx_1 .udb()? 
- .run(move |tx, _| async move { + .run(move |tx| async move { let subspace = epoxy::keys::subspace(replica_1_id); // Range scan to get all log entries for this replica @@ -467,7 +467,7 @@ async fn verify_log_entries_match( ..Default::default() }; - let mut stream = tx.get_ranges_keyvalues(range, SERIALIZABLE); + let mut stream = tx.get_ranges_keyvalues(range, Serializable); let mut log_entries = Vec::new(); while let Some(kv) = stream.try_next().await? { @@ -475,29 +475,26 @@ async fn verify_log_entries_match( let value_bytes = kv.value(); // Parse the key to get replica_id and slot_id - let key = subspace - .unpack::(key_bytes) - .map_err(|x| FdbBindingError::CustomError(x.into()))?; + let key = subspace.unpack::(key_bytes)?; // Deserialize the log entry let log_entry = epoxy::keys::replica::LogEntryKey::new( key.instance_replica_id, key.instance_slot_id, ) - .deserialize(value_bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + .deserialize(value_bytes)?; log_entries.push((key.instance_replica_id, key.instance_slot_id, log_entry)); } - Result::<_, FdbBindingError>::Ok(log_entries) + Ok(log_entries) }) .await?; // Read log entries from replica 2 let log_entries_2 = ctx_2 .udb()? - .run(move |tx, _| async move { + .run(move |tx| async move { let subspace = epoxy::keys::subspace(replica_2_id); // Range scan to get all log entries for this replica @@ -508,7 +505,7 @@ async fn verify_log_entries_match( ..Default::default() }; - let mut stream = tx.get_ranges_keyvalues(range, SERIALIZABLE); + let mut stream = tx.get_ranges_keyvalues(range, Serializable); let mut log_entries = Vec::new(); while let Some(kv) = stream.try_next().await? { @@ -516,22 +513,19 @@ async fn verify_log_entries_match( let value_bytes = kv.value(); // Parse the key to get replica_id and slot_id - let key = subspace - .unpack::(key_bytes) - .map_err(|x| FdbBindingError::CustomError(x.into()))?; + let key = subspace.unpack::(key_bytes)?; // Deserialize the log entry let log_entry = epoxy::keys::replica::LogEntryKey::new( key.instance_replica_id, key.instance_slot_id, ) - .deserialize(value_bytes) - .map_err(|e| FdbBindingError::CustomError(e.into()))?; + .deserialize(value_bytes)?; log_entries.push((key.instance_replica_id, key.instance_slot_id, log_entry)); } - Result::<_, FdbBindingError>::Ok(log_entries) + Ok(log_entries) }) .await?; @@ -593,15 +587,15 @@ async fn verify_kv_replication( for (i, (key, expected_value)) in expected_keys.iter().enumerate() { // Read the KV value from the replica's UDB let actual_value = udb - .run(move |tx, _| { + .run(move |tx| { let key_clone = key.clone(); async move { let subspace = epoxy::keys::subspace(replica_id); let kv_key = epoxy::keys::keys::KvValueKey::new(key_clone); - let result = tx.get(&subspace.pack(&kv_key), SERIALIZABLE).await?; + let result = tx.get(&subspace.pack(&kv_key), Serializable).await?; // KvValueKey stores Vec directly, so we can return it as is - Result::<_, FdbBindingError>::Ok(result) + Ok(result) } }) .await?; diff --git a/packages/services/namespace/Cargo.toml b/packages/services/namespace/Cargo.toml index 46ec60f082..efd8da845f 100644 --- a/packages/services/namespace/Cargo.toml +++ b/packages/services/namespace/Cargo.toml @@ -18,7 +18,6 @@ rivet-util.workspace = true serde.workspace = true strum.workspace = true tracing.workspace = true -udb-util.workspace = true universaldb.workspace = true url.workspace = true utoipa.workspace = true diff --git a/packages/services/namespace/src/keys.rs b/packages/services/namespace/src/keys.rs index 
93e133de25..caa49d1005 100644 --- a/packages/services/namespace/src/keys.rs +++ b/packages/services/namespace/src/keys.rs @@ -3,12 +3,12 @@ use std::result::Result::Ok; use anyhow::*; use gas::prelude::*; use serde::Serialize; -use udb_util::prelude::*; +use universaldb::prelude::*; use utoipa::ToSchema; use versioned_data_util::OwnedVersionedData; -pub fn subspace() -> udb_util::Subspace { - udb_util::Subspace::new(&(RIVET, NAMESPACE)) +pub fn subspace() -> universaldb::utils::Subspace { + universaldb::utils::Subspace::new(&(RIVET, NAMESPACE)) } #[derive(Debug)] diff --git a/packages/services/namespace/src/ops/get_local.rs b/packages/services/namespace/src/ops/get_local.rs index 3f1b5f4474..156632f384 100644 --- a/packages/services/namespace/src/ops/get_local.rs +++ b/packages/services/namespace/src/ops/get_local.rs @@ -1,7 +1,6 @@ use futures_util::{StreamExt, TryStreamExt}; use gas::prelude::*; -use udb_util::{SERIALIZABLE, TxnExt}; -use universaldb as udb; +use universaldb::utils::IsolationLevel::*; use crate::{errors, keys, types::Namespace}; @@ -18,7 +17,7 @@ pub async fn namespace_get_local(ctx: &OperationCtx, input: &Input) -> Result Result std::result::Result, udb::FdbBindingError> { - let txs = tx.subspace(keys::subspace()); + tx: &universaldb::Transaction, +) -> Result> { + let tx = tx.with_subspace(keys::subspace()); let name_key = keys::NameKey::new(namespace_id); let display_name_key = keys::DisplayNameKey::new(namespace_id); let create_ts_key = keys::CreateTsKey::new(namespace_id); let (name, display_name, create_ts) = tokio::try_join!( - txs.read_opt(&name_key, SERIALIZABLE), - txs.read_opt(&display_name_key, SERIALIZABLE), - txs.read_opt(&create_ts_key, SERIALIZABLE), + tx.read_opt(&name_key, Serializable), + tx.read_opt(&display_name_key, Serializable), + tx.read_opt(&create_ts_key, Serializable), )?; // Namespace not found @@ -57,12 +56,8 @@ pub(crate) async fn get_inner( return Ok(None); }; - let display_name = display_name.ok_or(udb::FdbBindingError::CustomError( - format!("key should exist: {display_name_key:?}").into(), - ))?; - let create_ts = create_ts.ok_or(udb::FdbBindingError::CustomError( - format!("key should exist: {create_ts_key:?}").into(), - ))?; + let display_name = display_name.context("key should exist")?; + let create_ts = create_ts.context("key should exist")?; Ok(Some(Namespace { namespace_id, diff --git a/packages/services/namespace/src/ops/list.rs b/packages/services/namespace/src/ops/list.rs index abac196775..57955718b7 100644 --- a/packages/services/namespace/src/ops/list.rs +++ b/packages/services/namespace/src/ops/list.rs @@ -1,8 +1,8 @@ use anyhow::Result; use futures_util::TryStreamExt; use gas::prelude::*; -use udb_util::SNAPSHOT; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::IsolationLevel::*; use crate::{errors, keys, types::Namespace}; @@ -24,16 +24,16 @@ pub async fn namespace_list(ctx: &OperationCtx, input: &Input) -> Result let namespaces = ctx .udb()? 
- .run(|tx, _mc| async move { + .run(|tx| async move { let mut namespaces = Vec::new(); let limit = input.limit.unwrap_or(1000); // Default limit to 1000 let mut stream = tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::Iterator, ..(&keys::subspace()).into() }, - SNAPSHOT, + Snapshot, ); let mut seen_namespaces = std::collections::HashSet::new(); @@ -59,7 +59,7 @@ pub async fn namespace_list(ctx: &OperationCtx, input: &Input) -> Result } } - Result::<_, udb::FdbBindingError>::Ok(namespaces) + Ok(namespaces) }) .custom_instrument(tracing::info_span!("namespace_list_tx")) .await?; diff --git a/packages/services/namespace/src/ops/resolve_for_name_local.rs b/packages/services/namespace/src/ops/resolve_for_name_local.rs index d70e382000..b02617a35d 100644 --- a/packages/services/namespace/src/ops/resolve_for_name_local.rs +++ b/packages/services/namespace/src/ops/resolve_for_name_local.rs @@ -1,5 +1,5 @@ use gas::prelude::*; -use udb_util::{SERIALIZABLE, TxnExt}; +use universaldb::utils::IsolationLevel::*; use crate::{errors, keys, ops::get_local::get_inner, types::Namespace}; @@ -18,13 +18,13 @@ pub async fn namespace_resolve_for_name_local( } ctx.udb()? - .run(|tx, _mc| { + .run(|tx| { let name = input.name.clone(); async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); - let Some(namespace_id) = txs - .read_opt(&keys::ByNameKey::new(name.clone()), SERIALIZABLE) + let Some(namespace_id) = tx + .read_opt(&keys::ByNameKey::new(name.clone()), Serializable) .await? else { // Namespace not found diff --git a/packages/services/namespace/src/ops/runner_config/delete.rs b/packages/services/namespace/src/ops/runner_config/delete.rs index 09ba689150..1b6f68cc3a 100644 --- a/packages/services/namespace/src/ops/runner_config/delete.rs +++ b/packages/services/namespace/src/ops/runner_config/delete.rs @@ -1,6 +1,6 @@ use gas::prelude::*; use rivet_cache::CacheKey; -use udb_util::{SERIALIZABLE, TxnExt}; +use universaldb::utils::IsolationLevel::*; use crate::{errors, keys}; @@ -17,18 +17,18 @@ pub async fn namespace_runner_config_delete(ctx: &OperationCtx, input: &Input) - } ctx.udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); // Read existing config to determine variant let runner_config_key = keys::RunnerConfigKey::new(input.namespace_id, input.name.clone()); - if let Some(config) = txs.read_opt(&runner_config_key, SERIALIZABLE).await? { - txs.delete(&runner_config_key); + if let Some(config) = tx.read_opt(&runner_config_key, Serializable).await? { + tx.delete(&runner_config_key); // Clear secondary idx - txs.delete(&keys::RunnerConfigByVariantKey::new( + tx.delete(&keys::RunnerConfigByVariantKey::new( input.namespace_id, config.variant(), input.name.clone(), diff --git a/packages/services/namespace/src/ops/runner_config/get_local.rs b/packages/services/namespace/src/ops/runner_config/get_local.rs index fd23ad9562..35a00aca94 100644 --- a/packages/services/namespace/src/ops/runner_config/get_local.rs +++ b/packages/services/namespace/src/ops/runner_config/get_local.rs @@ -1,7 +1,7 @@ use futures_util::{StreamExt, TryStreamExt}; use gas::prelude::*; use serde::{Deserialize, Serialize}; -use udb_util::{SERIALIZABLE, TxnExt}; +use universaldb::utils::IsolationLevel::*; use crate::{errors, keys}; @@ -28,20 +28,20 @@ pub async fn namespace_runner_config_get_local( let runner_configs = ctx .udb()? 
- .run(|tx, _mc| async move { + .run(|tx| async move { futures_util::stream::iter(input.runners.clone()) .map(|(namespace_id, runner_name)| { let tx = tx.clone(); async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); let runner_config_key = keys::RunnerConfigKey::new(namespace_id, runner_name.clone()); // Runner config not found let Some(runner_config) = - txs.read_opt(&runner_config_key, SERIALIZABLE).await? + tx.read_opt(&runner_config_key, Serializable).await? else { return Ok(None); }; diff --git a/packages/services/namespace/src/ops/runner_config/list.rs b/packages/services/namespace/src/ops/runner_config/list.rs index 1b15b21fb2..45f414304d 100644 --- a/packages/services/namespace/src/ops/runner_config/list.rs +++ b/packages/services/namespace/src/ops/runner_config/list.rs @@ -1,7 +1,7 @@ use futures_util::{StreamExt, TryStreamExt}; use gas::prelude::*; -use udb_util::{SERIALIZABLE, TxnExt}; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::IsolationLevel::*; use crate::{errors, keys, types::RunnerConfig}; @@ -24,11 +24,11 @@ pub async fn namespace_runner_config_list( let runner_configs = ctx .udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let (start, end) = if let Some(variant) = input.variant { - let (start, end) = txs + let (start, end) = keys::subspace() .subspace(&keys::RunnerConfigByVariantKey::subspace_with_variant( input.namespace_id, variant, @@ -36,7 +36,7 @@ pub async fn namespace_runner_config_list( .range(); let start = if let Some(name) = &input.after_name { - txs.pack(&keys::RunnerConfigByVariantKey::new( + tx.pack(&keys::RunnerConfigByVariantKey::new( input.namespace_id, variant, name.clone(), @@ -47,12 +47,12 @@ pub async fn namespace_runner_config_list( (start, end) } else { - let (start, end) = txs + let (start, end) = keys::subspace() .subspace(&keys::RunnerConfigKey::subspace(input.namespace_id)) .range(); let start = if let Some(name) = &input.after_name { - txs.pack(&keys::RunnerConfigKey::new( + tx.pack(&keys::RunnerConfigKey::new( input.namespace_id, name.clone(), )) @@ -63,22 +63,22 @@ pub async fn namespace_runner_config_list( (start, end) }; - txs.get_ranges_keyvalues( - udb::RangeOption { + tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::WantAll, limit: Some(input.limit), ..(start, end).into() }, - SERIALIZABLE, + Serializable, ) .map(|res| match res { Ok(entry) => { if input.variant.is_some() { let (key, config) = - txs.read_entry::(&entry)?; + tx.read_entry::(&entry)?; Ok((key.name, config)) } else { - let (key, config) = txs.read_entry::(&entry)?; + let (key, config) = tx.read_entry::(&entry)?; Ok((key.name, config)) } } diff --git a/packages/services/namespace/src/ops/runner_config/upsert.rs b/packages/services/namespace/src/ops/runner_config/upsert.rs index e530f8e3ce..5c88b74f62 100644 --- a/packages/services/namespace/src/ops/runner_config/upsert.rs +++ b/packages/services/namespace/src/ops/runner_config/upsert.rs @@ -1,6 +1,5 @@ use gas::prelude::*; use rivet_cache::CacheKey; -use udb_util::TxnExt; use universaldb::options::MutationType; use crate::{errors, keys, types::RunnerConfig}; @@ -19,17 +18,17 @@ pub async fn namespace_runner_config_upsert(ctx: &OperationCtx, input: &Input) - } ctx.udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); // TODO: Once other types of configs get added, delete previous config before writing - txs.write( + tx.write( &keys::RunnerConfigKey::new(input.namespace_id, input.name.clone()), input.config.clone(), )?; // Write to secondary idx - txs.write( + tx.write( &keys::RunnerConfigByVariantKey::new( input.namespace_id, input.config.variant(), @@ -59,8 +58,8 @@ pub async fn namespace_runner_config_upsert(ctx: &OperationCtx, input: &Input) - } // Sets desired count to 0 if it doesn't exist - let txs = tx.subspace(rivet_types::keys::pegboard::subspace()); - txs.atomic_op( + let tx = tx.with_subspace(rivet_types::keys::pegboard::subspace()); + tx.atomic_op( &rivet_types::keys::pegboard::ns::ServerlessDesiredSlotsKey::new( input.namespace_id, input.name.clone(), diff --git a/packages/services/namespace/src/workflows/namespace.rs b/packages/services/namespace/src/workflows/namespace.rs index 4feac7a0d2..525aee56d6 100644 --- a/packages/services/namespace/src/workflows/namespace.rs +++ b/packages/services/namespace/src/workflows/namespace.rs @@ -1,7 +1,7 @@ use futures_util::FutureExt; use gas::prelude::*; use serde::{Deserialize, Serialize}; -use udb_util::{SERIALIZABLE, TxnExt}; +use universaldb::utils::IsolationLevel::*; use crate::{errors, keys}; @@ -32,7 +32,7 @@ pub async fn namespace(ctx: &mut WorkflowCtx, input: &Input) -> Result<()> { } let insert_res = ctx - .activity(InsertFdbInput { + .activity(InsertDbInput { namespace_id: input.namespace_id, name: input.name.clone(), display_name: input.display_name.clone(), @@ -135,39 +135,39 @@ pub async fn validate( } #[derive(Debug, Clone, Serialize, Deserialize, Hash)] -struct InsertFdbInput { +struct InsertDbInput { namespace_id: Id, name: String, display_name: String, create_ts: i64, } -#[activity(InsertFdb)] -async fn insert_fdb( +#[activity(InsertDb)] +async fn insert_db( ctx: &ActivityCtx, - input: &InsertFdbInput, + input: &InsertDbInput, ) -> Result> { ctx.udb()? - .run(|tx, _mc| { + .run(|tx| { let namespace_id = input.namespace_id; let name = input.name.clone(); let display_name = input.display_name.clone(); async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); let name_idx_key = keys::ByNameKey::new(name.clone()); - if txs.exists(&name_idx_key, SERIALIZABLE).await? { + if tx.exists(&name_idx_key, Serializable).await? 
{ return Ok(Err(errors::Namespace::NameNotUnique)); } - txs.write(&keys::NameKey::new(namespace_id), name)?; - txs.write(&keys::DisplayNameKey::new(namespace_id), display_name)?; - txs.write(&keys::CreateTsKey::new(namespace_id), input.create_ts)?; + tx.write(&keys::NameKey::new(namespace_id), name)?; + tx.write(&keys::DisplayNameKey::new(namespace_id), display_name)?; + tx.write(&keys::CreateTsKey::new(namespace_id), input.create_ts)?; // Insert idx - txs.write(&name_idx_key, namespace_id)?; + tx.write(&name_idx_key, namespace_id)?; Ok(Ok(())) } diff --git a/packages/services/pegboard/Cargo.toml b/packages/services/pegboard/Cargo.toml index f9569fa571..cc08e962cf 100644 --- a/packages/services/pegboard/Cargo.toml +++ b/packages/services/pegboard/Cargo.toml @@ -25,7 +25,6 @@ serde_json.workspace = true serde.workspace = true strum.workspace = true tracing.workspace = true -udb-util.workspace = true universaldb.workspace = true utoipa.workspace = true versioned-data-util.workspace = true diff --git a/packages/services/pegboard/src/keys/actor.rs b/packages/services/pegboard/src/keys/actor.rs index 022f047c93..e85d5fc07a 100644 --- a/packages/services/pegboard/src/keys/actor.rs +++ b/packages/services/pegboard/src/keys/actor.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use gas::prelude::*; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug)] pub struct CreateTsKey { diff --git a/packages/services/pegboard/src/keys/epoxy/ns.rs b/packages/services/pegboard/src/keys/epoxy/ns.rs index e2807aac9e..6ca366be8c 100644 --- a/packages/services/pegboard/src/keys/epoxy/ns.rs +++ b/packages/services/pegboard/src/keys/epoxy/ns.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use gas::prelude::*; -use udb_util::prelude::*; +use universaldb::prelude::*; #[derive(Debug)] pub struct ReservationByKeyKey { diff --git a/packages/services/pegboard/src/keys/mod.rs b/packages/services/pegboard/src/keys/mod.rs index 3cb17c5bbb..253fdcb409 100644 --- a/packages/services/pegboard/src/keys/mod.rs +++ b/packages/services/pegboard/src/keys/mod.rs @@ -1,14 +1,14 @@ -use udb_util::prelude::*; +use universaldb::prelude::*; pub mod actor; pub mod epoxy; pub mod ns; pub mod runner; -pub fn subspace() -> udb_util::Subspace { +pub fn subspace() -> universaldb::utils::Subspace { rivet_types::keys::pegboard::subspace() } -pub fn actor_kv_subspace() -> udb_util::Subspace { - udb_util::Subspace::new(&(RIVET, PEGBOARD, ACTOR_KV)) +pub fn actor_kv_subspace() -> universaldb::utils::Subspace { + universaldb::utils::Subspace::new(&(RIVET, PEGBOARD, ACTOR_KV)) } diff --git a/packages/services/pegboard/src/keys/ns.rs b/packages/services/pegboard/src/keys/ns.rs index 5ccf65fcb5..a30a82d8c6 100644 --- a/packages/services/pegboard/src/keys/ns.rs +++ b/packages/services/pegboard/src/keys/ns.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use gas::prelude::*; -use udb_util::prelude::*; +use universaldb::prelude::*; use versioned_data_util::OwnedVersionedData; #[derive(Debug)] diff --git a/packages/services/pegboard/src/keys/runner.rs b/packages/services/pegboard/src/keys/runner.rs index 9c20248557..fcdd520ee1 100644 --- a/packages/services/pegboard/src/keys/runner.rs +++ b/packages/services/pegboard/src/keys/runner.rs @@ -2,7 +2,7 @@ use std::result::Result::Ok; use anyhow::*; use gas::prelude::*; -use udb_util::prelude::*; +use universaldb::prelude::*; use versioned_data_util::OwnedVersionedData; #[derive(Debug)] @@ -753,7 +753,7 @@ impl FormalChunkedKey for MetadataKey { } } 
- fn combine(&self, chunks: Vec) -> Result { + fn combine(&self, chunks: Vec) -> Result { rivet_data::versioned::MetadataKeyData::deserialize_with_embedded_version( &chunks .iter() @@ -768,7 +768,7 @@ impl FormalChunkedKey for MetadataKey { Ok( rivet_data::versioned::MetadataKeyData::latest(value.try_into()?) .serialize_with_embedded_version(rivet_data::PEGBOARD_RUNNER_METADATA_VERSION)? - .chunks(udb_util::CHUNK_SIZE) + .chunks(universaldb::utils::CHUNK_SIZE) .map(|x| x.to_vec()) .collect(), ) diff --git a/packages/services/pegboard/src/ops/actor/get.rs b/packages/services/pegboard/src/ops/actor/get.rs index 8c7ba0db56..22c9b55f17 100644 --- a/packages/services/pegboard/src/ops/actor/get.rs +++ b/packages/services/pegboard/src/ops/actor/get.rs @@ -1,8 +1,7 @@ use futures_util::{StreamExt, TryStreamExt}; use gas::prelude::*; use rivet_types::actors::Actor; -use udb_util::{FormalKey, SERIALIZABLE}; -use universaldb as udb; +use universaldb::utils::{FormalKey, IsolationLevel::*}; use crate::keys; @@ -20,7 +19,7 @@ pub struct Output { pub async fn pegboard_actor_get(ctx: &OperationCtx, input: &Input) -> Result { let actors_with_wf_ids = ctx .udb()? - .run(|tx, _mc| async move { + .run(|tx| async move { futures_util::stream::iter(input.actor_ids.clone()) .map(|actor_id| { let tx = tx.clone(); @@ -28,16 +27,14 @@ pub async fn pegboard_actor_get(ctx: &OperationCtx, input: &Input) -> Result Result { let actors = ctx .udb()? - .run(|tx, _mc| async move { + .run(|tx| async move { futures_util::stream::iter(input.actor_ids.clone()) .map(|actor_id| { let tx = tx.clone(); @@ -36,17 +35,15 @@ pub async fn pegboard_actor_get_runner(ctx: &OperationCtx, input: &Input) -> Res let connectable_key = keys::actor::ConnectableKey::new(actor_id); let (runner_id_entry, connectable_entry) = tokio::try_join!( - tx.get(&keys::subspace().pack(&runner_id_key), SERIALIZABLE), - tx.get(&keys::subspace().pack(&connectable_key), SERIALIZABLE), + tx.get(&keys::subspace().pack(&runner_id_key), Serializable), + tx.get(&keys::subspace().pack(&connectable_key), Serializable), )?; let Some(runner_id_entry) = runner_id_entry else { return Ok(None); }; - let runner_id = runner_id_key - .deserialize(&runner_id_entry) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?; + let runner_id = runner_id_key.deserialize(&runner_id_entry)?; Ok(Some(Actor { actor_id, diff --git a/packages/services/pegboard/src/ops/actor/list_for_ns.rs b/packages/services/pegboard/src/ops/actor/list_for_ns.rs index 864359ee4e..fdf4373c8a 100644 --- a/packages/services/pegboard/src/ops/actor/list_for_ns.rs +++ b/packages/services/pegboard/src/ops/actor/list_for_ns.rs @@ -1,8 +1,8 @@ use futures_util::TryStreamExt; use gas::prelude::*; use rivet_types::actors::Actor; -use udb_util::{SNAPSHOT, TxnExt}; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::IsolationLevel::*; use crate::keys; @@ -25,12 +25,12 @@ pub struct Output { pub async fn pegboard_actor_list_for_ns(ctx: &OperationCtx, input: &Input) -> Result { let actors_with_wf_ids = ctx .udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let mut results = Vec::new(); if let Some(key) = &input.key { - let actor_subspace = txs.subspace(&keys::ns::ActorByKeyKey::subspace( + let actor_subspace = keys::subspace().subspace(&keys::ns::ActorByKeyKey::subspace( input.namespace_id, input.name.clone(), key.clone(), @@ -38,7 +38,7 @@ pub async fn pegboard_actor_list_for_ns(ctx: &OperationCtx, input: &Input) -> Re let (start, end) = actor_subspace.range(); let end = if let Some(created_before) = input.created_before { - udb_util::end_of_key_range(&txs.pack( + universaldb::utils::end_of_key_range(&tx.pack( &keys::ns::ActorByKeyKey::subspace_with_create_ts( input.namespace_id, input.name.clone(), @@ -50,18 +50,18 @@ pub async fn pegboard_actor_list_for_ns(ctx: &OperationCtx, input: &Input) -> Re end }; - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, reverse: true, ..(start, end).into() }, // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, + Snapshot, ); while let Some(entry) = stream.try_next().await? { - let (idx_key, data) = txs.read_entry::(&entry)?; + let (idx_key, data) = tx.read_entry::(&entry)?; if !data.is_destroyed || input.include_destroyed { results.push((idx_key.actor_id, data.workflow_id)); @@ -72,14 +72,14 @@ pub async fn pegboard_actor_list_for_ns(ctx: &OperationCtx, input: &Input) -> Re } } } else if input.include_destroyed { - let actor_subspace = txs.subspace(&keys::ns::AllActorKey::subspace( + let actor_subspace = keys::subspace().subspace(&keys::ns::AllActorKey::subspace( input.namespace_id, input.name.clone(), )); let (start, end) = actor_subspace.range(); let end = if let Some(created_before) = input.created_before { - udb_util::end_of_key_range(&txs.pack( + universaldb::utils::end_of_key_range(&tx.pack( &keys::ns::AllActorKey::subspace_with_create_ts( input.namespace_id, input.name.clone(), @@ -90,18 +90,18 @@ pub async fn pegboard_actor_list_for_ns(ctx: &OperationCtx, input: &Input) -> Re end }; - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, reverse: true, ..(start, end).into() }, // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, + Snapshot, ); while let Some(entry) = stream.try_next().await? 
{ - let (idx_key, workflow_id) = txs.read_entry::(&entry)?; + let (idx_key, workflow_id) = tx.read_entry::(&entry)?; results.push((idx_key.actor_id, workflow_id)); @@ -110,14 +110,13 @@ pub async fn pegboard_actor_list_for_ns(ctx: &OperationCtx, input: &Input) -> Re } } } else { - let actor_subspace = txs.subspace(&keys::ns::ActiveActorKey::subspace( - input.namespace_id, - input.name.clone(), - )); + let actor_subspace = keys::subspace().subspace( + &keys::ns::ActiveActorKey::subspace(input.namespace_id, input.name.clone()), + ); let (start, end) = actor_subspace.range(); let end = if let Some(created_before) = input.created_before { - udb_util::end_of_key_range(&txs.pack( + universaldb::utils::end_of_key_range(&tx.pack( &keys::ns::ActiveActorKey::subspace_with_create_ts( input.namespace_id, input.name.clone(), @@ -128,19 +127,19 @@ pub async fn pegboard_actor_list_for_ns(ctx: &OperationCtx, input: &Input) -> Re end }; - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, reverse: true, ..(start, end).into() }, // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, + Snapshot, ); while let Some(entry) = stream.try_next().await? { let (idx_key, workflow_id) = - txs.read_entry::(&entry)?; + tx.read_entry::(&entry)?; results.push((idx_key.actor_id, workflow_id)); diff --git a/packages/services/pegboard/src/ops/actor/list_names.rs b/packages/services/pegboard/src/ops/actor/list_names.rs index 0cd3752ce3..ac15a99f6c 100644 --- a/packages/services/pegboard/src/ops/actor/list_names.rs +++ b/packages/services/pegboard/src/ops/actor/list_names.rs @@ -1,8 +1,8 @@ use futures_util::{StreamExt, TryStreamExt}; use gas::prelude::*; use rivet_data::converted::ActorNameKeyData; -use udb_util::{SNAPSHOT, TxnExt}; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::IsolationLevel::*; use crate::keys; @@ -22,15 +22,15 @@ pub struct Output { pub async fn pegboard_actor_list_names(ctx: &OperationCtx, input: &Input) -> Result { let names = ctx .udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let actor_name_subspace = - txs.subspace(&keys::ns::ActorNameKey::subspace(input.namespace_id)); + keys::subspace().subspace(&keys::ns::ActorNameKey::subspace(input.namespace_id)); let (start, end) = actor_name_subspace.range(); let start = if let Some(name) = &input.after_name { - txs.pack(&keys::ns::ActorNameKey::new( + tx.pack(&keys::ns::ActorNameKey::new( input.namespace_id, name.clone(), )) @@ -38,22 +38,19 @@ pub async fn pegboard_actor_list_names(ctx: &OperationCtx, input: &Input) -> Res start }; - txs.get_ranges_keyvalues( - udb::RangeOption { + tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::WantAll, limit: Some(input.limit), ..(start, end).into() }, - // NOTE: This is not SERIALIZABLE to prevent contention with inserting new names - SNAPSHOT, + // NOTE: This is not Serializable to prevent contention with inserting new names + Snapshot, ) - .map(|res| match res { - Ok(entry) => { - let (key, metadata) = txs.read_entry::(&entry)?; + .map(|res| { + let (key, metadata) = tx.read_entry::(&res?)?; - Ok((key.name, metadata)) - } - Err(err) => Err(Into::::into(err)), + Ok((key.name, metadata)) }) .try_collect::>() .await diff --git a/packages/services/pegboard/src/ops/runner/get.rs b/packages/services/pegboard/src/ops/runner/get.rs index 484eb8e0de..3c588170fb 100644 --- a/packages/services/pegboard/src/ops/runner/get.rs +++ b/packages/services/pegboard/src/ops/runner/get.rs @@ -2,8 +2,8 @@ use anyhow::Result; use futures_util::TryStreamExt; use gas::prelude::*; use rivet_types::runners::Runner; -use udb_util::{FormalChunkedKey, SERIALIZABLE, SNAPSHOT, TxnExt}; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::{FormalChunkedKey, IsolationLevel::*}; use crate::keys; @@ -23,7 +23,7 @@ pub async fn pegboard_runner_get(ctx: &OperationCtx, input: &Input) -> Result Result::Ok(runners) + Ok(runners) } }) .await?; @@ -44,18 +44,18 @@ pub async fn pegboard_runner_get(ctx: &OperationCtx, input: &Input) -> Result std::result::Result, udb::FdbBindingError> { - let txs = tx.subspace(keys::subspace()); +) -> Result> { + let tx = tx.with_subspace(keys::subspace()); // TODO: Make this part of the below try join to reduce round trip count // Check if runner exists by looking for workflow ID - if !txs - .exists(&keys::runner::WorkflowIdKey::new(runner_id), SERIALIZABLE) + if !tx + .exists(&keys::runner::WorkflowIdKey::new(runner_id), Serializable) .await? { - return std::result::Result::Ok(None); + return Ok(None); } let namespace_id_key = keys::runner::NamespaceIdKey::new(runner_id); @@ -71,7 +71,7 @@ pub(crate) async fn get_inner( let last_ping_ts_key = keys::runner::LastPingTsKey::new(runner_id); let last_rtt_key = keys::runner::LastRttKey::new(runner_id); let metadata_key = keys::runner::MetadataKey::new(runner_id); - let metadata_subspace = txs.subspace(&metadata_key); + let metadata_subspace = keys::subspace().subspace(&metadata_key); let ( namespace_id, @@ -88,27 +88,27 @@ pub(crate) async fn get_inner( last_rtt, metadata_chunks, ) = tokio::try_join!( - // NOTE: These are not SERIALIZABLE because this op is meant for basic information (i.e. data for the + // NOTE: These are not Serializable because this op is meant for basic information (i.e. 
data for the // API) - txs.read(&namespace_id_key, SNAPSHOT), - txs.read(&name_key, SNAPSHOT), - txs.read(&key_key, SNAPSHOT), - txs.read(&version_key, SNAPSHOT), - txs.read(&total_slots_key, SNAPSHOT), - txs.read(&remaining_slots_key, SNAPSHOT), - txs.read(&create_ts_key, SNAPSHOT), - txs.read_opt(&connected_ts_key, SNAPSHOT), - txs.read_opt(&drain_ts_key, SNAPSHOT), - txs.read_opt(&stop_ts_key, SNAPSHOT), - txs.read_opt(&last_ping_ts_key, SNAPSHOT), - txs.read_opt(&last_rtt_key, SNAPSHOT), + tx.read(&namespace_id_key, Snapshot), + tx.read(&name_key, Snapshot), + tx.read(&key_key, Snapshot), + tx.read(&version_key, Snapshot), + tx.read(&total_slots_key, Snapshot), + tx.read(&remaining_slots_key, Snapshot), + tx.read(&create_ts_key, Snapshot), + tx.read_opt(&connected_ts_key, Snapshot), + tx.read_opt(&drain_ts_key, Snapshot), + tx.read_opt(&stop_ts_key, Snapshot), + tx.read_opt(&last_ping_ts_key, Snapshot), + tx.read_opt(&last_rtt_key, Snapshot), async { - txs.get_ranges_keyvalues( - udb::RangeOption { + tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&metadata_subspace).into() }, - SNAPSHOT, + Snapshot, ) .try_collect::>() .await @@ -119,12 +119,7 @@ pub(crate) async fn get_inner( let metadata = if metadata_chunks.is_empty() { None } else { - Some( - metadata_key - .combine(metadata_chunks) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))? - .metadata, - ) + Some(metadata_key.combine(metadata_chunks)?.metadata) }; std::result::Result::Ok(Some(Runner { diff --git a/packages/services/pegboard/src/ops/runner/get_by_key.rs b/packages/services/pegboard/src/ops/runner/get_by_key.rs index 5a6ea63bf1..ab1a4919be 100644 --- a/packages/services/pegboard/src/ops/runner/get_by_key.rs +++ b/packages/services/pegboard/src/ops/runner/get_by_key.rs @@ -1,8 +1,7 @@ -use anyhow::*; +use anyhow::Result; use gas::prelude::*; use rivet_types::runners::Runner; -use udb_util::{SERIALIZABLE, TxnExt}; -use universaldb as udb; +use universaldb::utils::IsolationLevel::*; use crate::keys; @@ -24,24 +23,24 @@ pub async fn pegboard_runner_get_by_key(ctx: &OperationCtx, input: &Input) -> Re let runner = ctx .udb()? 
- .run(|tx, _mc| { + .run(|tx| { let dc_name = dc_name.to_string(); let input = input.clone(); async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); // Look up runner by key let runner_by_key_key = keys::ns::RunnerByKeyKey::new(input.namespace_id, input.name, input.key); - let runner_data = txs.read_opt(&runner_by_key_key, SERIALIZABLE).await?; + let runner_data = tx.read_opt(&runner_by_key_key, Serializable).await?; if let Some(data) = runner_data { // Get full runner details using the runner_id let runner = super::get::get_inner(&dc_name, &tx, data.runner_id).await?; - std::result::Result::<_, udb::FdbBindingError>::Ok(runner) + Ok(runner) } else { - std::result::Result::<_, udb::FdbBindingError>::Ok(None) + Ok(None) } } }) diff --git a/packages/services/pegboard/src/ops/runner/list_for_ns.rs b/packages/services/pegboard/src/ops/runner/list_for_ns.rs index 58bb974b1e..0947a8d1c6 100644 --- a/packages/services/pegboard/src/ops/runner/list_for_ns.rs +++ b/packages/services/pegboard/src/ops/runner/list_for_ns.rs @@ -2,8 +2,8 @@ use anyhow::Result; use futures_util::{StreamExt, TryStreamExt}; use gas::prelude::*; use rivet_types::runners::Runner; -use udb_util::{SNAPSHOT, TxnExt}; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::IsolationLevel::*; use crate::keys; @@ -27,24 +27,24 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R let runners = ctx .udb()? - .run(|tx, _mc| { + .run(|tx| { let dc_name = dc_name.to_string(); async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); let mut results = Vec::new(); // TODO: Lots of duplicate code if let Some(name) = &input.name { if input.include_stopped { let runner_subspace = - txs.subspace(&keys::ns::AllRunnerByNameKey::subspace( + keys::subspace().subspace(&keys::ns::AllRunnerByNameKey::subspace( input.namespace_id, name.clone(), )); let (start, end) = runner_subspace.range(); let end = if let Some(created_before) = input.created_before { - udb_util::end_of_key_range(&txs.pack( + universaldb::utils::end_of_key_range(&tx.pack( &keys::ns::AllRunnerByNameKey::subspace_with_create_ts( input.namespace_id, name.clone(), @@ -55,19 +55,18 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R end }; - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, reverse: true, ..(start, end).into() }, // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, + Snapshot, ); while let Some(entry) = stream.try_next().await? 
{ - let idx_key = - txs.unpack::(entry.key())?; + let idx_key = tx.unpack::(entry.key())?; results.push(idx_key.runner_id); @@ -77,14 +76,14 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R } } else { let runner_subspace = - txs.subspace(&keys::ns::ActiveRunnerByNameKey::subspace( + keys::subspace().subspace(&keys::ns::ActiveRunnerByNameKey::subspace( input.namespace_id, name.clone(), )); let (start, end) = runner_subspace.range(); let end = if let Some(created_before) = input.created_before { - udb_util::end_of_key_range(&txs.pack( + universaldb::utils::end_of_key_range(&tx.pack( &keys::ns::ActiveRunnerByNameKey::subspace_with_create_ts( input.namespace_id, name.clone(), @@ -95,19 +94,19 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R end }; - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, reverse: true, ..(start, end).into() }, // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, + Snapshot, ); while let Some(entry) = stream.try_next().await? { let idx_key = - txs.unpack::(entry.key())?; + tx.unpack::(entry.key())?; results.push(idx_key.runner_id); @@ -118,12 +117,12 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R } } else { if input.include_stopped { - let runner_subspace = - txs.subspace(&keys::ns::AllRunnerKey::subspace(input.namespace_id)); + let runner_subspace = keys::subspace() + .subspace(&keys::ns::AllRunnerKey::subspace(input.namespace_id)); let (start, end) = runner_subspace.range(); let end = if let Some(created_before) = input.created_before { - udb_util::end_of_key_range(&txs.pack( + universaldb::utils::end_of_key_range(&tx.pack( &keys::ns::AllRunnerKey::subspace_with_create_ts( input.namespace_id, created_before, @@ -133,18 +132,18 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R end }; - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, reverse: true, ..(start, end).into() }, // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, + Snapshot, ); while let Some(entry) = stream.try_next().await? 
{ - let idx_key = txs.unpack::(entry.key())?; + let idx_key = tx.unpack::(entry.key())?; results.push(idx_key.runner_id); @@ -153,12 +152,12 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R } } } else { - let runner_subspace = - txs.subspace(&keys::ns::ActiveRunnerKey::subspace(input.namespace_id)); + let runner_subspace = keys::subspace() + .subspace(&keys::ns::ActiveRunnerKey::subspace(input.namespace_id)); let (start, end) = runner_subspace.range(); let end = if let Some(created_before) = input.created_before { - udb_util::end_of_key_range(&txs.pack( + universaldb::utils::end_of_key_range(&tx.pack( &keys::ns::ActiveRunnerKey::subspace_with_create_ts( input.namespace_id, created_before, @@ -168,18 +167,18 @@ pub async fn pegboard_runner_list_for_ns(ctx: &OperationCtx, input: &Input) -> R end }; - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, reverse: true, ..(start, end).into() }, // NOTE: Does not have to be serializable because we are listing, stale data does not matter - SNAPSHOT, + Snapshot, ); while let Some(entry) = stream.try_next().await? { - let idx_key = txs.unpack::(entry.key())?; + let idx_key = tx.unpack::(entry.key())?; results.push(idx_key.runner_id); diff --git a/packages/services/pegboard/src/ops/runner/list_names.rs b/packages/services/pegboard/src/ops/runner/list_names.rs index cde5e8ea10..518971521e 100644 --- a/packages/services/pegboard/src/ops/runner/list_names.rs +++ b/packages/services/pegboard/src/ops/runner/list_names.rs @@ -1,7 +1,7 @@ use futures_util::{StreamExt, TryStreamExt}; use gas::prelude::*; -use udb_util::{SNAPSHOT, TxnExt}; -use universaldb::{self as udb, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::utils::IsolationLevel::*; use crate::keys; @@ -21,15 +21,15 @@ pub struct Output { pub async fn pegboard_runner_list_names(ctx: &OperationCtx, input: &Input) -> Result { let names = ctx .udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let runner_name_subspace = - txs.subspace(&keys::ns::RunnerNameKey::subspace(input.namespace_id)); + keys::subspace().subspace(&keys::ns::RunnerNameKey::subspace(input.namespace_id)); let (start, end) = runner_name_subspace.range(); let start = if let Some(name) = &input.after_name { - txs.pack(&keys::ns::RunnerNameKey::new( + tx.pack(&keys::ns::RunnerNameKey::new( input.namespace_id, name.clone(), )) @@ -37,22 +37,18 @@ pub async fn pegboard_runner_list_names(ctx: &OperationCtx, input: &Input) -> Re start }; - txs.get_ranges_keyvalues( - udb::RangeOption { + tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::WantAll, limit: Some(input.limit), ..(start, end).into() }, - // NOTE: This is not SERIALIZABLE to prevent contention with inserting new names - SNAPSHOT, + // NOTE: This is not Serializable to prevent contention with inserting new names + Snapshot, ) - .map(|res| match res { - Ok(entry) => { - let key = txs.unpack::(entry.key())?; - - Ok(key.name) - } - Err(err) => Err(Into::::into(err)), + .map(|res| { + let key = tx.unpack::(res?.key())?; + Ok(key.name) }) .try_collect::>() .await diff --git a/packages/services/pegboard/src/ops/runner/update_alloc_idx.rs b/packages/services/pegboard/src/ops/runner/update_alloc_idx.rs index 39519590df..5aa16e7572 100644 --- a/packages/services/pegboard/src/ops/runner/update_alloc_idx.rs +++ b/packages/services/pegboard/src/ops/runner/update_alloc_idx.rs @@ -1,6 +1,6 @@ use gas::prelude::*; -use udb_util::{SERIALIZABLE, TxnExt}; use universaldb::options::ConflictRangeType; +use universaldb::utils::IsolationLevel::*; use crate::{keys, workflows::runner::RUNNER_ELIGIBLE_THRESHOLD_MS}; @@ -47,11 +47,11 @@ pub enum RunnerEligibility { pub async fn pegboard_runner_update_alloc_idx(ctx: &OperationCtx, input: &Input) -> Result { let notifications = ctx .udb()? 
- .run(|tx, _mc| { + .run(|tx| { let runners = input.runners.clone(); async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); let mut notifications = Vec::new(); // TODO: Parallelize @@ -76,14 +76,14 @@ pub async fn pegboard_runner_update_alloc_idx(ctx: &OperationCtx, input: &Input) last_ping_ts_entry, expired_ts_entry, ) = tokio::try_join!( - txs.read_opt(&workflow_id_key, SERIALIZABLE), - txs.read_opt(&namespace_id_key, SERIALIZABLE), - txs.read_opt(&name_key, SERIALIZABLE), - txs.read_opt(&version_key, SERIALIZABLE), - txs.read_opt(&remaining_slots_key, SERIALIZABLE), - txs.read_opt(&total_slots_key, SERIALIZABLE), - txs.read_opt(&last_ping_ts_key, SERIALIZABLE), - txs.read_opt(&expired_ts_key, SERIALIZABLE), + tx.read_opt(&workflow_id_key, Serializable), + tx.read_opt(&namespace_id_key, Serializable), + tx.read_opt(&name_key, Serializable), + tx.read_opt(&version_key, Serializable), + tx.read_opt(&remaining_slots_key, Serializable), + tx.read_opt(&total_slots_key, Serializable), + tx.read_opt(&last_ping_ts_key, Serializable), + tx.read_opt(&expired_ts_key, Serializable), )?; let ( @@ -131,14 +131,14 @@ pub async fn pegboard_runner_update_alloc_idx(ctx: &OperationCtx, input: &Input) ); // Add read conflict - txs.add_conflict_key(&old_alloc_key, ConflictRangeType::Read)?; + tx.add_conflict_key(&old_alloc_key, ConflictRangeType::Read)?; match runner.action { Action::ClearIdx => { - txs.delete(&old_alloc_key); + tx.delete(&old_alloc_key); } Action::AddIdx => { - txs.write( + tx.write( &old_alloc_key, rivet_data::converted::RunnerAllocIdxKeyData { workflow_id, @@ -151,17 +151,17 @@ pub async fn pegboard_runner_update_alloc_idx(ctx: &OperationCtx, input: &Input) let last_ping_ts = util::timestamp::now(); // Write new ping - txs.write(&last_ping_ts_key, last_ping_ts)?; + tx.write(&last_ping_ts_key, last_ping_ts)?; let last_rtt_key = keys::runner::LastRttKey::new(runner.runner_id); - txs.write(&last_rtt_key, rtt)?; + tx.write(&last_rtt_key, rtt)?; // Only update allocation idx if it existed before - if txs.exists(&old_alloc_key, SERIALIZABLE).await? { + if tx.exists(&old_alloc_key, Serializable).await? { // Clear old key - txs.delete(&old_alloc_key); + tx.delete(&old_alloc_key); - txs.write( + tx.write( &keys::ns::RunnerAllocIdxKey::new( namespace_id, name.clone(), diff --git a/packages/services/pegboard/src/workflows/actor/actor_keys.rs b/packages/services/pegboard/src/workflows/actor/actor_keys.rs index e5cc89a10c..7bb8bcf851 100644 --- a/packages/services/pegboard/src/workflows/actor/actor_keys.rs +++ b/packages/services/pegboard/src/workflows/actor/actor_keys.rs @@ -5,8 +5,8 @@ use epoxy::{ use futures_util::TryStreamExt; use gas::prelude::*; use rivet_data::converted::ActorByKeyKeyData; -use udb_util::prelude::*; -use universaldb::{self as udb, FdbBindingError, options::StreamingMode}; +use universaldb::options::StreamingMode; +use universaldb::prelude::*; use crate::keys; @@ -231,27 +231,27 @@ pub async fn reserve_actor_key( ) -> Result { let res = ctx .udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); // Check if there are any actors that share the same key that are not destroyed - let actor_key_subspace = txs.subspace(&keys::ns::ActorByKeyKey::subspace( + let actor_key_subspace = keys::subspace().subspace(&keys::ns::ActorByKeyKey::subspace( input.namespace_id, input.name.clone(), input.key.clone(), )); let (start, end) = actor_key_subspace.range(); - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, ..(start, end).into() }, - SERIALIZABLE, + Serializable, ); while let Some(entry) = stream.try_next().await? { - let (_idx_key, data) = txs.read_entry::(&entry)?; + let (_idx_key, data) = tx.read_entry::(&entry)?; if !data.is_destroyed { return Ok(ReserveActorKeyOutput::ExistingActor { existing_actor_id: _idx_key.actor_id, @@ -260,7 +260,7 @@ pub async fn reserve_actor_key( } // Write key - txs.write( + tx.write( &keys::ns::ActorByKeyKey::new( input.namespace_id, input.name.clone(), @@ -274,7 +274,7 @@ pub async fn reserve_actor_key( }, )?; - Result::<_, FdbBindingError>::Ok(ReserveActorKeyOutput::Success) + Ok(ReserveActorKeyOutput::Success) }) .await?; diff --git a/packages/services/pegboard/src/workflows/actor/destroy.rs b/packages/services/pegboard/src/workflows/actor/destroy.rs index 7ba691c760..f267328831 100644 --- a/packages/services/pegboard/src/workflows/actor/destroy.rs +++ b/packages/services/pegboard/src/workflows/actor/destroy.rs @@ -1,8 +1,8 @@ use gas::prelude::*; use rivet_data::converted::ActorByKeyKeyData; use rivet_runner_protocol::protocol; -use udb_util::{SERIALIZABLE, TxnExt}; -use universaldb::{self as udb, options::MutationType}; +use universaldb::options::MutationType; +use universaldb::utils::IsolationLevel::*; use super::{DestroyComplete, DestroyStarted, State}; @@ -28,7 +28,7 @@ pub(crate) async fn pegboard_actor_destroy(ctx: &mut WorkflowCtx, input: &Input) .await?; let res = ctx - .activity(UpdateStateAndFdbInput { + .activity(UpdateStateAndDbInput { actor_id: input.actor_id, }) .await?; @@ -53,31 +53,31 @@ pub(crate) async fn pegboard_actor_destroy(ctx: &mut WorkflowCtx, input: &Input) } #[derive(Debug, Serialize, Deserialize, Hash)] -struct UpdateStateAndFdbInput { +struct UpdateStateAndDbInput { actor_id: Id, } #[derive(Debug, Serialize, Deserialize, Hash)] -struct UpdateStateAndFdbOutput { +struct UpdateStateAndDbOutput { runner_workflow_id: Option, } -#[activity(UpdateStateAndFdb)] -async fn update_state_and_fdb( +#[activity(UpdateStateAndDb)] +async fn update_state_and_db( ctx: &ActivityCtx, - input: &UpdateStateAndFdbInput, -) -> Result { + input: &UpdateStateAndDbInput, +) -> Result { let mut state = ctx.state::()?; let destroy_ts = util::timestamp::now(); ctx.udb()? 
- .run(|tx, _mc| { + .run(|tx| { let state = (*state).clone(); async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); - txs.write(&keys::actor::DestroyTsKey::new(input.actor_id), destroy_ts)?; + tx.write(&keys::actor::DestroyTsKey::new(input.actor_id), destroy_ts)?; if let Some(runner_id) = state.runner_id { clear_slot( @@ -92,7 +92,7 @@ async fn update_state_and_fdb( } // Update namespace indexes - txs.delete(&keys::ns::ActiveActorKey::new( + tx.delete(&keys::ns::ActiveActorKey::new( state.namespace_id, state.name.clone(), state.create_ts, @@ -100,7 +100,7 @@ async fn update_state_and_fdb( )); if let Some(k) = &state.key { - txs.write( + tx.write( &keys::ns::ActorByKeyKey::new( state.namespace_id, state.name.clone(), @@ -125,7 +125,7 @@ async fn update_state_and_fdb( state.runner_id = None; let runner_workflow_id = state.runner_workflow_id.take(); - Ok(UpdateStateAndFdbOutput { runner_workflow_id }) + Ok(UpdateStateAndDbOutput { runner_workflow_id }) } #[derive(Debug, Serialize, Deserialize, Hash)] @@ -143,7 +143,7 @@ async fn clear_kv(ctx: &ActivityCtx, input: &ClearKvInput) -> Result Result<(), udb::FdbBindingError> { - let txs = tx.subspace(keys::subspace()); + tx: &universaldb::Transaction, +) -> Result<()> { + let tx = tx.with_subspace(keys::subspace()); - txs.delete(&keys::actor::RunnerIdKey::new(actor_id)); + tx.delete(&keys::actor::RunnerIdKey::new(actor_id)); // This is cleared when the state changes as well as when the actor is destroyed to ensure // consistency during rescheduling and forced deletion. - txs.delete(&keys::runner::ActorKey::new(runner_id, actor_id)); + tx.delete(&keys::runner::ActorKey::new(runner_id, actor_id)); let runner_workflow_id_key = keys::runner::WorkflowIdKey::new(runner_id); let runner_version_key = keys::runner::VersionKey::new(runner_id); @@ -187,18 +187,18 @@ pub(crate) async fn clear_slot( runner_total_slots, runner_last_ping_ts, ) = tokio::try_join!( - txs.read(&runner_workflow_id_key, SERIALIZABLE), - txs.read(&runner_version_key, SERIALIZABLE), - txs.read(&runner_remaining_slots_key, SERIALIZABLE), - txs.read(&runner_total_slots_key, SERIALIZABLE), - txs.read(&runner_last_ping_ts_key, SERIALIZABLE), + tx.read(&runner_workflow_id_key, Serializable), + tx.read(&runner_version_key, Serializable), + tx.read(&runner_remaining_slots_key, Serializable), + tx.read(&runner_total_slots_key, Serializable), + tx.read(&runner_last_ping_ts_key, Serializable), )?; let old_runner_remaining_millislots = (runner_remaining_slots * 1000) / runner_total_slots; let new_runner_remaining_slots = runner_remaining_slots + 1; // Write new remaining slots - txs.write(&runner_remaining_slots_key, new_runner_remaining_slots)?; + tx.write(&runner_remaining_slots_key, new_runner_remaining_slots)?; let old_runner_alloc_key = keys::ns::RunnerAllocIdxKey::new( namespace_id, @@ -210,9 +210,9 @@ pub(crate) async fn clear_slot( ); // Only update allocation idx if it existed before - if txs.exists(&old_runner_alloc_key, SERIALIZABLE).await? { + if tx.exists(&old_runner_alloc_key, Serializable).await? 
{ // Clear old key - txs.delete(&old_runner_alloc_key); + tx.delete(&old_runner_alloc_key); let new_remaining_millislots = (new_runner_remaining_slots * 1000) / runner_total_slots; let new_runner_alloc_key = keys::ns::RunnerAllocIdxKey::new( @@ -224,7 +224,7 @@ pub(crate) async fn clear_slot( runner_id, ); - txs.write( + tx.write( &new_runner_alloc_key, rivet_data::converted::RunnerAllocIdxKeyData { workflow_id: runner_workflow_id, @@ -235,7 +235,7 @@ pub(crate) async fn clear_slot( } if for_serverless { - txs.atomic_op( + tx.atomic_op( &rivet_types::keys::pegboard::ns::ServerlessDesiredSlotsKey::new( namespace_id, runner_name_selector.to_string(), diff --git a/packages/services/pegboard/src/workflows/actor/runtime.rs b/packages/services/pegboard/src/workflows/actor/runtime.rs index 9172583612..d01755d520 100644 --- a/packages/services/pegboard/src/workflows/actor/runtime.rs +++ b/packages/services/pegboard/src/workflows/actor/runtime.rs @@ -5,11 +5,8 @@ use futures_util::{FutureExt, TryStreamExt}; use gas::prelude::*; use rivet_metrics::KeyValue; use rivet_runner_protocol::protocol; -use udb_util::{FormalKey, SERIALIZABLE, SNAPSHOT, TxnExt}; -use universaldb::{ - self as udb, - options::{ConflictRangeType, MutationType, StreamingMode}, -}; +use universaldb::options::{ConflictRangeType, MutationType, StreamingMode}; +use universaldb::utils::{FormalKey, IsolationLevel::*}; use crate::{keys, metrics, workflows::runner::RUNNER_ELIGIBLE_THRESHOLD_MS}; @@ -107,24 +104,24 @@ async fn allocate_actor( // client wf let (for_serverless, res) = ctx .udb()? - .run(|tx, _mc| async move { + .run(|tx| async move { let ping_threshold_ts = util::timestamp::now() - RUNNER_ELIGIBLE_THRESHOLD_MS; - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); // Check if runner is an serverless runner - let for_serverless = txs + let for_serverless = tx .exists( &namespace::keys::RunnerConfigByVariantKey::new( namespace_id, namespace::keys::RunnerConfigVariant::Serverless, input.runner_name_selector.clone(), ), - SERIALIZABLE, + Serializable, ) .await?; if for_serverless { - txs.atomic_op( + tx.atomic_op( &rivet_types::keys::pegboard::ns::ServerlessDesiredSlotsKey::new( namespace_id, input.runner_name_selector.clone(), @@ -135,40 +132,42 @@ async fn allocate_actor( } // Check if a queue exists - let pending_actor_subspace = - txs.subspace(&keys::ns::PendingActorByRunnerNameSelectorKey::subspace( + let pending_actor_subspace = keys::subspace().subspace( + &keys::ns::PendingActorByRunnerNameSelectorKey::subspace( namespace_id, input.runner_name_selector.clone(), - )); - let queue_exists = txs + ), + ); + let queue_exists = tx .get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::Exact, limit: Some(1), ..(&pending_actor_subspace).into() }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with other + // NOTE: This is not Serializable because we don't want to conflict with other // inserts/clears to this range - SNAPSHOT, + Snapshot, ) .next() .await .is_some(); if !queue_exists { - let runner_alloc_subspace = txs.subspace(&keys::ns::RunnerAllocIdxKey::subspace( - namespace_id, - input.runner_name_selector.clone(), - )); + let runner_alloc_subspace = + keys::subspace().subspace(&keys::ns::RunnerAllocIdxKey::subspace( + namespace_id, + input.runner_name_selector.clone(), + )); - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: 
StreamingMode::Iterator, ..(&runner_alloc_subspace).into() }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the + // NOTE: This is not Serializable because we don't want to conflict with all of the // keys, just the one we choose - SNAPSHOT, + Snapshot, ); let mut highest_version = None; @@ -179,7 +178,7 @@ async fn allocate_actor( }; let (old_runner_alloc_key, old_runner_alloc_key_data) = - txs.read_entry::(&entry)?; + tx.read_entry::(&entry)?; if let Some(highest_version) = highest_version { // We have passed all of the runners with the highest version. This is reachable if @@ -202,10 +201,10 @@ async fn allocate_actor( } // Add read conflict only for this key - txs.add_conflict_key(&old_runner_alloc_key, ConflictRangeType::Read)?; + tx.add_conflict_key(&old_runner_alloc_key, ConflictRangeType::Read)?; // Clear old entry - txs.delete(&old_runner_alloc_key); + tx.delete(&old_runner_alloc_key); let new_remaining_slots = old_runner_alloc_key_data.remaining_slots.saturating_sub(1); @@ -213,7 +212,7 @@ async fn allocate_actor( (new_remaining_slots * 1000) / old_runner_alloc_key_data.total_slots; // Write new allocation key with 1 less slot - txs.write( + tx.write( &keys::ns::RunnerAllocIdxKey::new( namespace_id, input.runner_name_selector.clone(), @@ -230,19 +229,19 @@ async fn allocate_actor( )?; // Update runner record - txs.write( + tx.write( &keys::runner::RemainingSlotsKey::new(old_runner_alloc_key.runner_id), new_remaining_slots, )?; // Set runner id of actor - txs.write( + tx.write( &keys::actor::RunnerIdKey::new(input.actor_id), old_runner_alloc_key.runner_id, )?; // Insert actor index key - txs.write( + tx.write( &keys::runner::ActorKey::new( old_runner_alloc_key.runner_id, input.actor_id, @@ -251,7 +250,7 @@ async fn allocate_actor( )?; // Set actor as not sleeping - txs.delete(&keys::actor::SleepTsKey::new(input.actor_id)); + tx.delete(&keys::actor::SleepTsKey::new(input.actor_id)); return Ok(( for_serverless, @@ -270,7 +269,7 @@ async fn allocate_actor( // NOTE: This will conflict with serializable reads to the alloc queue, which is the behavior we // want. If a runner reads from the queue while this is being inserted, one of the two txns will // retry and we ensure the actor does not end up in queue limbo. - txs.write( + tx.write( &keys::ns::PendingActorByRunnerNameSelectorKey::new( namespace_id, input.runner_name_selector.clone(), @@ -316,7 +315,7 @@ pub async fn set_not_connectable(ctx: &ActivityCtx, input: &SetNotConnectableInp let mut state = ctx.state::()?; ctx.udb()? - .run(|tx, _mc| async move { + .run(|tx| async move { let connectable_key = keys::actor::ConnectableKey::new(input.actor_id); tx.clear(&keys::subspace().pack(&connectable_key)); @@ -344,10 +343,10 @@ pub async fn deallocate(ctx: &ActivityCtx, input: &DeallocateInput) -> Result<() let for_serverless = state.for_serverless; ctx.udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); - txs.delete(&keys::actor::ConnectableKey::new(input.actor_id)); + tx.delete(&keys::actor::ConnectableKey::new(input.actor_id)); if let Some(runner_id) = runner_id { destroy::clear_slot( @@ -360,7 +359,7 @@ pub async fn deallocate(ctx: &ActivityCtx, input: &DeallocateInput) -> Result<() ) .await?; } else if for_serverless { - txs.atomic_op( + tx.atomic_op( &rivet_types::keys::pegboard::ns::ServerlessDesiredSlotsKey::new( namespace_id, runner_name_selector.clone(), @@ -574,7 +573,7 @@ pub async fn clear_pending_allocation( // Clear self from alloc queue let cleared = ctx .udb()? - .run(|tx, _mc| async move { + .run(|tx| async move { let pending_alloc_key = keys::subspace().pack(&keys::ns::PendingActorByRunnerNameSelectorKey::new( input.namespace_id, @@ -583,7 +582,7 @@ pub async fn clear_pending_allocation( input.actor_id, )); - let exists = tx.get(&pending_alloc_key, SERIALIZABLE).await?.is_some(); + let exists = tx.get(&pending_alloc_key, Serializable).await?.is_some(); tx.clear(&pending_alloc_key); @@ -620,13 +619,11 @@ pub async fn set_started(ctx: &ActivityCtx, input: &SetStartedInput) -> Result<( state.connectable_ts = Some(util::timestamp::now()); ctx.udb()? - .run(|tx, _mc| async move { + .run(|tx| async move { let connectable_key = keys::actor::ConnectableKey::new(input.actor_id); tx.set( &keys::subspace().pack(&connectable_key), - &connectable_key - .serialize(()) - .map_err(|x| udb::FdbBindingError::CustomError(x.into()))?, + &connectable_key.serialize(())?, ); Ok(()) @@ -650,13 +647,13 @@ pub async fn set_sleeping(ctx: &ActivityCtx, input: &SetSleepingInput) -> Result state.connectable_ts = None; ctx.udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); // Make not connectable - txs.delete(&keys::actor::ConnectableKey::new(input.actor_id)); + tx.delete(&keys::actor::ConnectableKey::new(input.actor_id)); - txs.write(&keys::actor::SleepTsKey::new(input.actor_id), sleep_ts)?; + tx.write(&keys::actor::SleepTsKey::new(input.actor_id), sleep_ts)?; Ok(()) }) diff --git a/packages/services/pegboard/src/workflows/actor/setup.rs b/packages/services/pegboard/src/workflows/actor/setup.rs index 068cc562f0..9ac87beeb2 100644 --- a/packages/services/pegboard/src/workflows/actor/setup.rs +++ b/packages/services/pegboard/src/workflows/actor/setup.rs @@ -1,7 +1,7 @@ use gas::prelude::*; use rivet_data::converted::ActorNameKeyData; use rivet_types::actors::CrashPolicy; -use udb_util::{SERIALIZABLE, TxnExt}; +use universaldb::utils::IsolationLevel::*; use super::State; @@ -69,8 +69,8 @@ pub struct InitStateAndUdbInput { pub create_ts: i64, } -#[activity(InitStateAndFdb)] -pub async fn insert_state_and_fdb(ctx: &ActivityCtx, input: &InitStateAndUdbInput) -> Result<()> { +#[activity(InitStateAndDb)] +pub async fn insert_state_and_db(ctx: &ActivityCtx, input: &InitStateAndUdbInput) -> Result<()> { let mut state = ctx.state::>()?; *state = Some(State::new( @@ -83,14 +83,14 @@ pub async fn insert_state_and_fdb(ctx: &ActivityCtx, input: &InitStateAndUdbInpu )); ctx.udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); - txs.write( + tx.write( &keys::actor::CreateTsKey::new(input.actor_id), input.create_ts, )?; - txs.write( + tx.write( &keys::actor::WorkflowIdKey::new(input.actor_id), ctx.workflow_id(), )?; @@ -120,15 +120,15 @@ pub async fn add_indexes_and_set_create_complete( // Populate indexes ctx.udb()? - .run(|tx, _mc| { + .run(|tx| { let namespace_id = state.namespace_id; let name = state.name.clone(); let create_ts = state.create_ts; async move { - let txs = tx.subspace(keys::subspace()); + let tx = tx.with_subspace(keys::subspace()); // Populate indexes - txs.write( + tx.write( &keys::ns::ActiveActorKey::new( namespace_id, name.clone(), @@ -138,7 +138,7 @@ pub async fn add_indexes_and_set_create_complete( ctx.workflow_id(), )?; - txs.write( + tx.write( &keys::ns::AllActorKey::new( namespace_id, name.clone(), @@ -150,8 +150,8 @@ pub async fn add_indexes_and_set_create_complete( // Write name into namespace actor names list with empty metadata (if it doesn't already exist) let name_key = keys::ns::ActorNameKey::new(namespace_id, name.clone()); - if !txs.exists(&name_key, SERIALIZABLE).await? { - txs.write( + if !tx.exists(&name_key, Serializable).await? { + tx.write( &name_key, ActorNameKeyData { metadata: serde_json::Map::new(), diff --git a/packages/services/pegboard/src/workflows/runner.rs b/packages/services/pegboard/src/workflows/runner.rs index b88cc019c2..7c2dd7ae93 100644 --- a/packages/services/pegboard/src/workflows/runner.rs +++ b/packages/services/pegboard/src/workflows/runner.rs @@ -2,11 +2,8 @@ use futures_util::{FutureExt, StreamExt, TryStreamExt}; use gas::prelude::*; use rivet_data::converted::{ActorNameKeyData, MetadataKeyData, RunnerByKeyKeyData}; use rivet_runner_protocol::protocol; -use udb_util::{FormalChunkedKey, SERIALIZABLE, SNAPSHOT, TxnExt}; -use universaldb::{ - self as udb, - options::{ConflictRangeType, StreamingMode}, -}; +use universaldb::options::{ConflictRangeType, StreamingMode}; +use universaldb::utils::{FormalChunkedKey, IsolationLevel::*}; use crate::{keys, workflows::actor::Allocate}; @@ -127,7 +124,7 @@ pub async fn pegboard_runner(ctx: &mut WorkflowCtx, input: &Input) -> Result<()> } if !state.draining { - ctx.activity(InsertFdbInput { + ctx.activity(InsertDbInput { runner_id: input.runner_id, namespace_id: input.namespace_id, name: input.name.clone(), @@ -207,7 +204,7 @@ pub async fn pegboard_runner(ctx: &mut WorkflowCtx, input: &Input) -> Result<()> state.draining = true; // Can't parallelize these two, requires reading from state - ctx.activity(ClearFdbInput { + ctx.activity(ClearDbInput { runner_id: input.runner_id, name: input.name.clone(), key: input.key.clone(), @@ -358,7 +355,7 @@ pub async fn pegboard_runner(ctx: &mut WorkflowCtx, input: &Input) -> Result<()> }) .await?; - ctx.activity(ClearFdbInput { + ctx.activity(ClearDbInput { runner_id: input.runner_id, name: input.name.clone(), key: input.key.clone(), @@ -443,8 +440,8 @@ async fn init(ctx: &ActivityCtx, input: &InitInput) -> Result { let evict_workflow_id = ctx .udb()? 
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let runner_by_key_key = keys::ns::RunnerByKeyKey::new( input.namespace_id, @@ -453,13 +450,13 @@ async fn init(ctx: &ActivityCtx, input: &InitInput) -> Result { ); // Read existing runner by key slot - let evict_workflow_id = txs - .read_opt(&runner_by_key_key, SERIALIZABLE) + let evict_workflow_id = tx + .read_opt(&runner_by_key_key, Serializable) .await? .map(|x| x.workflow_id); // Allocate self - txs.write( + tx.write( &runner_by_key_key, RunnerByKeyKeyData { runner_id: input.runner_id, @@ -475,7 +472,7 @@ async fn init(ctx: &ActivityCtx, input: &InitInput) -> Result { } #[derive(Debug, Serialize, Deserialize, Hash)] -struct InsertFdbInput { +struct InsertDbInput { runner_id: Id, namespace_id: Id, name: String, @@ -485,19 +482,19 @@ struct InsertFdbInput { create_ts: i64, } -#[activity(InsertFdb)] -async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { +#[activity(InsertDb)] +async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> Result<()> { ctx.udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let remaining_slots_key = keys::runner::RemainingSlotsKey::new(input.runner_id); let last_ping_ts_key = keys::runner::LastPingTsKey::new(input.runner_id); let workflow_id_key = keys::runner::WorkflowIdKey::new(input.runner_id); let (remaining_slots_entry, last_ping_ts_entry) = tokio::try_join!( - txs.read_opt(&remaining_slots_key, SERIALIZABLE), - txs.read_opt(&last_ping_ts_key, SERIALIZABLE), + tx.read_opt(&remaining_slots_key, Serializable), + tx.read_opt(&last_ping_ts_key, Serializable), )?; let now = util::timestamp::now(); @@ -516,44 +513,44 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { } // NOTE: These properties are only inserted once else { - txs.write(&workflow_id_key, ctx.workflow_id())?; + tx.write(&workflow_id_key, ctx.workflow_id())?; - txs.write( + tx.write( &keys::runner::NamespaceIdKey::new(input.runner_id), input.namespace_id, )?; - txs.write( + tx.write( &keys::runner::NameKey::new(input.runner_id), input.name.clone(), )?; - txs.write( + tx.write( &keys::runner::KeyKey::new(input.runner_id), input.key.clone(), )?; - txs.write( + tx.write( &keys::runner::VersionKey::new(input.runner_id), input.version, )?; - txs.write(&remaining_slots_key, input.total_slots)?; + tx.write(&remaining_slots_key, input.total_slots)?; - txs.write( + tx.write( &keys::runner::TotalSlotsKey::new(input.runner_id), input.total_slots, )?; - txs.write( + tx.write( &keys::runner::CreateTsKey::new(input.runner_id), input.create_ts, )?; - txs.write(&last_ping_ts_key, now)?; + tx.write(&last_ping_ts_key, now)?; // Populate ns indexes - txs.write( + tx.write( &keys::ns::ActiveRunnerKey::new( input.namespace_id, input.create_ts, @@ -561,7 +558,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { ), ctx.workflow_id(), )?; - txs.write( + tx.write( &keys::ns::ActiveRunnerByNameKey::new( input.namespace_id, input.name.clone(), @@ -570,7 +567,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { ), ctx.workflow_id(), )?; - txs.write( + tx.write( &keys::ns::AllRunnerKey::new( input.namespace_id, input.create_ts, @@ -578,7 +575,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { ), ctx.workflow_id(), )?; - txs.write( + 
tx.write( &keys::ns::AllRunnerByNameKey::new( input.namespace_id, input.name.clone(), @@ -589,7 +586,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { )?; // Write name into namespace runner names list - txs.write( + tx.write( &keys::ns::RunnerNameKey::new(input.namespace_id, input.name.clone()), (), )?; @@ -598,12 +595,12 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { }; // Set last connect ts - txs.write(&keys::runner::ConnectedTsKey::new(input.runner_id), now)?; + tx.write(&keys::runner::ConnectedTsKey::new(input.runner_id), now)?; let remaining_millislots = (remaining_slots * 1000) / input.total_slots; // Insert into index (same as the `update_alloc_idx` op with `AddIdx`) - txs.write( + tx.write( &keys::ns::RunnerAllocIdxKey::new( input.namespace_id, input.name.clone(), @@ -628,7 +625,7 @@ async fn insert_fdb(ctx: &ActivityCtx, input: &InsertFdbInput) -> Result<()> { } #[derive(Debug, Serialize, Deserialize, Hash)] -struct ClearFdbInput { +struct ClearDbInput { runner_id: Id, name: String, key: String, @@ -641,44 +638,44 @@ enum RunnerState { Stopped, } -#[activity(ClearFdb)] -async fn clear_fdb(ctx: &ActivityCtx, input: &ClearFdbInput) -> Result<()> { +#[activity(ClearDb)] +async fn clear_db(ctx: &ActivityCtx, input: &ClearDbInput) -> Result<()> { let state = ctx.state::()?; let namespace_id = state.namespace_id; let create_ts = state.create_ts; - // TODO: Combine into a single fdb txn + // TODO: Combine into a single udb txn ctx.udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let now = util::timestamp::now(); // Clear runner by key idx if its still the current runner let runner_by_key_key = keys::ns::RunnerByKeyKey::new(namespace_id, input.name.clone(), input.key.clone()); - let runner_id = txs - .read_opt(&runner_by_key_key, SERIALIZABLE) + let runner_id = tx + .read_opt(&runner_by_key_key, Serializable) .await? .map(|x| x.runner_id); if runner_id == Some(input.runner_id) { - txs.delete(&runner_by_key_key); + tx.delete(&runner_by_key_key); } match input.update_state { RunnerState::Draining => { - txs.write(&keys::runner::DrainTsKey::new(input.runner_id), now)?; - txs.write(&keys::runner::ExpiredTsKey::new(input.runner_id), now)?; + tx.write(&keys::runner::DrainTsKey::new(input.runner_id), now)?; + tx.write(&keys::runner::ExpiredTsKey::new(input.runner_id), now)?; } RunnerState::Stopped => { - txs.write(&keys::runner::StopTsKey::new(input.runner_id), now)?; + tx.write(&keys::runner::StopTsKey::new(input.runner_id), now)?; // Update namespace indexes - txs.delete(&keys::ns::ActiveRunnerKey::new( + tx.delete(&keys::ns::ActiveRunnerKey::new( namespace_id, create_ts, input.runner_id, )); - txs.delete(&keys::ns::ActiveRunnerByNameKey::new( + tx.delete(&keys::ns::ActiveRunnerByNameKey::new( namespace_id, input.name.clone(), create_ts, @@ -723,8 +720,8 @@ async fn process_init(ctx: &ActivityCtx, input: &ProcessInitInput) -> Result()?; ctx.udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); // Populate actor names if provided if let Some(actor_names) = &input.prepopulate_actor_names { @@ -736,7 +733,7 @@ async fn process_init(ctx: &ActivityCtx, input: &ProcessInitInput) -> Result Result Result> { let actors = ctx .udb()?
- .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); let actor_subspace = keys::subspace().subspace(&keys::runner::ActorKey::subspace(input.runner_id)); tx.get_ranges_keyvalues( - udb::RangeOption { + universaldb::RangeOption { mode: StreamingMode::WantAll, ..(&actor_subspace).into() }, - SERIALIZABLE, + Serializable, ) - .map(|res| match res { - Ok(entry) => { - let (key, generation) = txs.read_entry::(&entry)?; + .map(|res| { + let (key, generation) = tx.read_entry::(&res?)?; - Ok((key.actor_id.into(), generation)) - } - Err(err) => Err(Into::::into(err)), + Ok((key.actor_id.into(), generation)) }) .try_collect::>() .await @@ -910,13 +899,13 @@ struct CheckExpiredInput { #[activity(CheckExpired)] async fn check_expired(ctx: &ActivityCtx, input: &CheckExpiredInput) -> Result { ctx.udb()? - .run(|tx, _mc| async move { - let txs = tx.subspace(keys::subspace()); + .run(|tx| async move { + let tx = tx.with_subspace(keys::subspace()); - let last_ping_ts = txs + let last_ping_ts = tx .read( &keys::runner::LastPingTsKey::new(input.runner_id), - SERIALIZABLE, + Serializable, ) .await?; @@ -924,7 +913,7 @@ async fn check_expired(ctx: &ActivityCtx, input: &CheckExpiredInput) -> Result(&queue_entry)?; + tx.read_entry::(&queue_entry)?; - let runner_alloc_subspace = txs.subspace(&keys::ns::RunnerAllocIdxKey::subspace( - input.namespace_id, - input.name.clone(), - )); + let runner_alloc_subspace = keys::subspace().subspace( + &keys::ns::RunnerAllocIdxKey::subspace(input.namespace_id, input.name.clone()), + ); - let mut stream = txs.get_ranges_keyvalues( - udb::RangeOption { + let mut stream = tx.get_ranges_keyvalues( + universaldb::RangeOption { mode: StreamingMode::Iterator, // Containers bin pack so we reverse the order reverse: true, ..(&runner_alloc_subspace).into() }, - // NOTE: This is not SERIALIZABLE because we don't want to conflict with all of the + // NOTE: This is not Serializable because we don't want to conflict with all of the // keys, just the one we choose - SNAPSHOT, + Snapshot, ); let mut highest_version = None; @@ -1012,7 +1001,7 @@ pub(crate) async fn allocate_pending_actors( }; let (old_runner_alloc_key, old_runner_alloc_key_data) = - txs.read_entry::(&entry)?; + tx.read_entry::(&entry)?; if let Some(highest_version) = highest_version { // We have passed all of the runners with the highest version. 
This is reachable if @@ -1035,12 +1024,12 @@ pub(crate) async fn allocate_pending_actors( } // Add read conflict only for this runner key - txs.add_conflict_key(&old_runner_alloc_key, ConflictRangeType::Read)?; - txs.delete(&old_runner_alloc_key); + tx.add_conflict_key(&old_runner_alloc_key, ConflictRangeType::Read)?; + tx.delete(&old_runner_alloc_key); // Add read conflict for the queue key - txs.add_conflict_key(&queue_key, ConflictRangeType::Read)?; - txs.delete(&queue_key); + tx.add_conflict_key(&queue_key, ConflictRangeType::Read)?; + tx.delete(&queue_key); let new_remaining_slots = old_runner_alloc_key_data.remaining_slots.saturating_sub(1); @@ -1048,7 +1037,7 @@ pub(crate) async fn allocate_pending_actors( (new_remaining_slots * 1000) / old_runner_alloc_key_data.total_slots; // Write new allocation key with 1 less slot - txs.write( + tx.write( &keys::ns::RunnerAllocIdxKey::new( input.namespace_id, input.name.clone(), @@ -1065,19 +1054,19 @@ pub(crate) async fn allocate_pending_actors( )?; // Update runner record - txs.write( + tx.write( &keys::runner::RemainingSlotsKey::new(old_runner_alloc_key.runner_id), new_remaining_slots, )?; // Set runner id of actor - txs.write( + tx.write( &keys::actor::RunnerIdKey::new(queue_key.actor_id), old_runner_alloc_key.runner_id, )?; // Insert actor index key - txs.write( + tx.write( &keys::runner::ActorKey::new( old_runner_alloc_key.runner_id, queue_key.actor_id,
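The changes above all follow one migration pattern: the former udb-util helpers (the `TxnExt::subspace` wrapper, the `SERIALIZABLE`/`SNAPSHOT` constants, and the two-argument `.run(|tx, _mc| ...)` closure) are replaced by equivalents exposed directly from `universaldb` (`with_subspace`, the `IsolationLevel::{Serializable, Snapshot}` variants, and a one-argument `.run(|tx| ...)` closure), with serialization errors propagating through `?` instead of being wrapped in `udb::FdbBindingError`. The sketch below condenses that pattern into a single hypothetical helper for reference while reviewing; `example_touch_create_ts` and its surrounding imports are illustrative assumptions, not code from this change set, and the exact `universaldb` signatures should be verified against the crate rather than taken from this sketch.

    // A minimal sketch, assuming the gas prelude (ActivityCtx, Id, Result) and the
    // pegboard `keys` module used throughout this diff; `example_touch_create_ts`
    // is a hypothetical helper, not an activity defined in this change set.
    use gas::prelude::*;
    use universaldb::utils::IsolationLevel::*;

    use crate::keys;

    async fn example_touch_create_ts(ctx: &ActivityCtx, actor_id: Id) -> Result<()> {
        ctx.udb()?
            // The closure now takes only the transaction (no `_mc` maybe-committed arg).
            .run(|tx| async move {
                // Scope typed key reads/writes to the pegboard subspace.
                let tx = tx.with_subspace(keys::subspace());

                // Typed read at serializable isolation (previously the SERIALIZABLE const).
                let existing = tx
                    .read_opt(&keys::actor::CreateTsKey::new(actor_id), Serializable)
                    .await?;

                // Typed write; serialization errors flow through `?` rather than being
                // wrapped in udb::FdbBindingError.
                if existing.is_none() {
                    tx.write(
                        &keys::actor::CreateTsKey::new(actor_id),
                        util::timestamp::now(),
                    )?;
                }

                Ok(())
            })
            .await?;

        Ok(())
    }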