diff --git a/.cargo/config.toml b/.cargo/config.toml index 55183631d0a..0129707b6b2 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,5 +1,11 @@ -[target.'(target_familiy="unix")'] -rustflags = ["-C", "force-frame-pointers=yes"] +[target.'cfg(target_family="unix")'] +rustflags = [ + "-C", "force-frame-pointers=yes", + # Add dynamic DuckDB library directory to runtime search path. + "-C", "link-arg=-Wl,-rpath,$ORIGIN/../duckdb-v1.3.0", + "-C", "link-arg=-Wl,-rpath,@executable_path/../duckdb-v1.3.0" +] + [target.wasm32-unknown-unknown] rustflags = ['--cfg', 'getrandom_backend="wasm_js"', '-C', 'target-feature=+atomics'] diff --git a/.github/workflows/sql-benchmarks.yml b/.github/workflows/sql-benchmarks.yml index 276528acc37..af93c93f3f1 100644 --- a/.github/workflows/sql-benchmarks.yml +++ b/.github/workflows/sql-benchmarks.yml @@ -140,6 +140,7 @@ jobs: shell: bash env: RUST_BACKTRACE: full + LD_LIBRARY_PATH: target/duckdb-v1.3.0 run: | # Generate data, running each query once to make sure they don't panic. 
target/release_debug/${{ matrix.binary_name }} --targets datafusion:parquet -i1 -d gh-json --skip-duckdb-build @@ -171,6 +172,7 @@ jobs: OTEL_EXPORTER_OTLP_ENDPOINT: '${{ secrets.OTEL_EXPORTER_OTLP_ENDPOINT }}' OTEL_EXPORTER_OTLP_HEADERS: '${{ secrets.OTEL_EXPORTER_OTLP_HEADERS }}' OTEL_RESOURCE_ATTRIBUTES: 'bench-name=${{ matrix.id }}' + LD_LIBRARY_PATH: target/duckdb-v1.3.0 run: | target/release_debug/${{ matrix.binary_name }} \ -d gh-json \ @@ -189,6 +191,7 @@ jobs: OTEL_EXPORTER_OTLP_ENDPOINT: '${{ secrets.OTEL_EXPORTER_OTLP_ENDPOINT }}' OTEL_EXPORTER_OTLP_HEADERS: '${{ secrets.OTEL_EXPORTER_OTLP_HEADERS }}' OTEL_RESOURCE_ATTRIBUTES: 'bench-name=${{ matrix.id }}' + LD_LIBRARY_PATH: target/duckdb-v1.3.0 run: | target/release_debug/${{ matrix.binary_name }} \ --use-remote-data-dir ${{ matrix.remote_storage }} \ diff --git a/Cargo.lock b/Cargo.lock index 97155603e6a..5676cfef0a3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -697,6 +697,7 @@ dependencies = [ "uuid", "vortex", "vortex-datafusion", + "vortex-duckdb-ext", "xshell", ] diff --git a/Cargo.toml b/Cargo.toml index bc649ace03a..414dd5bb4a5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -213,6 +213,7 @@ vortex-zstd = { version = "0.1.0", path = "./encodings/zstd", default-features = # END crates published by this project # No version contraints for unpublished crates. +vortex-duckdb-ext = { path = "./vortex-duckdb-ext", default-features = false } vortex-duckdb = { path = "./vortex-duckdb", default-features = false } vortex-ffi = { path = "./vortex-ffi", default-features = false } @@ -275,7 +276,8 @@ use_debug = "deny" [profile.release] codegen-units = 1 -lto = "thin" # attempts to perform optimizations across all crates within the dependency graph +# Turn LTO off, as it breaks when vortex-duckdb-ext is linked. 
+lto = "off" [profile.release_debug] debug = "full" diff --git a/bench-vortex/Cargo.toml b/bench-vortex/Cargo.toml index 935e33720d5..c2286783364 100644 --- a/bench-vortex/Cargo.toml +++ b/bench-vortex/Cargo.toml @@ -64,6 +64,7 @@ url = { workspace = true } uuid = { workspace = true, features = ["v4"] } vortex = { workspace = true, features = ["object_store", "parquet", "files"] } vortex-datafusion = { workspace = true } +vortex-duckdb-ext = { workspace = true } xshell = { workspace = true } [features] diff --git a/bench-vortex/src/bin/clickbench.rs b/bench-vortex/src/bin/clickbench.rs index 32c8747782d..2aadc007014 100644 --- a/bench-vortex/src/bin/clickbench.rs +++ b/bench-vortex/src/bin/clickbench.rs @@ -4,14 +4,14 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use bench_vortex::clickbench::{Flavor, clickbench_queries}; -use bench_vortex::ddb::{DuckDBExecutor, register_tables}; use bench_vortex::display::{DisplayFormat, print_measurements_json, render_table}; +use bench_vortex::engines::ddb2; use bench_vortex::measurements::QueryMeasurement; use bench_vortex::metrics::{MetricsSetExt, export_plan_spans}; use bench_vortex::utils::constants::{CLICKBENCH_DATASET, STORAGE_NVME}; use bench_vortex::utils::new_tokio_runtime; use bench_vortex::{ - BenchmarkDataset, Engine, Format, IdempotentPath, Target, ddb, default_env_filter, df, + BenchmarkDataset, Engine, Format, IdempotentPath, Target, default_env_filter, df, }; use clap::{Parser, value_parser}; use datafusion::prelude; @@ -86,21 +86,9 @@ struct DataFusionCtx { emit_plan: bool, } -struct DuckDBCtx { - duckdb_path: PathBuf, -} - -impl DuckDBCtx { - pub fn duckdb_file(&self, format: Format) -> PathBuf { - let dir = format!("clickbench_partitioned/{}", format.name()).to_data_path(); - std::fs::create_dir_all(&dir).vortex_expect("failed to create duckdb data dir"); - dir.join("hits.db") - } -} - enum EngineCtx { DataFusion(DataFusionCtx), - DuckDB(DuckDBCtx), + DuckDB(ddb2::DuckDBCtx), } impl EngineCtx { @@ 
-113,10 +101,8 @@ impl EngineCtx { }) } - fn new_with_duckdb(duckdb_path: &Path) -> Self { - EngineCtx::DuckDB(DuckDBCtx { - duckdb_path: duckdb_path.to_path_buf(), - }) + fn new_with_duckdb() -> anyhow::Result { + Ok(EngineCtx::DuckDB(ddb2::DuckDBCtx::new()?)) } fn to_engine(&self) -> Engine { @@ -202,19 +188,6 @@ fn main() -> anyhow::Result<()> { let mut query_measurements = Vec::new(); - let resolved_path = args - .targets - .iter() - .any(|t| t.engine() == Engine::DuckDB) - .then(|| { - let path = ddb::duckdb_executable_path(&args.duckdb_path); - // If the path is to the duckdb-vortex extension, try to rebuild - if args.duckdb_path.is_none() && !args.skip_duckdb_build { - ddb::build_vortex_duckdb(); - } - path - }); - for target in args.targets.iter() { let engine = target.engine(); let file_format = target.format(); @@ -227,9 +200,7 @@ fn main() -> anyhow::Result<()> { EngineCtx::new_with_datafusion(session_ctx, args.emit_plan) } - Engine::DuckDB => EngineCtx::new_with_duckdb( - resolved_path.as_ref().vortex_expect("path resolved above"), - ), + Engine::DuckDB => EngineCtx::new_with_duckdb()?, _ => unreachable!("engine not supported"), }; @@ -391,12 +362,9 @@ async fn init_data_source( } }, EngineCtx::DuckDB(ctx) => match file_format { - Format::Parquet | Format::OnDiskVortex | Format::OnDiskDuckDB => register_tables( - &DuckDBExecutor::new(ctx.duckdb_path.clone(), ctx.duckdb_file(file_format)), - base_url, - file_format, - dataset, - )?, + Format::Parquet | Format::OnDiskVortex | Format::OnDiskDuckDB => { + ctx.register_tables(base_url, file_format, dataset)?; + } _ => { vortex_panic!( "Engine {} Format {file_format} isn't supported on ClickBench", @@ -469,14 +437,8 @@ fn execute_queries( dataset: CLICKBENCH_DATASET.to_owned(), }); } - - EngineCtx::DuckDB(args) => { - let fastest_run = benchmark_duckdb_query( - query_idx, - query_string, - iterations, - &DuckDBExecutor::new(args.duckdb_path.clone(), args.duckdb_file(file_format)), - ); + 
EngineCtx::DuckDB(ctx) => { + let fastest_run = benchmark_duckdb_query(query_idx, query_string, iterations, ctx); query_measurements.push(QueryMeasurement { query_idx, @@ -567,10 +529,11 @@ fn benchmark_duckdb_query( query_idx: usize, query_string: &str, iterations: usize, - duckdb_executor: &DuckDBExecutor, + duckdb_ctx: &ddb2::DuckDBCtx, ) -> Duration { (0..iterations).fold(Duration::from_millis(u64::MAX), |fastest, _| { - let duration = ddb::execute_clickbench_query(query_string, duckdb_executor) + let duration = duckdb_ctx + .execute_query(query_string) .unwrap_or_else(|err| vortex_panic!("query: {query_idx} failed with: {err}")); fastest.min(duration) diff --git a/bench-vortex/src/engines/ddb2/mod.rs b/bench-vortex/src/engines/ddb2/mod.rs new file mode 100644 index 00000000000..6797130b238 --- /dev/null +++ b/bench-vortex/src/engines/ddb2/mod.rs @@ -0,0 +1,177 @@ +use std::time::{Duration, Instant}; + +use anyhow::Result; +use log::trace; +use url::Url; +use vortex_duckdb_ext::duckdb::{Connection, Database}; + +use crate::{BenchmarkDataset, Format}; + +// TODO: handle S3 + +#[derive(Debug, Clone)] +enum DuckDBObject { + Table, + View, +} + +impl DuckDBObject { + fn to_str(&self) -> &str { + match self { + DuckDBObject::Table => "TABLE", + DuckDBObject::View => "VIEW", + } + } +} + +/// DuckDB context for benchmarks. 
+pub struct DuckDBCtx { + pub db: Database, + pub connection: Connection, +} + +impl DuckDBCtx { + pub fn new() -> Result { + let db = Database::open_in_memory()?; + let connection = db.connect()?; + vortex_duckdb_ext::init(&connection)?; + Ok(Self { db, connection }) + } + + /// Execute DuckDB queries for benchmarks using the internal connection + pub fn execute_query(&self, query: &str) -> Result { + // TODO: handle multiple queries + trace!("execute duckdb query: {}", query); + let time_instant = Instant::now(); + self.connection.execute(query)?; + let query_time = time_instant.elapsed(); + trace!("query completed in {:.3}s", query_time.as_secs_f64()); + + Ok(query_time) + } + + /// Register tables for benchmarks using the internal connection + pub fn register_tables( + &self, + base_url: &Url, + file_format: Format, + dataset: BenchmarkDataset, + ) -> Result<()> { + let object = match file_format { + Format::Parquet | Format::OnDiskVortex => DuckDBObject::View, + Format::OnDiskDuckDB => DuckDBObject::Table, + format => anyhow::bail!("Format {format} isn't supported for DuckDB"), + }; + + let load_format = match file_format { + // Duckdb loads values from parquet to duckdb + Format::Parquet | Format::OnDiskDuckDB => Format::Parquet, + f => f, + }; + + let effective_url = self.resolve_storage_url(base_url, load_format, dataset)?; + let extension = match load_format { + Format::Parquet => "parquet", + Format::OnDiskVortex => "vortex", + other => anyhow::bail!("Format {other} isn't supported for DuckDB"), + }; + + // Generate and execute table registration commands + let commands = self.generate_table_commands(&effective_url, extension, dataset, object); + self.execute_query(&commands)?; + trace!("Executing table registration commands: {}", commands); + + Ok(()) + } + + /// Resolves the storage URL based on dataset and format requirements + fn resolve_storage_url( + &self, + base_url: &Url, + file_format: Format, + dataset: BenchmarkDataset, + ) -> Result { + if 
file_format == Format::OnDiskVortex { + match dataset.vortex_path(base_url) { + Ok(vortex_url) => { + // Check if the directory exists (for file:// URLs) + if vortex_url.scheme() == "file" { + let path = std::path::Path::new(vortex_url.path()); + if !path.exists() { + log::warn!( + "Vortex directory doesn't exist at: {}. Run with DataFusion engine first to generate Vortex files.", + path.display() + ); + } + } + Ok(vortex_url) + } + Err(_) => Ok(base_url.clone()), + } + } else if file_format == Format::Parquet { + match dataset.parquet_path(base_url) { + Ok(parquet_url) => Ok(parquet_url), + Err(_) => Ok(base_url.clone()), + } + } else { + Ok(base_url.clone()) + } + } + + /// Generate SQL commands for table registration. + fn generate_table_commands( + &self, + base_url: &Url, + extension: &str, + dataset: BenchmarkDataset, + duckdb_object: DuckDBObject, + ) -> String { + // Base path contains trailing /. + let base_dir = base_url.as_str(); + let base_dir = base_dir.strip_prefix("file://").unwrap_or(base_dir); + + match dataset { + BenchmarkDataset::TpcH => { + let mut commands = String::new(); + let tables = [ + "customer", "lineitem", "nation", "orders", "part", "partsupp", "region", + "supplier", + ]; + + for table_name in &tables { + let table_path = format!("{base_dir}{table_name}.{extension}"); + commands.push_str(&format!( + "CREATE {} IF NOT EXISTS {table_name} AS SELECT * FROM read_{extension}('{table_path}');\n", + duckdb_object.to_str(), + )); + } + commands + } + BenchmarkDataset::ClickBench { single_file } => { + let file_glob = if single_file { + format!("{base_dir}hits.{extension}") + } else { + format!("{base_dir}*.{extension}") + }; + + format!( + "CREATE {} IF NOT EXISTS hits AS SELECT * FROM read_{extension}('{file_glob}');", + duckdb_object.to_str() + ) + } + BenchmarkDataset::TpcDS => { + let mut commands = String::new(); + let tables = BenchmarkDataset::TpcDS.tables(); + + for table_name in tables { + let table_path = 
format!("{base_dir}{table_name}.{extension}"); + commands.push_str(&format!( + "CREATE {} IF NOT EXISTS {table_name} AS SELECT * FROM read_{extension}('{table_path}');\n", + duckdb_object.to_str(), + )); + } + commands + } + } + } +} diff --git a/bench-vortex/src/engines/mod.rs b/bench-vortex/src/engines/mod.rs index fe37ee38de7..58166df5ff1 100644 --- a/bench-vortex/src/engines/mod.rs +++ b/bench-vortex/src/engines/mod.rs @@ -1,2 +1,3 @@ pub mod ddb; +pub mod ddb2; pub mod df; diff --git a/bench-vortex/src/lib.rs b/bench-vortex/src/lib.rs index 9dfe6165c9a..91a9fa0e0e7 100644 --- a/bench-vortex/src/lib.rs +++ b/bench-vortex/src/lib.rs @@ -25,7 +25,7 @@ pub mod tpch; pub mod utils; pub use datasets::{BenchmarkDataset, file}; -pub use engines::{ddb, df}; +pub use engines::{ddb, ddb2, df}; pub use vortex::error::vortex_panic; // All benchmarks run with mimalloc for consistency. diff --git a/vortex-duckdb-ext/Cargo.toml b/vortex-duckdb-ext/Cargo.toml index d41dad7f923..5a71085f273 100644 --- a/vortex-duckdb-ext/Cargo.toml +++ b/vortex-duckdb-ext/Cargo.toml @@ -17,15 +17,14 @@ readme = { workspace = true } [lib] name = "vortex_duckdb_ext" path = "src/lib.rs" -crate-type = ["staticlib", "cdylib"] +crate-type = ["staticlib", "cdylib", "rlib"] [dependencies] bitvec = { workspace = true } crossbeam-queue = { workspace = true } # duckdb-rs using arrow-rs v55.1.0 with decimal32/64 types cherry-picked on top -# (https://github.com/apache/arrow-rs/pull/7098) -# this is will be removed once we hit 56 (hopefully in july). -# required since duckdb returns decimal32/64 from scans. +# (https://github.com/apache/arrow-rs/pull/7098) this will be removed once we +# hit 56 (hopefully in july). required since duckdb returns decimal32/64 from scans.
duckdb = { git = "https://github.com/vortex-data/duckdb-rs", rev = "247ffb36c41bd44bb18e586bdd6640a95783bb5e", features = [ "vtab-full", ] } diff --git a/vortex-duckdb-ext/build.rs b/vortex-duckdb-ext/build.rs index 7d49b7b20f9..db70ab60979 100644 --- a/vortex-duckdb-ext/build.rs +++ b/vortex-duckdb-ext/build.rs @@ -7,8 +7,16 @@ const DUCKDB_VERSION: &str = "v1.3.0"; const DUCKDB_BASE_URL: &str = "https://github.com/duckdb/duckdb/releases/download"; fn download_duckdb_archive() -> Result> { - let out_dir = PathBuf::from(env::var("OUT_DIR")?); - let duckdb_dir = out_dir.join(format!("duckdb-{DUCKDB_VERSION}")); + let manifest_dir = PathBuf::from(env::var("CARGO_MANIFEST_DIR")?); + let workspace_root = manifest_dir + .parent() + .ok_or("Failed to get workspace root")?; + + let target_dir = env::var("CARGO_TARGET_DIR") + .map(PathBuf::from) + .unwrap_or_else(|_| workspace_root.join("target")); + + let duckdb_dir = target_dir.join(format!("duckdb-{DUCKDB_VERSION}")); let target = env::var("TARGET")?; let (platform, arch) = match target.as_str() { @@ -27,7 +35,7 @@ fn download_duckdb_archive() -> Result> { fs::create_dir_all(&duckdb_dir)?; if !archive_path.exists() { - println!("Downloading DuckDB static libraries from {url}"); + println!("Downloading DuckDB libraries from {url}"); let response = reqwest::blocking::get(&url)?; fs::write(&archive_path, &response.bytes()?)?; println!("Downloaded to {}", archive_path.display()); @@ -44,18 +52,17 @@ fn extract_duckdb_libraries(archive_path: PathBuf) -> Result" - // ); - // println!( - // "cargo:rustc-link-arg=-Wl,-rpath,{}", - // "<>" - // ); - // println!("cargo:rustc-link-lib=static=duckdb"); - // Download and extract prebuilt DuckDB libraries. let zip_path = download_duckdb_archive().unwrap(); let lib_path = extract_duckdb_libraries(zip_path).unwrap(); // Link against DuckDB dylib. 
println!("cargo:rustc-link-search=native={}", lib_path.display()); - println!("cargo:rustc-link-arg=-Wl,-rpath,{}", lib_path.display()); + println!("cargo:rustc-link-lib=dylib=duckdb"); if env::var("TARGET").unwrap().contains("linux") { println!("cargo:rustc-link-lib=stdc++"); @@ -149,8 +145,14 @@ fn main() { .expect("error: Unable to generate bindings for vortex.h") .write_to_file(crate_dir.join("include/vortex.h")); - for entry in walkdir::WalkDir::new("cpp/") { - println!("cargo:rerun-if-changed={}", entry.unwrap().path().display()); + // Watch C/C++ source files for changes. + for entry in walkdir::WalkDir::new("cpp/").into_iter().flatten() { + if entry + .path() + .extension() + .is_some_and(|ext| ext == "cpp" || ext == "h" || ext == "hpp") + { + println!("cargo:rerun-if-changed={}", entry.path().display()); + } } - println!("cargo:rerun-if-changed=src/cpp.rs"); } diff --git a/vortex-duckdb-ext/cpp/table_function.cpp b/vortex-duckdb-ext/cpp/table_function.cpp index c8cca386f0c..6fd56597f5f 100644 --- a/vortex-duckdb-ext/cpp/table_function.cpp +++ b/vortex-duckdb-ext/cpp/table_function.cpp @@ -47,8 +47,7 @@ struct CTableGlobalData final : GlobalTableFunctionState { optional_ptr ffi_data; idx_t MaxThreads() const override { - return 1; - // return GlobalTableFunctionState::MAX_THREADS; + return GlobalTableFunctionState::MAX_THREADS; } }; diff --git a/vortex-duckdb-ext/src/duckdb/connection.rs b/vortex-duckdb-ext/src/duckdb/connection.rs index 412d9688203..8c456ea17e7 100644 --- a/vortex-duckdb-ext/src/duckdb/connection.rs +++ b/vortex-duckdb-ext/src/duckdb/connection.rs @@ -1,6 +1,6 @@ use std::ptr; -use vortex::error::VortexResult; +use vortex::error::{VortexResult, vortex_err}; use crate::duckdb::Database; use crate::{cpp, duckdb_try, wrapper}; @@ -21,4 +21,63 @@ impl Connection { ); Ok(unsafe { Self::own(ptr) }) } + + /// Execute SQL query and return the row count. 
+ pub fn execute_and_get_row_count(&self, query: &str) -> VortexResult { + let mut result: cpp::duckdb_result = unsafe { std::mem::zeroed() }; + let query_cstr = + std::ffi::CString::new(query).map_err(|_| vortex_err!("Invalid query string"))?; + + let status = unsafe { cpp::duckdb_query(self.as_ptr(), query_cstr.as_ptr(), &mut result) }; + + if status != cpp::duckdb_state::DuckDBSuccess { + let error_msg = unsafe { + let error_ptr = cpp::duckdb_result_error(&mut result); + if error_ptr.is_null() { + "Unknown DuckDB error".to_string() + } else { + std::ffi::CStr::from_ptr(error_ptr) + .to_string_lossy() + .into_owned() + } + }; + + unsafe { cpp::duckdb_destroy_result(&mut result) }; + return Err(vortex_err!("Failed to execute query: {}", error_msg)); + } + + let row_count = unsafe { cpp::duckdb_row_count(&mut result).try_into()? }; + unsafe { cpp::duckdb_destroy_result(&mut result) }; + + Ok(row_count) + } + + /// Execute SQL query. + pub fn execute(&self, query: &str) -> VortexResult<()> { + let mut result: cpp::duckdb_result = unsafe { std::mem::zeroed() }; + let query_cstr = + std::ffi::CString::new(query).map_err(|_| vortex_err!("Invalid query string"))?; + + let status = unsafe { cpp::duckdb_query(self.as_ptr(), query_cstr.as_ptr(), &mut result) }; + + if status != cpp::duckdb_state::DuckDBSuccess { + let error_msg = unsafe { + let error_ptr = cpp::duckdb_result_error(&mut result); + if error_ptr.is_null() { + "Unknown DuckDB error".to_string() + } else { + std::ffi::CStr::from_ptr(error_ptr) + .to_string_lossy() + .into_owned() + } + }; + + unsafe { cpp::duckdb_destroy_result(&mut result) }; + return Err(vortex_err!("Failed to execute query: {}", error_msg)); + } + + unsafe { cpp::duckdb_destroy_result(&mut result) }; + + Ok(()) + } } diff --git a/vortex-duckdb-ext/src/lib.rs b/vortex-duckdb-ext/src/lib.rs index 983ac1835b0..ffdafe66b1f 100644 --- a/vortex-duckdb-ext/src/lib.rs +++ b/vortex-duckdb-ext/src/lib.rs @@ -7,7 +7,7 @@ use duckdb::*; // **WARNING end 
use vortex::error::{VortexExpect, VortexResult}; -use crate::duckdb::{Connection, Database}; +pub use crate::duckdb::{Connection, Database}; use crate::scan::VortexTableFunction; mod convert; @@ -32,7 +32,8 @@ mod scan_tests; /// Initialize the Vortex extension by registering the `vortex_scan` function. pub fn init(conn: &Connection) -> VortexResult<()> { - conn.register_table_function::(c"vortex_scan") + conn.register_table_function::(c"vortex_scan")?; + conn.register_table_function::(c"read_vortex") } /// The DuckDB extension ABI initialization function. diff --git a/vortex-duckdb-ext/src/scan_tests.rs b/vortex-duckdb-ext/src/scan_tests.rs index 076dd2e09c8..a74ed58727b 100644 --- a/vortex-duckdb-ext/src/scan_tests.rs +++ b/vortex-duckdb-ext/src/scan_tests.rs @@ -9,17 +9,11 @@ use vortex::scalar::Scalar; use vortex::validity::Validity; use crate::duckdb::Database; -use crate::scan::VortexTableFunction; fn database_connection() -> Connection { let db = Database::open_in_memory().unwrap(); let connection = db.connect().unwrap(); - connection - .register_table_function::(c"vortex_scan") - .unwrap(); - connection - .register_table_function::(c"read_vortex") - .unwrap(); + crate::init(&connection).unwrap(); unsafe { Connection::open_from_raw(db.as_ptr().cast()) }.unwrap() }