Skip to content
Closed
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
409 changes: 363 additions & 46 deletions Cargo.lock

Large diffs are not rendered by default.

29 changes: 29 additions & 0 deletions crates/nostr-postgresdb/Cargo.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
[package]
name = "nostr-postgresdb"
version = "0.41.0"
edition = "2021"
description = "Postgres storage backend for Nostr apps"
authors.workspace = true
homepage.workspace = true
repository.workspace = true
license.workspace = true
readme = "README.md"
rust-version.workspace = true
keywords = ["nostr", "database", "postgres"]

[dependencies]
nostr = { workspace = true, features = ["std"] }
nostr-database = { workspace = true, features = ["flatbuf"] }
tracing.workspace = true
diesel = { version = "2", features = ["postgres", "serde_json"] }
diesel-async = { version = "0.5", features = ["postgres", "deadpool"] }
diesel_migrations = { version = "2", features = ["postgres"] }
deadpool = { version = "0.12", features = ["managed", "rt_tokio_1"] }

[dev-dependencies]
tokio.workspace = true
nostr-relay-builder = { workspace = true }
tracing-subscriber = { workspace = true }

[[example]]
name = "postgres-relay"
19 changes: 19 additions & 0 deletions crates/nostr-postgresdb/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
# Nostr Postgres database backend

Postgres storage backend for Nostr apps.

## State

**This library is in an ALPHA state**, things that are implemented generally
work but the API will change in breaking ways.

## Donations

`rust-nostr` is free and open-source. This means we do not earn any revenue by
selling it. Instead, we rely on your financial support. If you actively use any
of the `rust-nostr` libs/software/services, then please [donate](https://rust-nostr.org/donate).

## License

This project is distributed under the MIT software license — see the
[LICENSE](../../LICENSE) file for details.
10 changes: 10 additions & 0 deletions crates/nostr-postgresdb/diesel.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli

[print_schema]
file = "src/schema.rs"
custom_type_derives = ["diesel::query_builder::QueryId", "Clone"]
schema = "nostr"

[migrations_directory]
dir = "migrations"
37 changes: 37 additions & 0 deletions crates/nostr-postgresdb/examples/postgres-relay.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
// Copyright (c) 2025 Protom
// Distributed under the MIT software license

use std::time::Duration;

use nostr_database::prelude::*;
use nostr_postgresdb::NostrPostgres;
use nostr_relay_builder::prelude::*;

// Your database URL
const DB_URL: &str = "postgres://postgres:password@localhost:5432";

#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();

// This will programatically run pending db migrations
nostr_postgresdb::run_migrations(DB_URL)?;

// Create a conncetion pool
let pool = nostr_postgresdb::postgres_connection_pool(DB_URL).await?;

// Create a nostr db instance
let db: NostrPostgres = pool.into();

// Add db to builder
let builder = RelayBuilder::default().database(db);

// Create local relay
let relay = LocalRelay::run(builder).await?;
println!("Url: {}", relay.url());

// Keep up the program
loop {
tokio::time::sleep(Duration::from_secs(60)).await;
}
}
Empty file.
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.

-- Undo the initial Diesel setup: remove the `updated_at` helper functions
-- so a full rollback leaves the database clean.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.




-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;

-- Trigger function: stamps `updated_at` with the current time on any UPDATE
-- that actually changed the row, but only when the caller did NOT set
-- `updated_at` themselves (so explicit values are respected).
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
-- This file should undo anything in `up.sql`
-- Drop children before parents: event_tags references events, and the
-- schema must be empty before it can be dropped.
DROP TABLE nostr.event_tags;
DROP TABLE nostr.events;
DROP SCHEMA nostr;
30 changes: 30 additions & 0 deletions crates/nostr-postgresdb/migrations/2025-04-11-095120_events/up.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
-- Init the schema
CREATE SCHEMA IF NOT EXISTS nostr;

-- The actual event data
-- NOTE(review): VARCHAR(64)/VARCHAR(128) sizes suggest hex-encoded id,
-- pubkey and signature — confirm against the Rust model layer.
CREATE TABLE nostr.events (
    id VARCHAR(64) PRIMARY KEY,
    pubkey VARCHAR(64) NOT NULL,
    created_at BIGINT NOT NULL,
    kind BIGINT NOT NULL,
    payload BYTEA NOT NULL,
    signature VARCHAR(128) NOT NULL,
    deleted BOOLEAN NOT NULL
);

-- Direct indexes
CREATE INDEX event_pubkey ON nostr.events (pubkey);
CREATE INDEX event_date ON nostr.events (created_at);
CREATE INDEX event_kind ON nostr.events (kind);
CREATE INDEX event_deleted ON nostr.events (deleted);

-- The tag index, the primary will give us the index automatically
CREATE TABLE nostr.event_tags (
    tag TEXT NOT NULL,
    tag_value TEXT NOT NULL,
    event_id VARCHAR(64) NOT NULL
        REFERENCES nostr.events (id)
        ON DELETE CASCADE
        ON UPDATE CASCADE,
    PRIMARY KEY (tag, tag_value, event_id)
);

-- The composite primary key leads with `tag`, so it cannot serve lookups by
-- event id alone. Without this index, every cascading DELETE/UPDATE from
-- nostr.events would sequentially scan nostr.event_tags.
CREATE INDEX event_tags_event_id ON nostr.event_tags (event_id);
135 changes: 135 additions & 0 deletions crates/nostr-postgresdb/src/lib.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
mod migrations;
mod model;
mod postgres;
mod schema;

use diesel::prelude::*;
use diesel_async::RunQueryDsl;
pub use migrations::run_migrations;
use model::{EventDataDb, EventDb};
use nostr::event::*;
use nostr::filter::Filter;
use nostr::types::Timestamp;
use nostr::util::BoxedFuture;
use nostr_database::*;
use postgres::{build_filter_query, with_limit};
pub use postgres::{postgres_connection_pool, NostrPostgres};
use schema::nostr::events;

impl NostrDatabase for NostrPostgres {
    /// Identify this backend as a custom (non-built-in) Postgres store
    fn backend(&self) -> Backend {
        Backend::Custom(String::from("Postgres"))
    }
}

impl NostrEventsDatabase for NostrPostgres {
/// Save [`Event`] into store
///
/// **This method assumes that [`Event`] was already verified**
fn save_event<'a>(
&'a self,
event: &'a Event,
) -> BoxedFuture<'a, Result<SaveEventStatus, DatabaseError>> {
Box::pin(async move { self.save(EventDataDb::try_from(event)?).await })
}

/// Check event status by ID
///
/// Check if the event is saved, deleted or not existent.
fn check_id<'a>(
&'a self,
event_id: &'a EventId,
) -> BoxedFuture<'a, Result<DatabaseEventStatus, DatabaseError>> {
Box::pin(async move {
let status = match self.event_by_id(event_id).await? {
Some(e) if e.deleted => DatabaseEventStatus::Deleted,
Some(_) => DatabaseEventStatus::Saved,
None => DatabaseEventStatus::NotExistent,
};
Ok(status)
})
}

/// Coordinate feature is not supported yet
fn has_coordinate_been_deleted<'a>(
&'a self,
_coordinate: &'a nostr::nips::nip01::CoordinateBorrow<'a>,
_timestamp: &'a Timestamp,
) -> BoxedFuture<'a, Result<bool, DatabaseError>> {
Box::pin(async move { Ok(false) })
}

/// Get [`Event`] by [`EventId`]
fn event_by_id<'a>(
&'a self,
_event_id: &'a EventId,
) -> BoxedFuture<'a, Result<Option<Event>, DatabaseError>> {
Box::pin(async move {
let event = match self.event_by_id(_event_id).await? {
Some(e) if !e.deleted => {
Some(Event::decode(&e.payload).map_err(DatabaseError::backend)?)
}
_ => None,
};
Ok(event)
})
}

/// Count the number of events found with [`Filter`].
///
/// Use `Filter::new()` or `Filter::default()` to count all events.
fn count(&self, filter: Filter) -> BoxedFuture<Result<usize, DatabaseError>> {
Box::pin(async move {
let res: i64 = build_filter_query(filter)
.count()
.get_result(&mut self.get_connection().await?)
.await
.map_err(DatabaseError::backend)?;
Ok(res as usize)
})
}

/// Query stored events.
fn query(&self, filter: Filter) -> BoxedFuture<Result<Events, DatabaseError>> {
let filter = with_limit(filter, 10000);
Box::pin(async move {
let mut events = Events::new(&filter);
let result = build_filter_query(filter.clone())
.select(EventDb::as_select())
.load(&mut self.get_connection().await?)
.await
.map_err(DatabaseError::backend)?;

for item in result.into_iter() {
if let Ok(event) = Event::decode(&item.payload) {
events.insert(event);
}
}
Ok(events)
})
}

/// Delete all events that match the [Filter]
fn delete(&self, filter: Filter) -> BoxedFuture<Result<(), DatabaseError>> {
let filter = with_limit(filter, 999);
Box::pin(async move {
let filter = build_filter_query(filter);
diesel::update(events::table)
.set(events::deleted.eq(true))
.filter(events::id.eq_any(filter.select(events::id)))
.execute(&mut self.get_connection().await?)
.await
.map_err(DatabaseError::backend)?;

Ok(())
})
}
}

/// For now we want to avoid wiping the database
impl NostrDatabaseWipe for NostrPostgres {
    /// Always fails: wiping is intentionally unsupported to protect data.
    #[inline]
    fn wipe(&self) -> BoxedFuture<Result<(), DatabaseError>> {
        // No captures, so a plain (non-`move`) async block suffices.
        Box::pin(async { Err(DatabaseError::NotSupported) })
    }
}
19 changes: 19 additions & 0 deletions crates/nostr-postgresdb/src/migrations.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
use diesel::{Connection, PgConnection};
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};
use nostr_database::DatabaseError;
use tracing::info;

const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");

/// Programmatically run the embedded db migrations
///
/// Opens a synchronous connection to the database and applies any pending
/// migrations bundled into the binary at compile time.
///
/// # Errors
///
/// Returns a [`DatabaseError`] if the connection cannot be established or if
/// applying a migration fails.
pub fn run_migrations(connection_string: &str) -> Result<(), DatabaseError> {
    info!("Running db migrations in postgres database");
    let mut connection =
        PgConnection::establish(connection_string).map_err(DatabaseError::backend)?;

    // `run_pending_migrations` returns the list of versions it applied
    let res = connection
        .run_pending_migrations(MIGRATIONS)
        .map_err(DatabaseError::Backend)?;
    info!("Successfully executed postgres db migrations {:?}", res);
    Ok(())
}
Loading