diff --git a/.tool-versions b/.tool-versions
deleted file mode 100644
index c3507cd6..00000000
--- a/.tool-versions
+++ /dev/null
@@ -1 +0,0 @@
-rust stable
\ No newline at end of file
diff --git a/Cargo.lock b/Cargo.lock
index acd50f41..5083992f 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -800,6 +800,7 @@ name = "cipherstash-proxy"
version = "2.0.0"
dependencies = [
"arc-swap",
+ "async-trait",
"aws-lc-rs",
"bigdecimal",
"bytes",
diff --git a/docs/errors.md b/docs/errors.md
index 9f5c60e1..670df8b7 100644
--- a/docs/errors.md
+++ b/docs/errors.md
@@ -32,6 +32,8 @@
- Configuration errors:
- [Missing or invalid TLS configuration](#config-missing-or-invalid-tls)
+ - [Network configuration change requires restart](#config-network-change-requires-restart)
+
@@ -651,3 +653,35 @@ Check that the certificate and private key are valid.
+
+
+## Network configuration change requires restart
+
+A configuration reload was attempted with network-level changes that require a full restart.
+
+### Error message
+
+```
+Network configuration change requires restart
+```
+
+### Notes
+
+When receiving a SIGHUP signal, CipherStash Proxy attempts to reload application-level configuration without disrupting active connections. However, certain network-related configuration changes require stopping and restarting the proxy service to take effect.
+
+The following settings require a restart when changed:
+- `server.host` - The host address the proxy listens on
+- `server.port` - The port the proxy listens on
+- `server.require_tls` - TLS requirement setting
+- `server.worker_threads` - Number of worker threads
+- `tls` - Any TLS certificate or key configuration
+
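+The exact layout depends on how your proxy configuration file is structured, but as an illustrative TOML sketch (key names mirror the settings listed above), the values that cannot be changed via SIGHUP look like this:
+
+```toml
+[server]
+host = "0.0.0.0"     # listen address: change requires restart
+port = 6432          # listen port: change requires restart
+require_tls = true   # TLS requirement: change requires restart
+worker_threads = 4   # worker thread count: change requires restart
+
+# Any change to the [tls] certificate or key configuration also requires a restart.
+```
+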
+### How to fix
+
+1. Stop the CipherStash Proxy service
+2. Update the configuration as needed
+3. Restart the CipherStash Proxy service
+
+Application-level configuration changes (database, auth, encrypt, log, prometheus, development) can be reloaded without a restart by sending SIGHUP.
+
+
diff --git a/mise.toml b/mise.toml
index eb451dcf..d0256394 100644
--- a/mise.toml
+++ b/mise.toml
@@ -309,6 +309,8 @@ echo
mise --env tcp run postgres:setup
mise --env tls run postgres:setup
+mise run test:integration:showcase
+
echo
echo '###############################################'
echo '# Test: Prometheus'
@@ -377,10 +379,8 @@ echo '###############################################'
echo '# Test: Showcase'
echo '###############################################'
echo
-mise --env tls run proxy:up proxy-tls --extra-args "--detach --wait"
-mise --env tls run test:wait_for_postgres_to_quack --port 6432 --max-retries 20 --tls
-RUST_BACKTRACE=full cargo run -p showcase
-mise --env tls run proxy:down
+
+mise run test:integration:showcase
echo
echo '###############################################'
@@ -637,6 +637,15 @@ else
fi
"""
+[tasks."test:integration:showcase"]
+description = "Run Showcase integration test"
+run = """
+mise --env tls run proxy:up proxy-tls --extra-args "--detach --wait"
+mise --env tls run test:wait_for_postgres_to_quack --port 6432 --max-retries 20 --tls
+RUST_BACKTRACE=full cargo run -p showcase
+mise --env tls run proxy:down
+"""
+
[tasks.release]
description = "Publish release artifacts"
depends = ["release:docker"]
diff --git a/packages/cipherstash-proxy/Cargo.toml b/packages/cipherstash-proxy/Cargo.toml
index e9373779..b47033da 100644
--- a/packages/cipherstash-proxy/Cargo.toml
+++ b/packages/cipherstash-proxy/Cargo.toml
@@ -4,6 +4,7 @@ version = "2.0.0"
edition = "2021"
[dependencies]
+async-trait = "0.1"
aws-lc-rs = "1.13.3"
bigdecimal = { version = "0.4.6", features = ["serde-json"] }
arc-swap = "1.7.1"
diff --git a/packages/cipherstash-proxy/src/config/database.rs b/packages/cipherstash-proxy/src/config/database.rs
index 464698cb..428d96ac 100644
--- a/packages/cipherstash-proxy/src/config/database.rs
+++ b/packages/cipherstash-proxy/src/config/database.rs
@@ -81,6 +81,21 @@ impl DatabaseConfig {
})?;
Ok(name)
}
+
+ #[cfg(test)]
+ pub fn for_testing() -> Self {
+ Self {
+ host: Self::default_host(),
+ port: Self::default_port(),
+ name: "test".to_string(),
+ username: "test".to_string(),
+ password: Protected::new("test".to_string()),
+ connection_timeout: None,
+ with_tls_verification: false,
+ config_reload_interval: Self::default_config_reload_interval(),
+ schema_reload_interval: Self::default_schema_reload_interval(),
+ }
+ }
}
///
diff --git a/packages/cipherstash-proxy/src/config/tandem.rs b/packages/cipherstash-proxy/src/config/tandem.rs
index bf018e99..9b5e6a3b 100644
--- a/packages/cipherstash-proxy/src/config/tandem.rs
+++ b/packages/cipherstash-proxy/src/config/tandem.rs
@@ -273,6 +273,29 @@ impl TandemConfig {
DEFAULT_THREAD_STACK_SIZE
}
+
+ #[cfg(test)]
+ pub fn for_testing() -> Self {
+ Self {
+ server: ServerConfig::default(),
+ database: DatabaseConfig::for_testing(),
+ auth: AuthConfig {
+ workspace_crn: "crn:ap-southeast-2.aws:IJGECSCWKREECNBS".parse().unwrap(),
+ client_access_key: "test".to_string(),
+ },
+ encrypt: EncryptConfig {
+ client_id: "test".to_string(),
+ client_key: "test".to_string(),
+ default_keyset_id: Some(
+ Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
+ ),
+ },
+ tls: None,
+ log: LogConfig::default(),
+ prometheus: PrometheusConfig::default(),
+ development: None,
+ }
+ }
}
impl PrometheusConfig {
diff --git a/packages/cipherstash-proxy/src/config/tls.rs b/packages/cipherstash-proxy/src/config/tls.rs
index e5c0647b..9585cff6 100644
--- a/packages/cipherstash-proxy/src/config/tls.rs
+++ b/packages/cipherstash-proxy/src/config/tls.rs
@@ -10,7 +10,7 @@ use crate::{error::TlsConfigError, log::CONFIG};
/// Server TLS Configuration
/// This is listener/inbound connection config
///
-#[derive(Clone, Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize, PartialEq)]
#[serde(untagged)]
pub enum TlsConfig {
Pem {
diff --git a/packages/cipherstash-proxy/src/error.rs b/packages/cipherstash-proxy/src/error.rs
index 2d09107c..8a1f0a8a 100644
--- a/packages/cipherstash-proxy/src/error.rs
+++ b/packages/cipherstash-proxy/src/error.rs
@@ -176,6 +176,9 @@ pub enum ConfigError {
#[error("Expected an Encrypt configuration table")]
MissingEncryptConfigTable,
+ #[error("Network configuration change requires restart For help visit {}#config-network-change-requires-restart", ERROR_DOC_BASE_URL)]
+ NetworkConfigurationChangeRequiresRestart,
+
#[error(transparent)]
Parse(#[from] serde_json::Error),
diff --git a/packages/cipherstash-proxy/src/main.rs b/packages/cipherstash-proxy/src/main.rs
index fa3958c6..a11da143 100644
--- a/packages/cipherstash-proxy/src/main.rs
+++ b/packages/cipherstash-proxy/src/main.rs
@@ -1,12 +1,11 @@
use cipherstash_proxy::config::TandemConfig;
use cipherstash_proxy::connect::{self, AsyncStream};
-use cipherstash_proxy::error::Error;
+use cipherstash_proxy::error::{ConfigError, Error};
use cipherstash_proxy::prometheus::CLIENTS_ACTIVE_CONNECTIONS;
use cipherstash_proxy::proxy::Proxy;
use cipherstash_proxy::{cli, log, postgresql as pg, prometheus, tls, Args};
use clap::Parser;
use metrics::gauge;
-use tokio::net::TcpListener;
use tokio::signal::unix::{signal, SignalKind};
use tokio_util::task::TaskTracker;
use tracing::{error, info, warn};
@@ -55,7 +54,7 @@ fn main() -> Result<(), Box> {
let mut proxy = init(config).await;
- let mut listener = connect::bind_with_retry(&proxy.config.server).await;
+ let listener = connect::bind_with_retry(&proxy.config.server).await;
let tracker = TaskTracker::new();
let mut client_id = 0;
@@ -81,9 +80,8 @@ fn main() -> Result<(), Box> {
break;
},
_ = sighup() => {
- info!(msg = "Received SIGHUP. Reloading configuration");
- (listener, proxy) = reload_config(listener, &args, proxy).await;
- info!(msg = "Reloaded configuration");
+ info!(msg = "Received SIGHUP. Reloading application configuration");
+ proxy = reload_application_config(&proxy.config, &args).await.unwrap_or(proxy);
},
_ = sigterm() => {
info!(msg = "Received SIGTERM");
@@ -91,16 +89,15 @@ fn main() -> Result<(), Box> {
},
Ok(client_stream) = AsyncStream::accept(&listener) => {
- let proxy = proxy.clone();
-
client_id += 1;
+ let context = proxy.context(client_id);
+
tracker.spawn(async move {
- let proxy = proxy.clone();
gauge!(CLIENTS_ACTIVE_CONNECTIONS).increment(1);
- match pg::handler(client_stream, proxy, client_id).await {
+ match pg::handler(client_stream, context).await {
Ok(_) => (),
Err(err) => {
@@ -261,7 +258,15 @@ async fn sighup() -> std::io::Result<()> {
Ok(())
}
-async fn reload_config(listener: TcpListener, args: &Args, proxy: Proxy) -> (TcpListener, Proxy) {
+fn has_network_config_changed(current: &TandemConfig, new: &TandemConfig) -> bool {
+ current.server.host != new.server.host
+ || current.server.port != new.server.port
+ || current.server.require_tls != new.server.require_tls
+ || current.server.worker_threads != new.server.worker_threads
+ || current.tls != new.tls
+}
+
+async fn reload_application_config(config: &TandemConfig, args: &Args) -> Result<Proxy, Error> {
let new_config = match TandemConfig::load(args) {
Ok(config) => config,
Err(err) => {
@@ -269,17 +274,19 @@ async fn reload_config(listener: TcpListener, args: &Args, proxy: Proxy) -> (Tcp
msg = "Configuration could not be reloaded: {}",
error = err.to_string()
);
- return (listener, proxy);
+ return Err(err);
}
};
- let new_proxy = init(new_config).await;
+ // Check for network config changes that require restart
+ if has_network_config_changed(config, &new_config) {
+ let err = ConfigError::NetworkConfigurationChangeRequiresRestart;
+ warn!(msg = err.to_string());
- // Explicit drop needed here to free the network resources before binding if using the same address & port
- std::mem::drop(listener);
+ return Err(err.into());
+ }
- (
- connect::bind_with_retry(&new_proxy.config.server).await,
- new_proxy,
- )
+ info!(msg = "Configuration reloaded");
+ let proxy = init(new_config).await;
+ Ok(proxy)
}
diff --git a/packages/cipherstash-proxy/src/postgresql/backend.rs b/packages/cipherstash-proxy/src/postgresql/backend.rs
index 474d8309..f093716e 100644
--- a/packages/cipherstash-proxy/src/postgresql/backend.rs
+++ b/packages/cipherstash-proxy/src/postgresql/backend.rs
@@ -19,7 +19,7 @@ use crate::prometheus::{
DECRYPTION_ERROR_TOTAL, DECRYPTION_REQUESTS_TOTAL, ROWS_ENCRYPTED_TOTAL,
ROWS_PASSTHROUGH_TOTAL, ROWS_TOTAL, SERVER_BYTES_RECEIVED_TOTAL,
};
-use crate::proxy::Proxy;
+use crate::proxy::EncryptionService;
use bytes::BytesMut;
use metrics::{counter, histogram};
use std::time::Instant;
@@ -70,25 +70,25 @@ use tracing::{debug, error, info, warn};
/// - `RowDescription`: Result column metadata (modified for encrypted columns)
/// - `ParameterDescription`: Parameter metadata (modified for encrypted parameters)
/// - `ReadyForQuery`: Session ready state (triggers schema reload if needed)
-pub struct Backend<R>
+pub struct Backend<R, S>
where
R: AsyncRead + Unpin,
+ S: EncryptionService,
{
/// Sender for outgoing messages to client
client_sender: Sender,
/// Reader for incoming messages from server
server_reader: R,
- /// Encryption service for column decryption
- proxy: Proxy,
/// Session context with portal and statement metadata
- context: Context,
+ context: Context<S>,
/// Buffer for batching DataRow messages before decryption
buffer: MessageBuffer,
}
-impl<R> Backend<R>
+impl<R, S> Backend<R, S>
where
R: AsyncRead + Unpin,
+ S: EncryptionService,
{
/// Creates a new Backend instance.
///
@@ -98,12 +98,11 @@ where
/// * `server_reader` - Stream for reading messages from the PostgreSQL server
/// * `encrypt` - Encryption service for handling column decryption
/// * `context` - Session context shared with the frontend
- pub fn new(client_sender: Sender, server_reader: R, proxy: Proxy, context: Context) -> Self {
+ pub fn new(client_sender: Sender, server_reader: R, context: Context<S>) -> Self {
let buffer = MessageBuffer::new();
Backend {
client_sender,
server_reader,
- proxy,
context,
buffer,
}
@@ -150,19 +149,17 @@ where
/// Returns `Ok(())` on successful message processing, or an `Error` if a fatal
/// error occurs that should terminate the connection.
pub async fn rewrite(&mut self) -> Result<(), Error> {
- let connection_timeout = self.proxy.config.database.connection_timeout();
-
let (code, mut bytes) = protocol::read_message(
&mut self.server_reader,
self.context.client_id,
- connection_timeout,
+ self.context.connection_timeout(),
)
.await?;
let sent: u64 = bytes.len() as u64;
counter!(SERVER_BYTES_RECEIVED_TOTAL).increment(sent);
- if self.proxy.is_passthrough() {
+ if self.context.is_passthrough() {
debug!(target: DEVELOPMENT,
client_id = self.context.client_id,
msg = "Passthrough enabled"
@@ -250,7 +247,7 @@ where
msg = "ReadyForQuery"
);
if self.context.schema_changed() {
- self.proxy.reload_schema().await;
+ self.context.reload_schema().await;
}
}
@@ -450,16 +447,12 @@ where
);
// Decrypt CipherText -> Plaintext
- let plaintexts = self
- .proxy
- .decrypt(keyset_id, ciphertexts)
- .await
- .inspect_err(|_| {
- counter!(DECRYPTION_ERROR_TOTAL).increment(1);
- })?;
+ let plaintexts = self.context.decrypt(ciphertexts).await.inspect_err(|_| {
+ counter!(DECRYPTION_ERROR_TOTAL).increment(1);
+ })?;
// Avoid the iter calculation if we can
- if self.proxy.config.prometheus_enabled() {
+ if self.context.prometheus_enabled() {
let decrypted_count =
plaintexts
.iter()
@@ -655,9 +648,10 @@ where
}
/// Implementation of PostgreSQL error handling for the Backend component.
-impl<R> PostgreSqlErrorHandler for Backend<R>
+impl<R, S> PostgreSqlErrorHandler for Backend<R, S>
where
R: AsyncRead + Unpin,
+ S: EncryptionService,
{
fn client_sender(&mut self) -> &mut Sender {
&mut self.client_sender
diff --git a/packages/cipherstash-proxy/src/postgresql/column_mapper.rs b/packages/cipherstash-proxy/src/postgresql/column_mapper.rs
new file mode 100644
index 00000000..c63bba03
--- /dev/null
+++ b/packages/cipherstash-proxy/src/postgresql/column_mapper.rs
@@ -0,0 +1,167 @@
+use crate::{
+ eql::Identifier,
+ error::{EncryptError, Error},
+ log::MAPPER,
+ postgresql::Column,
+ proxy::EncryptConfig,
+};
+use eql_mapper::{EqlTerm, TableColumn, TypeCheckedStatement};
+use postgres_types::Type;
+use std::sync::Arc;
+use tracing::{debug, warn};
+
+/// Service responsible for processing columns from type-checked SQL statements
+/// and mapping them to encryption configurations.
+#[derive(Clone)]
+pub struct ColumnMapper {
+ encrypt_config: Arc<EncryptConfig>,
+}
+
+impl ColumnMapper {
+ /// Create a new ColumnMapper with the given encryption configuration
+ pub fn new(encrypt_config: Arc<EncryptConfig>) -> Self {
+ Self { encrypt_config }
+ }
+
+ /// Maps typed statement projection columns to an Encrypt column configuration
+ ///
+ /// The returned `Vec` holds `Option<Column>` values because the Projection columns are a mix of native and EQL types.
+ /// Only EQL columns will have a configuration. Native types are always None.
+ ///
+ /// Preserves the ordering and semantics of the projection to reduce the complexity of positional encryption.
+ pub fn get_projection_columns(
+ &self,
+ typed_statement: &TypeCheckedStatement<'_>,
+ ) -> Result<Vec<Option<Column>>, Error> {
+ let mut projection_columns = vec![];
+
+ for col in typed_statement.projection.columns() {
+ let eql_mapper::ProjectionColumn { ty, .. } = col;
+ let configured_column = match &**ty {
+ eql_mapper::Type::Value(eql_mapper::Value::Eql(eql_term)) => {
+ let TableColumn { table, column } = eql_term.table_column();
+ let identifier: Identifier = Identifier::from((table, column));
+
+ debug!(
+ target: MAPPER,
+ msg = "Configured column",
+ column = ?identifier,
+ ?eql_term,
+ );
+ self.get_column(identifier, eql_term)?
+ }
+ _ => None,
+ };
+ projection_columns.push(configured_column)
+ }
+
+ Ok(projection_columns)
+ }
+
+ /// Maps typed statement param columns to an Encrypt column configuration
+ ///
+ /// The returned `Vec` holds `Option<Column>` values because the Param columns are a mix of native and EQL types.
+ /// Only EQL columns will have a configuration. Native types are always None.
+ ///
+ /// Preserves the ordering and semantics of the parameters to reduce the complexity of positional encryption.
+ pub fn get_param_columns(
+ &self,
+ typed_statement: &TypeCheckedStatement<'_>,
+ ) -> Result<Vec<Option<Column>>, Error> {
+ let mut param_columns = vec![];
+
+ for param in typed_statement.params.iter() {
+ let configured_column = match param {
+ (_, eql_mapper::Value::Eql(eql_term)) => {
+ let TableColumn { table, column } = eql_term.table_column();
+ let identifier = Identifier::from((table, column));
+
+ debug!(
+ target: MAPPER,
+ msg = "Encrypted parameter",
+ column = ?identifier,
+ ?eql_term,
+ );
+
+ self.get_column(identifier, eql_term)?
+ }
+ _ => None,
+ };
+ param_columns.push(configured_column);
+ }
+
+ Ok(param_columns)
+ }
+
+ /// Maps typed statement literal columns to an Encrypt column configuration
+ pub fn get_literal_columns(
+ &self,
+ typed_statement: &TypeCheckedStatement<'_>,
+ ) -> Result<Vec<Option<Column>>, Error> {
+ let mut literal_columns = vec![];
+
+ for (eql_term, _) in typed_statement.literals.iter() {
+ let TableColumn { table, column } = eql_term.table_column();
+ let identifier = Identifier::from((table, column));
+
+ debug!(
+ target: MAPPER,
+ msg = "Encrypted literal",
+ column = ?identifier,
+ ?eql_term,
+ );
+ let col = self.get_column(identifier, eql_term)?;
+ if col.is_some() {
+ literal_columns.push(col);
+ }
+ }
+
+ Ok(literal_columns)
+ }
+
+ /// Get the column configuration for the Identifier
+ /// Returns `EncryptError::UnknownColumn` if configuration cannot be found for the identified column
+ /// when mapping is enabled, and `None` if mapping is disabled. A warning is logged in either case.
+ fn get_column(
+ &self,
+ identifier: Identifier,
+ eql_term: &EqlTerm,
+ ) -> Result