Merged

23 commits
5794215
feat: db orchestrator design
MarcosNicolau Dec 18, 2025
5340365
feat: receive config via params and log errors
MarcosNicolau Dec 18, 2025
5e32525
feat: orchestrator write/read failure and preferred order by last suc…
MarcosNicolau Dec 18, 2025
5a2d233
feat: make orchestrator clone safe between threads
MarcosNicolau Dec 18, 2025
4ab27f1
feat: use orchestrator in proof aggregator
MarcosNicolau Dec 18, 2025
67e8d05
feat: use db orchestrator in gateway + remove mutable reference to self
MarcosNicolau Dec 18, 2025
440dc33
fix: build of aggregation-mode gateway
MarcosNicolau Dec 19, 2025
d472330
feat: orchestrator for payment poller
MarcosNicolau Dec 19, 2025
51085bd
chore: read connection urls vector from config
MarcosNicolau Dec 19, 2025
eb78706
chore: address clippy warnings
MarcosNicolau Dec 19, 2025
2d4b876
chore: address more clippy warnings
MarcosNicolau Dec 19, 2025
34b2ece
refactor: comment backoff delays remove stale todos and set initial r…
MarcosNicolau Dec 22, 2025
7c6e035
adjust db retries params + explanation
MarcosNicolau Dec 22, 2025
37e6a88
chore: link to aws in backoff alg
MarcosNicolau Dec 22, 2025
da75e45
Apply suggestion from @maximopalopoli
MarcosNicolau Jan 5, 2026
3fa5063
refactor: rename DbOrchestartor for DbOrchestrator
MarcosNicolau Jan 5, 2026
4a65db1
fix: query execution max times comparison
MarcosNicolau Jan 5, 2026
79134a5
refactor: move next backoff delay function to retry.rs
MarcosNicolau Jan 5, 2026
f10a34b
fix: use min instead of max in next_backoff_delay
MarcosNicolau Jan 5, 2026
4a67e58
remove prefered order feature
MarcosNicolau Jan 5, 2026
ccb502e
chore: address clippy warnings
MarcosNicolau Jan 6, 2026
66d9837
update comments
JuArce Jan 6, 2026
11f1d40
Merge remote-tracking branch 'origin/staging' into feat/support-multi…
MarcosNicolau Jan 6, 2026
4 changes: 4 additions & 0 deletions aggregation_mode/Cargo.lock

Some generated files are not rendered by default.

5 changes: 3 additions & 2 deletions aggregation_mode/db/Cargo.toml
@@ -4,9 +4,10 @@ version = "0.1.0"
edition = "2021"

[dependencies]
serde = { workspace = true }
tokio = { version = "1"}
sqlx = { version = "0.8", features = [ "runtime-tokio", "postgres", "migrate", "chrono" ] }

sqlx = { version = "0.8", features = [ "runtime-tokio", "postgres", "migrate", "chrono", "uuid", "bigdecimal"] }
tracing = { version = "0.1", features = ["log"] }

[[bin]]
name = "migrate"
2 changes: 2 additions & 0 deletions aggregation_mode/db/src/lib.rs
@@ -1 +1,3 @@
+pub mod orchestrator;
+pub mod retry;
pub mod types;
143 changes: 143 additions & 0 deletions aggregation_mode/db/src/orchestrator.rs
@@ -0,0 +1,143 @@
use std::{future::Future, sync::Arc, time::Duration};

use sqlx::{postgres::PgPoolOptions, Pool, Postgres};

use crate::retry::{next_backoff_delay, RetryConfig, RetryError};

/// A single DB node: wraps a PostgreSQL connection pool.
#[derive(Debug)]
struct DbNode {
pool: Pool<Postgres>,
}

/// Database orchestrator for running reads/writes across multiple PostgreSQL nodes with retry/backoff.
///
/// `DbOrchestrator` holds a list of database nodes (connection pools) and
/// retries transient failures with exponential backoff based on `retry_config`.
///
/// ## Thread-safe `Clone`
/// This type is cheap and thread-safe to clone:
/// - `nodes` is `Vec<Arc<DbNode>>`, so cloning only increments `Arc` ref-counts and shares the same pools/nodes,
/// - `sqlx::Pool<Postgres>` is internally reference-counted and designed to be cloned and used concurrently.
///
/// All clones share the same underlying pools, so they observe the same connection state.
#[derive(Debug, Clone)]
pub struct DbOrchestrator {
nodes: Vec<Arc<DbNode>>,
retry_config: RetryConfig,
}

#[derive(Debug)]
pub enum DbOrchestratorError {
InvalidNumberOfConnectionUrls,
Sqlx(sqlx::Error),
}

impl std::fmt::Display for DbOrchestratorError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::InvalidNumberOfConnectionUrls => {
write!(f, "invalid number of connection URLs")
}
Self::Sqlx(e) => write!(f, "{e}"),
}
}
}

impl DbOrchestrator {
pub fn try_new(
connection_urls: &[String],
retry_config: RetryConfig,
) -> Result<Self, DbOrchestratorError> {
if connection_urls.is_empty() {
return Err(DbOrchestratorError::InvalidNumberOfConnectionUrls);
}

let nodes = connection_urls
.iter()
.map(|url| {
let pool = PgPoolOptions::new().max_connections(5).connect_lazy(url)?;

Ok(Arc::new(DbNode { pool }))
})
.collect::<Result<Vec<_>, sqlx::Error>>()
.map_err(DbOrchestratorError::Sqlx)?;

Ok(Self {
nodes,
retry_config,
})
}

pub async fn query<T, Q, Fut>(&self, query_fn: Q) -> Result<T, sqlx::Error>
where
Q: Fn(Pool<Postgres>) -> Fut,
Fut: Future<Output = Result<T, sqlx::Error>>,
{
let mut attempts = 0;
let mut delay = Duration::from_millis(self.retry_config.min_delay_millis);

loop {
match self.execute_once(&query_fn).await {
Ok(value) => return Ok(value),
Err(RetryError::Permanent(err)) => return Err(err),
Err(RetryError::Transient(err)) => {
if attempts >= self.retry_config.max_times {
return Err(err);
}

tracing::warn!(attempt = attempts, delay_millis = delay.as_millis(), error = ?err, "retrying after backoff");
tokio::time::sleep(delay).await;
delay = next_backoff_delay(delay, self.retry_config.clone());
attempts += 1;
}
}
}
}

async fn execute_once<T, Q, Fut>(&self, query_fn: &Q) -> Result<T, RetryError<sqlx::Error>>
where
Q: Fn(Pool<Postgres>) -> Fut,
Fut: Future<Output = Result<T, sqlx::Error>>,
{
let mut last_error = None;

for (idx, node) in self.nodes.iter().enumerate() {
let pool = node.pool.clone();

match query_fn(pool).await {
Ok(res) => {
return Ok(res);
}
Err(err) => {
if Self::is_connection_error(&err) {
tracing::warn!(node_index = idx, error = ?err, "database query failed");
last_error = Some(err);
} else {
return Err(RetryError::Permanent(err));
}
}
};
}

Err(RetryError::Transient(
last_error.expect("execute_once attempted without database nodes"),
))
}

fn is_connection_error(error: &sqlx::Error) -> bool {
matches!(
error,
sqlx::Error::Io(_)
| sqlx::Error::Tls(_)
| sqlx::Error::Protocol(_)
| sqlx::Error::PoolTimedOut
| sqlx::Error::PoolClosed
| sqlx::Error::WorkerCrashed
| sqlx::Error::BeginFailed
| sqlx::Error::Database(_)
)
}
}
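For reviewers, a minimal usage sketch of the new API. Everything below that is not in this diff (the URLs, the retry values, the `tasks` table) is illustrative only:

```rust
use db::{orchestrator::DbOrchestrator, retry::RetryConfig};

#[tokio::main]
async fn main() {
    // Illustrative values; real ones come from the service config.
    let urls = vec![
        "postgres://user:pass@db-primary:5432/aligned".to_string(),
        "postgres://user:pass@db-replica:5432/aligned".to_string(),
    ];
    let retry_config = RetryConfig {
        min_delay_millis: 500,
        factor: 2.0,
        max_times: 5,
        max_delay_seconds: 60,
    };
    let orchestrator = DbOrchestrator::try_new(&urls, retry_config)
        .expect("at least one connection URL is required");

    // The closure receives a cloned pool. On connection-level errors the
    // orchestrator tries the next node, then retries with backoff.
    let count: i64 = orchestrator
        .query(|pool| async move {
            sqlx::query_scalar("SELECT COUNT(*) FROM tasks") // hypothetical table
                .fetch_one(&pool)
                .await
        })
        .await
        .expect("query failed after retries");

    println!("tasks: {count}");
}
```

Since `Clone` is cheap, each task or request handler can hold its own `DbOrchestrator` handle sharing the same pools.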
63 changes: 63 additions & 0 deletions aggregation_mode/db/src/retry.rs
@@ -0,0 +1,63 @@
use std::time::Duration;

#[derive(Debug)]
pub(super) enum RetryError<E> {
Transient(E),
Permanent(E),
}

impl<E: std::fmt::Display> std::fmt::Display for RetryError<E> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
RetryError::Transient(e) => write!(f, "{e}"),
RetryError::Permanent(e) => write!(f, "{e}"),
}
}
}

impl<E: std::fmt::Display> std::error::Error for RetryError<E> where E: std::fmt::Debug {}

#[derive(Debug, Clone)]
pub struct RetryConfig {
/// Initial delay before the first retry attempt, in milliseconds.
pub min_delay_millis: u64,
/// Exponential backoff multiplier applied to the delay after each retry.
pub factor: f32,
/// Maximum number of retry attempts.
pub max_times: usize,
/// Maximum delay between retry attempts, in seconds.
pub max_delay_seconds: u64,
}

// Exponential backoff with a hard cap.
//
// Each retry multiplies the previous delay by `retry_config.factor`,
// then clamps it to `max_delay_seconds`. This yields:
//
// d_{n+1} = min(max, d_n * factor) => d_n = min(max, d_initial * factor^n)
//
// Example starting at 500ms with factor = 2.0 (no jitter):
// retry 0: 0.5s
// retry 1: 1.0s
// retry 2: 2.0s
// retry 3: 4.0s
// retry 4: 8.0s
// ...
// until the delay reaches `max_delay_seconds`, after which it stays at that max.
// see reference: https://en.wikipedia.org/wiki/Exponential_backoff
// and here: https://docs.aws.amazon.com/prescriptive-guidance/latest/cloud-design-patterns/retry-backoff.html
pub fn next_backoff_delay(current_delay: Duration, retry_config: RetryConfig) -> Duration {
let max: Duration = Duration::from_secs(retry_config.max_delay_seconds);
// Defensive: factor should be >= 1.0 for backoff; we clamp it to avoid shrinking/NaN.
let factor = f64::from(retry_config.factor).max(1.0);

let scaled_secs = current_delay.as_secs_f64() * factor;
let scaled_secs = if scaled_secs.is_finite() {
scaled_secs
} else {
max.as_secs_f64()
};

let scaled = Duration::from_secs_f64(scaled_secs);
scaled.min(max)
}
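As a sanity check on the sequence described in the comment above, a small sketch that prints the delays `next_backoff_delay` produces for those parameters:

```rust
use std::time::Duration;

use db::retry::{next_backoff_delay, RetryConfig};

fn main() {
    let config = RetryConfig {
        min_delay_millis: 500,
        factor: 2.0,
        max_times: 6,
        max_delay_seconds: 8,
    };

    let mut delay = Duration::from_millis(config.min_delay_millis);
    for retry in 0..config.max_times {
        println!("retry {retry}: {delay:?}");
        delay = next_backoff_delay(delay, config.clone());
    }
    // Prints 500ms, 1s, 2s, 4s, 8s, 8s; the delay is capped at `max_delay_seconds`.
}
```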
2 changes: 1 addition & 1 deletion aggregation_mode/db/src/types.rs
@@ -7,7 +7,7 @@ use sqlx::{
Type,
};

-#[derive(Debug, Clone, Copy, PartialEq, Eq, Type)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Type, serde::Serialize)]
#[sqlx(type_name = "task_status", rename_all = "lowercase")]
pub enum TaskStatus {
Pending,
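One consequence of the new derive, sketched under two assumptions: the elided part of the enum adds no serde attributes, and a `serde_json` dependency is available for the example:

```rust
use db::types::TaskStatus;

fn main() {
    // `#[sqlx(rename_all = "lowercase")]` only affects the DB mapping;
    // with a bare derive, serde serializes the Rust variant name as-is.
    let json = serde_json::to_string(&TaskStatus::Pending).expect("serializable");
    assert_eq!(json, "\"Pending\"");
}
```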
1 change: 1 addition & 0 deletions aggregation_mode/gateway/Cargo.toml
@@ -10,6 +10,7 @@ serde_yaml = { workspace = true }
agg_mode_sdk = { path = "../sdk"}
aligned-sdk = { workspace = true }
sp1-sdk = { workspace = true }
+db = { workspace = true }
tracing = { version = "0.1", features = ["log"] }
tracing-subscriber = { version = "0.3.0", features = ["env-filter"] }
bincode = "1.3.3"
2 changes: 1 addition & 1 deletion aggregation_mode/gateway/src/config.rs
@@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize};
pub struct Config {
pub ip: String,
pub port: u16,
-pub db_connection_url: String,
+pub db_connection_urls: Vec<String>,
pub network: String,
pub max_daily_proofs_per_user: i64,
}
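Deployments will need to migrate their gateway config from the old scalar key to the new list. A sketch of the parsing change, with illustrative values and a stand-in struct that keeps only the changed field:

```rust
use serde::Deserialize;

// Stand-in for the gateway `Config`; the real struct has more fields.
#[derive(Debug, Deserialize)]
struct Config {
    db_connection_urls: Vec<String>,
}

fn main() {
    // Before: db_connection_url: "postgres://..."
    // After:  a YAML sequence under `db_connection_urls`.
    let yaml = r#"
db_connection_urls:
  - "postgres://user:pass@db-primary:5432/aligned"
  - "postgres://user:pass@db-replica:5432/aligned"
"#;
    let config: Config = serde_yaml::from_str(yaml).expect("valid config yaml");
    assert_eq!(config.db_connection_urls.len(), 2);
}
```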