Commit 224b57e

Merge pull request #1950 from tursodatabase/batch_push
Push up to 128 frames in sync
2 parents 7aad3e6 + ac6f1bc commit 224b57e

File tree

3 files changed: +51 -22 lines changed

libsql/src/database/builder.rs

Lines changed: 12 additions & 0 deletions
@@ -107,6 +107,7 @@ impl Builder<()> {
                 connector: None,
                 read_your_writes: true,
                 remote_writes: false,
+                push_batch_size: 0,
             },
         }
     }
@@ -524,6 +525,7 @@ cfg_sync! {
         connector: Option<crate::util::ConnectorService>,
         remote_writes: bool,
         read_your_writes: bool,
+        push_batch_size: u32,
     }

     impl Builder<SyncedDatabase> {
@@ -543,6 +545,11 @@ cfg_sync! {
             self
         }

+        pub fn set_push_batch_size(mut self, v: u32) -> Builder<SyncedDatabase> {
+            self.inner.push_batch_size = v;
+            self
+        }
+
         /// Provide a custom http connector that will be used to create http connections.
         pub fn connector<C>(mut self, connector: C) -> Builder<SyncedDatabase>
         where
@@ -570,6 +577,7 @@ cfg_sync! {
                 connector,
                 remote_writes,
                 read_your_writes,
+                push_batch_size,
             } = self.inner;

             let path = path.to_str().ok_or(crate::Error::InvalidUTF8Path)?.to_owned();
@@ -596,6 +604,10 @@ cfg_sync! {
             )
             .await?;

+            if push_batch_size > 0 {
+                db.sync_ctx.as_ref().unwrap().lock().await.set_push_batch_size(push_batch_size);
+            }
+
             Ok(Database {
                 db_type: DbType::Offline {
                     db,

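Usage sketch (not part of the commit): the only API this diff confirms is Builder<SyncedDatabase>::set_push_batch_size. The constructor name, its argument order, and the placeholder values below are assumptions for illustration; a batch size of 0 (the builder default) leaves the SyncContext default of 128 in effect.

    // Hypothetical caller of the new builder knob; only set_push_batch_size(..)
    // comes from this commit, the rest follows the usual libsql builder flow.
    async fn open_synced_db() -> libsql::Result<libsql::Database> {
        let db = libsql::Builder::new_synced_database(
            "local.db",                                 // local database path (placeholder)
            "https://example-db.turso.io".to_string(),  // sync URL (placeholder)
            "auth-token".to_string(),                   // auth token (placeholder)
        )
        // Cap how many WAL frames each push request carries. Leaving the
        // builder default of 0 keeps DEFAULT_PUSH_BATCH_SIZE (128) in place.
        .set_push_batch_size(64)
        .build()
        .await?;
        Ok(db)
    }
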
libsql/src/sync.rs

Lines changed: 30 additions & 13 deletions
@@ -19,6 +19,7 @@ pub mod transaction;
 const METADATA_VERSION: u32 = 0;

 const DEFAULT_MAX_RETRIES: usize = 5;
+const DEFAULT_PUSH_BATCH_SIZE: u32 = 128;

 #[derive(thiserror::Error, Debug)]
 #[non_exhaustive]
@@ -74,6 +75,7 @@ pub struct SyncContext {
     sync_url: String,
     auth_token: Option<HeaderValue>,
     max_retries: usize,
+    push_batch_size: u32,
     /// The current durable generation.
     durable_generation: u32,
     /// Represents the max_frame_no from the server.
@@ -102,6 +104,7 @@ impl SyncContext {
             sync_url,
             auth_token,
             max_retries: DEFAULT_MAX_RETRIES,
+            push_batch_size: DEFAULT_PUSH_BATCH_SIZE,
             client,
             durable_generation: 1,
             durable_frame_num: 0,
@@ -117,6 +120,10 @@ impl SyncContext {
         Ok(me)
     }

+    pub fn set_push_batch_size(&mut self, push_batch_size: u32) {
+        self.push_batch_size = push_batch_size;
+    }
+
     #[tracing::instrument(skip(self))]
     pub(crate) async fn pull_one_frame(
         &mut self,
@@ -134,25 +141,26 @@ impl SyncContext {
         self.pull_with_retry(uri, self.max_retries).await
     }

-    #[tracing::instrument(skip(self, frame))]
-    pub(crate) async fn push_one_frame(
+    #[tracing::instrument(skip(self, frames))]
+    pub(crate) async fn push_frames(
         &mut self,
-        frame: Bytes,
+        frames: Bytes,
         generation: u32,
         frame_no: u32,
+        frames_count: u32,
     ) -> Result<u32> {
         let uri = format!(
             "{}/sync/{}/{}/{}",
             self.sync_url,
             generation,
             frame_no,
-            frame_no + 1
+            frame_no + frames_count
         );
         tracing::debug!("pushing frame");

-        let (generation, durable_frame_num) = self.push_with_retry(uri, frame, self.max_retries).await?;
+        let (generation, durable_frame_num) = self.push_with_retry(uri, frames, self.max_retries).await?;

-        if durable_frame_num > frame_no {
+        if durable_frame_num > frame_no + frames_count - 1 {
             tracing::error!(
                 "server returned durable_frame_num larger than what we sent: sent={}, got={}",
                 frame_no,
@@ -162,7 +170,7 @@ impl SyncContext {
             return Err(SyncError::InvalidPushFrameNoHigh(frame_no, durable_frame_num).into());
         }

-        if durable_frame_num < frame_no {
+        if durable_frame_num < frame_no + frames_count - 1 {
             // Update our knowledge of where the server is at frame wise.
             self.durable_frame_num = durable_frame_num;

@@ -186,7 +194,7 @@ impl SyncContext {
         Ok(durable_frame_num)
     }

-    async fn push_with_retry(&self, uri: String, frame: Bytes, max_retries: usize) -> Result<(u32, u32)> {
+    async fn push_with_retry(&self, uri: String, body: Bytes, max_retries: usize) -> Result<(u32, u32)> {
         let mut nr_retries = 0;
         loop {
             let mut req = http::Request::post(uri.clone());
@@ -200,7 +208,7 @@ impl SyncContext {
                 None => {}
             }

-            let req = req.body(frame.clone().into()).expect("valid body");
+            let req = req.body(body.clone().into()).expect("valid body");

             let res = self
                 .client
@@ -537,19 +545,28 @@ async fn try_push(

     let mut frame_no = start_frame_no;
     while frame_no <= end_frame_no {
-        let frame = conn.wal_get_frame(frame_no, page_size)?;
+        let batch_size = sync_ctx.push_batch_size.min(end_frame_no - frame_no + 1);
+        let mut frames = conn.wal_get_frame(frame_no, page_size)?;
+        if batch_size > 1 {
+            frames.reserve((batch_size - 1) as usize * frames.len());
+        }
+        for idx in 1..batch_size {
+            let frame = conn.wal_get_frame(frame_no + idx, page_size)?;
+            frames.extend_from_slice(frame.as_ref())
+        }

         // The server returns its maximum frame number. To avoid resending
         // frames the server already knows about, we need to update the
         // frame number to the one returned by the server.
         let max_frame_no = sync_ctx
-            .push_one_frame(frame.freeze(), generation, frame_no)
+            .push_frames(frames.freeze(), generation, frame_no, batch_size)
             .await?;

         if max_frame_no > frame_no {
-            frame_no = max_frame_no;
+            frame_no = max_frame_no + 1;
+        } else {
+            frame_no += batch_size;
         }
-        frame_no += 1;
     }

     sync_ctx.write_metadata().await?;

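Arithmetic sketch (not part of the commit): a standalone illustration of how the batching loop above advances, assuming the default push batch size of 128 and an example WAL range of frames 1..=300. It only mirrors the clamping and frame-range math from try_push and push_frames; no libsql types are used.

    // Mirrors the batching math from try_push/push_frames above.
    fn main() {
        let push_batch_size: u32 = 128; // DEFAULT_PUSH_BATCH_SIZE
        let start_frame_no: u32 = 1;
        let end_frame_no: u32 = 300; // inclusive last frame to push

        let mut frame_no = start_frame_no;
        while frame_no <= end_frame_no {
            // Clamp the batch to the frames that remain, as try_push does.
            let batch_size = push_batch_size.min(end_frame_no - frame_no + 1);

            // push_frames requests "{sync_url}/sync/{generation}/{frame_no}/{end}",
            // where the end of the pushed range is frame_no + batch_size.
            let range_end = frame_no + batch_size;
            println!("push frames [{frame_no}, {range_end}) -> {batch_size} frames");

            // Without a server-side fast-forward, advance past the batch just sent.
            frame_no += batch_size;
        }
        // Prints three batches: [1, 129), [129, 257), [257, 301).
    }
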
libsql/src/sync/test.rs

Lines changed: 9 additions & 9 deletions
@@ -28,7 +28,7 @@ async fn test_sync_context_push_frame() {
     let mut sync_ctx = sync_ctx;

     // Push a frame and verify the response
-    let durable_frame = sync_ctx.push_one_frame(frame, 1, 0).await.unwrap();
+    let durable_frame = sync_ctx.push_frames(frame, 1, 0, 1).await.unwrap();
     sync_ctx.write_metadata().await.unwrap();
     assert_eq!(durable_frame, 0); // First frame should return max_frame_no = 0

@@ -56,7 +56,7 @@ async fn test_sync_context_with_auth() {
     let frame = Bytes::from("test frame with auth");
     let mut sync_ctx = sync_ctx;

-    let durable_frame = sync_ctx.push_one_frame(frame, 1, 0).await.unwrap();
+    let durable_frame = sync_ctx.push_frames(frame, 1, 0, 1).await.unwrap();
     sync_ctx.write_metadata().await.unwrap();
     assert_eq!(durable_frame, 0);
     assert_eq!(server.frame_count(), 1);
@@ -82,7 +82,7 @@ async fn test_sync_context_multiple_frames() {
     // Push multiple frames and verify incrementing frame numbers
     for i in 0..3 {
         let frame = Bytes::from(format!("frame data {}", i));
-        let durable_frame = sync_ctx.push_one_frame(frame, 1, i).await.unwrap();
+        let durable_frame = sync_ctx.push_frames(frame, 1, i, 1).await.unwrap();
         sync_ctx.write_metadata().await.unwrap();
         assert_eq!(durable_frame, i);
         assert_eq!(sync_ctx.durable_frame_num(), i);
@@ -108,7 +108,7 @@ async fn test_sync_context_corrupted_metadata() {

     let mut sync_ctx = sync_ctx;
     let frame = Bytes::from("test frame data");
-    let durable_frame = sync_ctx.push_one_frame(frame, 1, 0).await.unwrap();
+    let durable_frame = sync_ctx.push_frames(frame, 1, 0, 1).await.unwrap();
     sync_ctx.write_metadata().await.unwrap();
     assert_eq!(durable_frame, 0);
     assert_eq!(server.frame_count(), 1);
@@ -152,7 +152,7 @@ async fn test_sync_restarts_with_lower_max_frame_no() {

     let mut sync_ctx = sync_ctx;
     let frame = Bytes::from("test frame data");
-    let durable_frame = sync_ctx.push_one_frame(frame.clone(), 1, 0).await.unwrap();
+    let durable_frame = sync_ctx.push_frames(frame.clone(), 1, 0, 1).await.unwrap();
     sync_ctx.write_metadata().await.unwrap();
     assert_eq!(durable_frame, 0);
     assert_eq!(server.frame_count(), 1);
@@ -180,14 +180,14 @@ async fn test_sync_restarts_with_lower_max_frame_no() {
     // This push should fail because we are ahead of the server and thus should get an invalid
     // frame no error.
     sync_ctx
-        .push_one_frame(frame.clone(), 1, frame_no)
+        .push_frames(frame.clone(), 1, frame_no, 1)
         .await
         .unwrap_err();

     let frame_no = sync_ctx.durable_frame_num() + 1;
     // This then should work because when the last one failed it updated our state of the server
     // durable_frame_num and we should then start writing from there.
-    sync_ctx.push_one_frame(frame, 1, frame_no).await.unwrap();
+    sync_ctx.push_frames(frame, 1, frame_no, 1).await.unwrap();
 }

 #[tokio::test]
@@ -215,7 +215,7 @@ async fn test_sync_context_retry_on_error() {
     server.return_error.store(true, Ordering::SeqCst);

     // First attempt should fail but retry
-    let result = sync_ctx.push_one_frame(frame.clone(), 1, 0).await;
+    let result = sync_ctx.push_frames(frame.clone(), 1, 0, 1).await;
     assert!(result.is_err());

     // Advance time to trigger retries faster
@@ -228,7 +228,7 @@ async fn test_sync_context_retry_on_error() {
     server.return_error.store(false, Ordering::SeqCst);

     // Next attempt should succeed
-    let durable_frame = sync_ctx.push_one_frame(frame, 1, 0).await.unwrap();
+    let durable_frame = sync_ctx.push_frames(frame, 1, 0, 1).await.unwrap();
     sync_ctx.write_metadata().await.unwrap();
     assert_eq!(durable_frame, 0);
     assert_eq!(server.frame_count(), 1);