Skip to content

Commit b288365

Browse files
bring back progress tracking to upload
1 parent ead7d0f commit b288365

File tree

2 files changed

+49
-19
lines changed

2 files changed

+49
-19
lines changed

apps/desktop/src-tauri/src/api.rs

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -93,6 +93,8 @@ pub struct UploadedPart {
9393
pub part_number: u32,
9494
pub etag: String,
9595
pub size: usize,
96+
#[serde(skip)]
97+
pub total_size: u64,
9698
}
9799

98100
#[derive(Serialize, Debug, Clone)]

apps/desktop/src-tauri/src/upload.rs

Lines changed: 47 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -595,12 +595,16 @@ impl InstantMultipartUpload {
595595
let upload_id = api::upload_multipart_initiate(&app, &video_id).await?;
596596

597597
// TODO: Will it be a problem that `ReaderStream` doesn't have a fixed chunk size??? We should fix that!!!!
598-
let parts = progress(uploader(
598+
let parts = progress(
599599
app.clone(),
600600
video_id.clone(),
601-
upload_id.clone(),
602-
from_pending_file(file_path.clone(), realtime_video_done),
603-
))
601+
uploader(
602+
app.clone(),
603+
video_id.clone(),
604+
upload_id.clone(),
605+
from_pending_file(file_path.clone(), realtime_video_done),
606+
),
607+
)
604608
.try_collect::<Vec<_>>()
605609
.await?;
606610

@@ -622,13 +626,23 @@ impl InstantMultipartUpload {
622626
}
623627
}
624628

629+
struct Chunk {
630+
/// The total size of the file to be uploaded.
631+
/// This can change as the recording grows.
632+
total_size: u64,
633+
/// The part number. `FILE_OFFSET = PART_NUMBER * CHUNK_SIZE`.
634+
part_number: u32,
635+
/// Actual data bytes of this chunk
636+
chunk: Bytes,
637+
}
638+
625639
/// Creates a stream that reads chunks from a potentially growing file,
626640
/// yielding (part_number, chunk_data) pairs. The first chunk is yielded last
627641
/// to allow for header rewriting after recording completion.
628642
pub fn from_pending_file(
629643
path: PathBuf,
630644
realtime_upload_done: Option<Receiver<()>>,
631-
) -> impl Stream<Item = io::Result<(u32, Bytes)>> {
645+
) -> impl Stream<Item = io::Result<Chunk>> {
632646
try_stream! {
633647
let mut part_number = 2; // Start at 2 since part 1 will be yielded last
634648
let mut last_read_position: u64 = 0;
@@ -703,7 +717,11 @@ pub fn from_pending_file(
703717
first_chunk_size = Some(total_read as u64);
704718
} else {
705719
// Yield non-first chunks immediately
706-
yield (part_number, Bytes::from(chunk));
720+
yield Chunk {
721+
total_size: file_size,
722+
part_number,
723+
chunk: Bytes::from(chunk),
724+
};
707725
part_number += 1;
708726
}
709727

@@ -728,7 +746,11 @@ pub fn from_pending_file(
728746

729747
if total_read > 0 {
730748
first_chunk.truncate(total_read);
731-
yield (1, Bytes::from(first_chunk));
749+
yield Chunk {
750+
total_size: file_size,
751+
part_number: 1,
752+
chunk: Bytes::from(first_chunk),
753+
};
732754
}
733755
}
734756
break;
@@ -747,15 +769,15 @@ fn uploader(
747769
app: AppHandle,
748770
video_id: String,
749771
upload_id: String,
750-
stream: impl Stream<Item = io::Result<(u32, Bytes)>>,
772+
stream: impl Stream<Item = io::Result<Chunk>>,
751773
) -> impl Stream<Item = Result<UploadedPart, String>> {
752774
let client = reqwest::Client::default();
753775

754776
try_stream! {
755777
let mut stream = pin!(stream);
756778
let mut prev_part_number = None;
757779
while let Some(item) = stream.next().await {
758-
let (part_number, chunk) = item.map_err(|err| format!("uploader/part/{:?}/fs: {err:?}", prev_part_number.map(|p| p + 1)))?;
780+
let Chunk { total_size, part_number, chunk } = item.map_err(|err| format!("uploader/part/{:?}/fs: {err:?}", prev_part_number.map(|p| p + 1)))?;
759781
prev_part_number = Some(part_number);
760782
let md5_sum = base64::encode(md5::compute(&chunk).0);
761783
let size = chunk.len();
@@ -786,31 +808,37 @@ fn uploader(
786808
etag: etag.ok_or_else(|| format!("uploader/part/{part_number}/error: ETag header not found"))?,
787809
part_number,
788810
size,
811+
total_size
789812
};
790813
}
791814
}
792815
}
793816

794817
/// Monitor the stream to report the upload progress
795818
fn progress(
819+
app: AppHandle,
820+
video_id: String,
796821
stream: impl Stream<Item = Result<UploadedPart, String>>,
797822
) -> impl Stream<Item = Result<UploadedPart, String>> {
798-
// TODO: Reenable progress reporting to the backend but build it on streams directly here.
799-
// let mut progress = UploadProgressUpdater::new(app.clone(), pre_created_video.id.clone());
823+
// TODO: Flatten this implementation into here
824+
let mut progress = UploadProgressUpdater::new(app.clone(), video_id.clone());
825+
let mut uploaded = 0;
800826

801827
stream! {
802828
let mut stream = pin!(stream);
803829

804830
while let Some(part) = stream.next().await {
805831
if let Ok(part) = &part {
806-
// progress.update(expected_pos, file_size);
807-
// UploadProgressEvent {
808-
// video_id: video_id.to_string(),
809-
// uploaded: last_uploaded_position.to_string(),
810-
// total: file_size.to_string(),
811-
// }
812-
// .emit(app)
813-
// .ok();
832+
uploaded += part.size as u64;
833+
834+
progress.update(uploaded, part.total_size);
835+
UploadProgressEvent {
836+
video_id: video_id.to_string(),
837+
uploaded: uploaded.to_string(),
838+
total: part.total_size.to_string(),
839+
}
840+
.emit(&app)
841+
.ok();
814842
}
815843

816844
yield part;

0 commit comments

Comments (0)