Skip to content

Commit 6d8ab62

Browse files
authored
fix: (#33) A rare combination of settings could lead to writing a corrupt archive with overlength extra data, and data_start locations were also wrong when reading the archive back (#221)
* fix: Rare combination of settings could lead to writing a corrupt archive with overlength extra data * fix: Previous fix was breaking alignment * style: cargo fmt --all * fix: ZIP64 header was being written twice * style: cargo fmt --all * ci(fuzz): Add check that file-creation options are individually valid * fix: Need to update extra_data_start in deep_copy_file * style: cargo fmt --all * test(fuzz): fix bug in Arbitrary impl * fix: Cursor-position bugs when merging archives or opening for append * fix: unintended feature dependency * style: cargo fmt --all * fix: merge_contents was miscalculating new start positions for absorbed archive's files * fix: shallow_copy_file needs to reset CDE location since the CDE is copied * fix: ZIP64 header was being written after AES header location was already calculated * fix: ZIP64 header was being counted twice when writing extra-field length * fix: deep_copy_file was positioning cursor incorrectly * test(fuzz): Reimplement Debug so that it prints the method calls actually made * test(fuzz): Fix issues with `Option<&mut Formatter>` * chore: Partial debug * chore: Revert: `merge_contents` already adjusts header_start and data_start * chore: Revert unused `mut` * style: cargo fmt --all * refactor: eliminate a magic number for CDE block size * chore: WIP: fix bugs * refactor: Minor refactors * refactor: eliminate a magic number for CDE block size * refactor: Minor refactors * refactor: Can use cde_start_pos to locate ZIP64 end locator * chore: Fix import that can no longer be feature-gated * chore: Fix import that can no longer be feature-gated * refactor: Confusing variable name * style: cargo fmt --all and fix Clippy warnings * style: fix another Clippy warning --------- Signed-off-by: Chris Hennick <[email protected]>
1 parent fd5f804 commit 6d8ab62

File tree

3 files changed

+436
-309
lines changed

3 files changed

+436
-309
lines changed

src/read.rs

Lines changed: 15 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -359,7 +359,7 @@ fn find_data_start(
359359
// easily overflow a u16.
360360
block.file_name_length as u64 + block.extra_field_length as u64;
361361
let data_start =
362-
data.header_start + mem::size_of::<ZipLocalEntryBlock>() as u64 + variable_fields_len;
362+
data.header_start + size_of::<ZipLocalEntryBlock>() as u64 + variable_fields_len;
363363
// Set the value so we don't have to read it again.
364364
match data.data_start.set(data_start) {
365365
Ok(()) => (),
@@ -497,22 +497,28 @@ impl<R: Read + Seek> ZipArchive<R> {
497497
* assert_eq!(0, new_files[0].header_start); // Avoid this.
498498
*/
499499

500-
let new_initial_header_start = w.stream_position()?;
500+
let first_new_file_header_start = w.stream_position()?;
501+
501502
/* Push back file header starts for all entries in the covered files. */
502503
new_files.values_mut().try_for_each(|f| {
503504
/* This is probably the only really important thing to change. */
504-
f.header_start = f.header_start.checked_add(new_initial_header_start).ok_or(
505-
ZipError::InvalidArchive("new header start from merge would have been too large"),
506-
)?;
505+
f.header_start = f
506+
.header_start
507+
.checked_add(first_new_file_header_start)
508+
.ok_or(InvalidArchive(
509+
"new header start from merge would have been too large",
510+
))?;
507511
/* This is only ever used internally to cache metadata lookups (it's not part of the
508512
* zip spec), and 0 is the sentinel value. */
509-
// f.central_header_start = 0;
513+
f.central_header_start = 0;
510514
/* This is an atomic variable so it can be updated from another thread in the
511515
* implementation (which is good!). */
512516
if let Some(old_data_start) = f.data_start.take() {
513-
let new_data_start = old_data_start.checked_add(new_initial_header_start).ok_or(
514-
ZipError::InvalidArchive("new data start from merge would have been too large"),
515-
)?;
517+
let new_data_start = old_data_start
518+
.checked_add(first_new_file_header_start)
519+
.ok_or(InvalidArchive(
520+
"new data start from merge would have been too large",
521+
))?;
516522
f.data_start.get_or_init(|| new_data_start);
517523
}
518524
Ok::<_, ZipError>(())
@@ -1239,7 +1245,6 @@ pub(crate) fn central_header_to_zip_file<R: Read + Seek>(
12391245
"A file can't start after its central-directory header",
12401246
));
12411247
}
1242-
file.data_start.get_or_init(|| data_start);
12431248
reader.seek(SeekFrom::Start(central_header_end))?;
12441249
Ok(file)
12451250
}

src/types.rs

Lines changed: 55 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ use crate::extra_fields::ExtraField;
2424
use crate::result::DateTimeRangeError;
2525
use crate::spec::is_dir;
2626
use crate::types::ffi::S_IFDIR;
27-
use crate::CompressionMethod;
27+
use crate::{CompressionMethod, ZIP64_BYTES_THR};
2828
#[cfg(feature = "time")]
2929
use time::{error::ComponentRange, Date, Month, OffsetDateTime, PrimitiveDateTime, Time};
3030

@@ -625,10 +625,10 @@ impl ZipFileData {
625625
extra_field: &[u8],
626626
) -> Self
627627
where
628-
S: Into<Box<str>>,
628+
S: ToString,
629629
{
630630
let permissions = options.permissions.unwrap_or(0o100644);
631-
let file_name: Box<str> = name.into();
631+
let file_name: Box<str> = name.to_string().into_boxed_str();
632632
let file_name_raw: Box<[u8]> = file_name.bytes().collect();
633633
let mut local_block = ZipFileData {
634634
system: System::Unix,
@@ -778,12 +778,8 @@ impl ZipFileData {
778778
pub(crate) fn local_block(&self) -> ZipResult<ZipLocalEntryBlock> {
779779
let compressed_size: u32 = self.clamp_size_field(self.compressed_size);
780780
let uncompressed_size: u32 = self.clamp_size_field(self.uncompressed_size);
781-
782-
let extra_block_len: usize = self
783-
.zip64_extra_field_block()
784-
.map(|block| block.full_size())
785-
.unwrap_or(0);
786-
let extra_field_length: u16 = (self.extra_field_len() + extra_block_len)
781+
let extra_field_length: u16 = self
782+
.extra_field_len()
787783
.try_into()
788784
.map_err(|_| ZipError::InvalidArchive("Extra data field is too large"))?;
789785

@@ -805,7 +801,7 @@ impl ZipFileData {
805801
})
806802
}
807803

808-
pub(crate) fn block(&self, zip64_extra_field_length: u16) -> ZipResult<ZipCentralEntryBlock> {
804+
pub(crate) fn block(&self) -> ZipResult<ZipCentralEntryBlock> {
809805
let extra_field_len: u16 = self.extra_field_len().try_into().unwrap();
810806
let central_extra_field_len: u16 = self.central_extra_field_len().try_into().unwrap();
811807
let last_modified_time = self
@@ -832,11 +828,9 @@ impl ZipFileData {
832828
.try_into()
833829
.unwrap(),
834830
file_name_length: self.file_name_raw.len().try_into().unwrap(),
835-
extra_field_length: zip64_extra_field_length
836-
.checked_add(extra_field_len + central_extra_field_len)
837-
.ok_or(ZipError::InvalidArchive(
838-
"Extra field length in central directory exceeds 64KiB",
839-
))?,
831+
extra_field_length: extra_field_len.checked_add(central_extra_field_len).ok_or(
832+
ZipError::InvalidArchive("Extra field length in central directory exceeds 64KiB"),
833+
)?,
840834
file_comment_length: self.file_comment.as_bytes().len().try_into().unwrap(),
841835
disk_number: 0,
842836
internal_file_attributes: 0,
@@ -850,45 +844,12 @@ impl ZipFileData {
850844
}
851845

852846
pub(crate) fn zip64_extra_field_block(&self) -> Option<Zip64ExtraFieldBlock> {
853-
let uncompressed_size: Option<u64> =
854-
if self.uncompressed_size >= spec::ZIP64_BYTES_THR || self.large_file {
855-
Some(spec::ZIP64_BYTES_THR)
856-
} else {
857-
None
858-
};
859-
let compressed_size: Option<u64> =
860-
if self.compressed_size >= spec::ZIP64_BYTES_THR || self.large_file {
861-
Some(spec::ZIP64_BYTES_THR)
862-
} else {
863-
None
864-
};
865-
let header_start: Option<u64> = if self.header_start >= spec::ZIP64_BYTES_THR {
866-
Some(spec::ZIP64_BYTES_THR)
867-
} else {
868-
None
869-
};
870-
871-
let mut size: u16 = 0;
872-
if uncompressed_size.is_some() {
873-
size += mem::size_of::<u64>() as u16;
874-
}
875-
if compressed_size.is_some() {
876-
size += mem::size_of::<u64>() as u16;
877-
}
878-
if header_start.is_some() {
879-
size += mem::size_of::<u64>() as u16;
880-
}
881-
if size == 0 {
882-
return None;
883-
}
884-
885-
Some(Zip64ExtraFieldBlock {
886-
magic: spec::ExtraFieldMagic::ZIP64_EXTRA_FIELD_TAG,
887-
size,
888-
uncompressed_size,
889-
compressed_size,
890-
header_start,
891-
})
847+
Zip64ExtraFieldBlock::maybe_new(
848+
self.large_file,
849+
self.uncompressed_size,
850+
self.compressed_size,
851+
self.header_start,
852+
)
892853
}
893854
}
894855

@@ -1002,6 +963,46 @@ pub(crate) struct Zip64ExtraFieldBlock {
1002963
// u32: disk start number
1003964
}
1004965

966+
impl Zip64ExtraFieldBlock {
967+
pub(crate) fn maybe_new(
968+
large_file: bool,
969+
uncompressed_size: u64,
970+
compressed_size: u64,
971+
header_start: u64,
972+
) -> Option<Zip64ExtraFieldBlock> {
973+
let mut size: u16 = 0;
974+
let uncompressed_size = if uncompressed_size >= ZIP64_BYTES_THR || large_file {
975+
size += mem::size_of::<u64>() as u16;
976+
Some(uncompressed_size)
977+
} else {
978+
None
979+
};
980+
let compressed_size = if compressed_size >= ZIP64_BYTES_THR || large_file {
981+
size += mem::size_of::<u64>() as u16;
982+
Some(compressed_size)
983+
} else {
984+
None
985+
};
986+
let header_start = if header_start >= ZIP64_BYTES_THR {
987+
size += mem::size_of::<u64>() as u16;
988+
Some(header_start)
989+
} else {
990+
None
991+
};
992+
if size == 0 {
993+
return None;
994+
}
995+
996+
Some(Zip64ExtraFieldBlock {
997+
magic: spec::ExtraFieldMagic::ZIP64_EXTRA_FIELD_TAG,
998+
size,
999+
uncompressed_size,
1000+
compressed_size,
1001+
header_start,
1002+
})
1003+
}
1004+
}
1005+
10051006
impl Zip64ExtraFieldBlock {
10061007
pub fn full_size(&self) -> usize {
10071008
assert!(self.size > 0);

0 commit comments

Comments (0)