Skip to content

Commit 19b275e

Browse files
author
Giuseppe Fabiano
committed
cli: changed the logic to speed up transfer and not hog speed
The previous logic tried to flash each useful chunk one by one in 4KB chunks, but this hogged the speed because each firehose transfer took ~20-50ms per XML command/ACK cycle. Inspired by the official Qualcomm tool, I now merge different types of sparse-image chunks — also filling the "don't care" gaps with zeroes — to keep transfers large and contiguous. This speeds up flashing from minutes to a few seconds on sparse images such as persist. Signed-off-by: Giuseppe Fabiano <giuseppe.fabiano@intecs.it>
1 parent 49f883d commit 19b275e

File tree

1 file changed

+106
-30
lines changed

1 file changed

+106
-30
lines changed

cli/src/programfile.rs

Lines changed: 106 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -159,56 +159,132 @@ fn parse_program_cmd<T: Read + Write + QdlChan>(
159159
let header = FileHeader::from_bytes(&header_bytes)?;
160160

161161
let mut offset: usize = 0;
162-
let start_sector = start_sector.parse::<usize>()?;
162+
let start_sector_base = start_sector.parse::<usize>()?;
163+
164+
let mut agg_data = Vec::new();
165+
let mut agg_start_sector = 0;
166+
let mut agg_num_sectors = 0;
167+
163168
for index in 0..header.chunks {
164-
let label_sparse = format!("{label}_{index}");
165169
let mut chunk_bytes = ChunkHeaderBytes::default();
166170
buf.read_exact(&mut chunk_bytes)?;
167171
let chunk = ChunkHeader::from_bytes(&chunk_bytes)?;
168172

169173
let out_size = chunk.out_size(&header);
170174
let num_sectors = out_size / sector_size;
171-
let start_offset = start_sector + offset;
175+
let current_start_sector = start_sector_base + offset;
176+
172177
match chunk.chunk_type {
173-
ChunkType::Raw => {
174-
firehose_program_storage(
175-
channel,
176-
&mut buf,
177-
&label_sparse,
178-
num_sectors,
179-
phys_part_idx,
180-
start_offset.to_string().as_str(),
181-
)?;
182-
}
183-
ChunkType::Fill => {
184-
let mut fill_value = [0u8; 4];
185-
buf.read_exact(&mut fill_value)?;
178+
ChunkType::Raw | ChunkType::Fill => {
179+
let is_large = out_size > channel.fh_config().send_buffer_size;
180+
let is_contiguous = agg_num_sectors > 0
181+
&& (agg_start_sector + agg_num_sectors == current_start_sector);
182+
let would_overflow =
183+
agg_data.len() + out_size > channel.fh_config().send_buffer_size;
186184

187-
let mut fill_vec = Vec::<u8>::with_capacity(out_size);
188-
for _ in 0..out_size / 4 {
189-
fill_vec.extend_from_slice(&fill_value[..]);
185+
if !is_contiguous || would_overflow || is_large {
186+
if agg_num_sectors > 0 {
187+
firehose_program_storage(
188+
channel,
189+
&mut &agg_data[..],
190+
&format!("{label}_merged"),
191+
agg_num_sectors,
192+
phys_part_idx,
193+
agg_start_sector.to_string().as_str(),
194+
)?;
195+
agg_data.clear();
196+
agg_num_sectors = 0;
197+
}
190198
}
191199

192-
firehose_program_storage(
193-
channel,
194-
&mut &fill_vec[..],
195-
&label_sparse,
196-
num_sectors,
197-
phys_part_idx,
198-
start_offset.to_string().as_str(),
199-
)?;
200+
if is_large {
201+
if chunk.chunk_type == ChunkType::Raw {
202+
firehose_program_storage(
203+
channel,
204+
&mut buf,
205+
&format!("{label}_{index}"),
206+
num_sectors,
207+
phys_part_idx,
208+
current_start_sector.to_string().as_str(),
209+
)?;
210+
} else {
211+
let mut fill_value = [0u8; 4];
212+
buf.read_exact(&mut fill_value)?;
213+
214+
let mut fill_vec = Vec::<u8>::with_capacity(out_size);
215+
for _ in 0..out_size / 4 {
216+
fill_vec.extend_from_slice(&fill_value[..]);
217+
}
218+
219+
firehose_program_storage(
220+
channel,
221+
&mut &fill_vec[..],
222+
&format!("{label}_{index}"),
223+
num_sectors,
224+
phys_part_idx,
225+
current_start_sector.to_string().as_str(),
226+
)?;
227+
}
228+
} else {
229+
if agg_num_sectors == 0 {
230+
agg_start_sector = current_start_sector;
231+
}
232+
if chunk.chunk_type == ChunkType::Raw {
233+
let mut tmp = vec![0u8; out_size];
234+
buf.read_exact(&mut tmp)?;
235+
agg_data.extend(tmp);
236+
} else {
237+
let mut fill_value = [0u8; 4];
238+
buf.read_exact(&mut fill_value)?;
239+
for _ in 0..out_size / 4 {
240+
agg_data.extend_from_slice(&fill_value[..]);
241+
}
242+
}
243+
agg_num_sectors += num_sectors;
244+
}
200245
}
201246
ChunkType::DontCare => {
202-
// Don't Care, skip
247+
// Fill gaps up to 256KB
248+
let is_small_gap = out_size <= 256 * 1024;
249+
let would_overflow =
250+
agg_data.len() + out_size > channel.fh_config().send_buffer_size;
251+
252+
if agg_num_sectors > 0 && is_small_gap && !would_overflow {
253+
// Fill gap with zeros to keep aggregation going
254+
agg_data.resize(agg_data.len() + out_size, 0);
255+
agg_num_sectors += num_sectors;
256+
} else if agg_num_sectors > 0 {
257+
firehose_program_storage(
258+
channel,
259+
&mut &agg_data[..],
260+
&format!("{label}_merged"),
261+
agg_num_sectors,
262+
phys_part_idx,
263+
agg_start_sector.to_string().as_str(),
264+
)?;
265+
agg_data.clear();
266+
agg_num_sectors = 0;
267+
}
203268
}
204269
ChunkType::Crc32 => {
205-
// Not supported, on qcom tools is ignored, seek if present
206270
buf.seek_relative(4)?;
207271
}
208272
}
209273

210-
offset += out_size;
274+
offset += num_sectors;
211275
}
276+
277+
if agg_num_sectors > 0 {
278+
firehose_program_storage(
279+
channel,
280+
&mut &agg_data[..],
281+
&format!("{label}_merged"),
282+
agg_num_sectors,
283+
phys_part_idx,
284+
agg_start_sector.to_string().as_str(),
285+
)?;
286+
}
287+
212288
return Ok(());
213289
}
214290

0 commit comments

Comments
 (0)