
Commit 52ee435

twim: remove chunking in copy_write_then_read
The previous implementation would send a TWI start condition for each chunk, so the slave chip would see each chunk as a separate transaction. This is not equivalent to `write_then_read`, which was the original goal of adding chunking. It seems TWIM has no way to send multiple buffers in a single transaction, so the ability to chunk has been removed.
1 parent ee6d674 commit 52ee435
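
For context, a minimal caller-side sketch of what `copy_write_then_read` is for: write data kept in flash (for example a register pointer declared as a `const`) cannot be fed to EasyDMA directly, so the method copies it into RAM and then performs a single write-then-read transaction. The crate path, `read_who_am_i`, `SENSOR_ADDR`, and `WHO_AM_I_REG` below are illustrative assumptions, not part of this commit.

use nrf52840_hal::twim::{Error, Instance, Twim};

/// Hypothetical helper (not part of this commit): read a one-byte ID register
/// from an imaginary sensor. SENSOR_ADDR and WHO_AM_I_REG are made up.
fn read_who_am_i<T: Instance>(i2c: &mut Twim<T>) -> Result<u8, Error> {
    const SENSOR_ADDR: u8 = 0x68;
    // Register pointer living in read-only memory (flash), which EasyDMA
    // cannot access directly -- hence the copying variant of write_then_read.
    const WHO_AM_I_REG: [u8; 1] = [0x75];

    let mut id = [0u8; 1];
    // One I2C transaction: write the register pointer, repeated start, read
    // one byte, then stop -- no stop condition between the write and the read.
    i2c.copy_write_then_read(SENSOR_ADDR, &WHO_AM_I_REG, &mut id)?;
    Ok(id[0])
}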

File tree

1 file changed (+11, -64 lines)


nrf-hal-common/src/twim.rs

Lines changed: 11 additions & 64 deletions
@@ -350,79 +350,26 @@ where
     /// Copy data into RAM and write to an I2C slave, then read data from the slave without
     /// triggering a stop condition between the two.
     ///
+    /// The write buffer must have a length of at most 255 bytes on the nRF52832
+    /// and at most 1024 bytes on the nRF52840.
+    ///
     /// The read buffer must have a length of at most 255 bytes on the nRF52832
     /// and at most 65535 bytes on the nRF52840.
     pub fn copy_write_then_read(
         &mut self,
         address: u8,
-        tx_buffer: &[u8],
-        rx_buffer: &mut [u8],
+        wr_buffer: &[u8],
+        rd_buffer: &mut [u8],
     ) -> Result<(), Error> {
-        // Conservative compiler fence to prevent optimizations that do not
-        // take in to account actions by DMA. The fence has been placed here,
-        // before any DMA action has started.
-        compiler_fence(SeqCst);
-
-        self.0
-            .address
-            .write(|w| unsafe { w.address().bits(address) });
-
-        // Set up the DMA read.
-        unsafe { self.set_rx_buffer(rx_buffer)? };
-
-        // Chunk write data.
-        let wr_buffer = &mut [0; FORCE_COPY_BUFFER_SIZE][..];
-        for chunk in tx_buffer.chunks(FORCE_COPY_BUFFER_SIZE) {
-            // Copy chunk into RAM.
-            wr_buffer[..chunk.len()].copy_from_slice(chunk);
-
-            // Set up the DMA write.
-            unsafe { self.set_tx_buffer(wr_buffer)? };
-
-            // Start write operation.
-            self.0.tasks_starttx.write(|w|
-                // `1` is a valid value to write to task registers.
-                unsafe { w.bits(1) });
-
-            // Wait until write operation is about to end.
-            while self.0.events_lasttx.read().bits() == 0 {}
-            self.0.events_lasttx.reset();
-
-            // Check for bad writes.
-            if self.0.txd.amount.read().bits() != wr_buffer.len() as u32 {
-                return Err(Error::Transmit);
-            }
+        if wr_buffer.len() > FORCE_COPY_BUFFER_SIZE {
+            return Err(Error::TxBufferTooLong);
         }
 
-        // Start read operation.
-        self.0.tasks_startrx.write(|w|
-            // `1` is a valid value to write to task registers.
-            unsafe { w.bits(1) });
-
-        // Wait until read operation is about to end.
-        while self.0.events_lastrx.read().bits() == 0 {}
-        self.0.events_lastrx.reset();
-
-        // Stop read operation.
-        self.0.tasks_stop.write(|w|
-            // `1` is a valid value to write to task registers.
-            unsafe { w.bits(1) });
-
-        // Wait until total operation has ended.
-        while self.0.events_stopped.read().bits() == 0 {}
-        self.0.events_stopped.reset();
+        // Copy to RAM
+        let wr_ram_buffer = &mut [0; FORCE_COPY_BUFFER_SIZE][..wr_buffer.len()];
+        wr_ram_buffer.copy_from_slice(wr_buffer);
 
-        // Conservative compiler fence to prevent optimizations that do not
-        // take in to account actions by DMA. The fence has been placed here,
-        // after all possible DMA actions have completed.
-        compiler_fence(SeqCst);
-
-        // Check for bad reads.
-        if self.0.rxd.amount.read().bits() != rx_buffer.len() as u32 {
-            return Err(Error::Receive);
-        }
-
-        Ok(())
+        self.write_then_read(address, wr_ram_buffer, rd_buffer)
     }
 
     /// Return the raw interface to the underlying TWIM peripheral.
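
A hedged sketch of the call-site consequence of this change: a write longer than the internal copy buffer now fails fast with `Error::TxBufferTooLong` instead of being silently split into separate transactions. Only `copy_write_then_read`, `write_then_read`, and the error variant come from the diff above; the function, its names, and the recovery advice are assumptions for illustration.

use nrf52840_hal::twim::{Error, Instance, Twim};

/// Hypothetical call site, not part of this commit.
fn configure_then_check<T: Instance>(
    i2c: &mut Twim<T>,
    addr: u8,
    config: &[u8],      // e.g. a `const` table kept in flash
    status: &mut [u8],
) -> Result<(), Error> {
    match i2c.copy_write_then_read(addr, config, status) {
        // The write and the read ran as a single transaction joined by a
        // repeated start, matching `write_then_read` semantics.
        Ok(()) => Ok(()),
        // New behaviour after this commit: a write longer than the driver's
        // internal RAM copy buffer is rejected up front rather than silently
        // split. Shorten the write, or stage the data in a RAM buffer and
        // call `write_then_read` directly.
        Err(Error::TxBufferTooLong) => Err(Error::TxBufferTooLong),
        Err(e) => Err(e),
    }
}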
