 #include "riscv.h"
 #include "field_helpers.h"
 
+// TODO: DTM_DMI_MAX_ADDRESS_LENGTH should be reduced to 32 (per the debug spec)
 #define DTM_DMI_MAX_ADDRESS_LENGTH ((1<<DTM_DTMCS_ABITS_LENGTH)-1)
 #define DMI_SCAN_MAX_BIT_LENGTH (DTM_DMI_MAX_ADDRESS_LENGTH + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH)
+
 #define DMI_SCAN_BUF_SIZE (DIV_ROUND_UP(DMI_SCAN_MAX_BIT_LENGTH, 8))
 
 /* Reserve extra room in the batch (needed for the last NOP operation) */
 #define BATCH_RESERVED_SCANS 1
 
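+/* Number of bits shifted through DR in one DMI scan for this target:
+ * the address field plus the fixed-width data and op fields. */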
+static unsigned int get_dmi_scan_length(const struct target *target)
+{
+	const unsigned int abits = riscv_get_dmi_address_bits(target);
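+	/* A non-zero abits that does not exceed DTM_DMI_MAX_ADDRESS_LENGTH
+	 * keeps the resulting scan within DMI_SCAN_BUF_SIZE. */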
+	assert(abits > 0);
+	assert(abits <= DTM_DMI_MAX_ADDRESS_LENGTH);
+
+	return abits + DTM_DMI_DATA_LENGTH + DTM_DMI_OP_LENGTH;
+}
+
 struct riscv_batch *riscv_batch_alloc(struct target *target, size_t scans)
 {
 	scans += BATCH_RESERVED_SCANS;
@@ -127,11 +138,10 @@ static void add_idle_before_batch(const struct riscv_batch *batch, size_t start_
 	const unsigned int idle_change = new_delay - batch->last_scan_delay;
 	LOG_TARGET_DEBUG(batch->target, "Adding %u idle cycles before the batch.",
 			idle_change);
-	assert(idle_change <= INT_MAX);
 	jtag_add_runtest(idle_change, TAP_IDLE);
 }
 
-static int get_delay(const struct riscv_batch *batch, size_t scan_idx,
+static unsigned int get_delay(const struct riscv_batch *batch, size_t scan_idx,
 		const struct riscv_scan_delays *delays, bool resets_delays,
 		size_t reset_delays_after)
 {
@@ -142,7 +152,6 @@ static int get_delay(const struct riscv_batch *batch, size_t scan_idx,
 	const enum riscv_scan_delay_class delay_class =
 		batch->delay_classes[scan_idx];
 	const unsigned int delay = riscv_scan_get_delay(delays, delay_class);
-	assert(delay <= INT_MAX);
 	return delays_were_reset ? 0 : delay;
 }
 
@@ -198,10 +207,7 @@ static void log_batch(const struct riscv_batch *batch, size_t start_idx,
 	if (debug_level < LOG_LVL_DEBUG)
 		return;
 
-	const unsigned int scan_bits = batch->fields->num_bits;
-	assert(scan_bits == (unsigned int)riscv_get_dmi_scan_length(batch->target));
-	const unsigned int abits = scan_bits - DTM_DMI_OP_LENGTH
-		- DTM_DMI_DATA_LENGTH;
+	const unsigned int abits = riscv_get_dmi_address_bits(batch->target);
 
 	/* Determine the "op" and "address" of the scan that preceded the first
 	 * executed scan.
@@ -211,7 +217,7 @@ static void log_batch(const struct riscv_batch *batch, size_t start_idx,
 	 * would be a more robust solution.
 	 */
 	bool last_scan_was_read = false;
-	uint32_t last_scan_address = -1 /* to silence maybe-uninitialized */;
+	uint32_t last_scan_address = (uint32_t)(-1) /* to silence maybe-uninitialized */;
 	if (start_idx > 0) {
 		const struct scan_field * const field = &batch->fields[start_idx - 1];
 		assert(field->out_value);
@@ -224,7 +230,7 @@ static void log_batch(const struct riscv_batch *batch, size_t start_idx,
 	/* Decode and log every executed scan */
 	for (size_t i = start_idx; i < batch->used_scans; ++i) {
 		static const char * const op_string[] = {"-", "r", "w", "?"};
-		const int delay = get_delay(batch, i, delays, resets_delays,
+		const unsigned int delay = get_delay(batch, i, delays, resets_delays,
 				reset_delays_after);
 		const struct scan_field * const field = &batch->fields[i];
 
@@ -247,15 +253,15 @@ static void log_batch(const struct riscv_batch *batch, size_t start_idx,
 					DTM_DMI_ADDRESS_OFFSET, abits);
 
 			LOG_DEBUG("%db %s %08" PRIx32 " @%02" PRIx32
-					" -> %s %08" PRIx32 " @%02" PRIx32 "; %di ",
+					" -> %s %08" PRIx32 " @%02" PRIx32 "; %ui ",
 					field->num_bits, op_string[out_op], out_data, out_address,
 					status_string[in_op], in_data, in_address, delay);
 
 			if (last_scan_was_read && in_op == DTM_DMI_OP_SUCCESS)
 				log_dmi_decoded(batch, /*write*/ false,
 						last_scan_address, in_data);
 		} else {
-			LOG_DEBUG("%db %s %08" PRIx32 " @%02" PRIx32 " -> ?; %di ",
+			LOG_DEBUG("%db %s %08" PRIx32 " @%02" PRIx32 " -> ?; %ui ",
 					field->num_bits, op_string[out_op], out_data, out_address,
 					delay);
 		}
@@ -321,35 +327,56 @@ int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
 	return ERROR_OK;
 }
 
-void riscv_batch_add_dmi_write(struct riscv_batch *batch, uint64_t address, uint32_t data,
+void riscv_batch_add_dmi_write(struct riscv_batch *batch, uint32_t address, uint32_t data,
 		bool read_back, enum riscv_scan_delay_class delay_class)
 {
+	// TODO: Check that the bit width of "address" is no more than dtmcs.abits,
+	// otherwise return an error (during batch creation or when the batch is executed).
+
 	assert(batch->used_scans < batch->allocated_scans);
 	struct scan_field *field = batch->fields + batch->used_scans;
-	field->num_bits = riscv_get_dmi_scan_length(batch->target);
-	field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
-	riscv_fill_dmi_write(batch->target, (char *)field->out_value, address, data);
+
+	field->num_bits = get_dmi_scan_length(batch->target);
+	assert(field->num_bits <= DMI_SCAN_MAX_BIT_LENGTH);
+
+	uint8_t *out_value = batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE;
+	uint8_t *in_value = batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE;
+
+	field->out_value = out_value;
+	riscv_fill_dmi_write(batch->target, out_value, address, data);
+
 	if (read_back) {
-		field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
-		riscv_fill_dm_nop(batch->target, (char *)field->in_value);
+		field->in_value = in_value;
+		riscv_fill_dm_nop(batch->target, in_value);
 	} else {
 		field->in_value = NULL;
 	}
+
 	batch->delay_classes[batch->used_scans] = delay_class;
 	batch->last_scan = RISCV_SCAN_TYPE_WRITE;
 	batch->used_scans++;
 }
 
-size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, uint64_t address,
+size_t riscv_batch_add_dmi_read(struct riscv_batch *batch, uint32_t address,
 		enum riscv_scan_delay_class delay_class)
 {
+	// TODO: Check that the bit width of "address" is no more than dtmcs.abits,
+	// otherwise return an error (during batch creation or when the batch is executed).
+
 	assert(batch->used_scans < batch->allocated_scans);
 	struct scan_field *field = batch->fields + batch->used_scans;
-	field->num_bits = riscv_get_dmi_scan_length(batch->target);
-	field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
-	field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
-	riscv_fill_dmi_read(batch->target, (char *)field->out_value, address);
-	riscv_fill_dm_nop(batch->target, (char *)field->in_value);
+
+	field->num_bits = get_dmi_scan_length(batch->target);
+	assert(field->num_bits <= DMI_SCAN_MAX_BIT_LENGTH);
+
+	uint8_t *out_value = batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE;
+	uint8_t *in_value = batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE;
+
+	field->out_value = out_value;
+	field->in_value = in_value;
+	riscv_fill_dmi_read(batch->target, out_value, address);
+	riscv_fill_dm_nop(batch->target, in_value);
+
 	batch->delay_classes[batch->used_scans] = delay_class;
 	batch->last_scan = RISCV_SCAN_TYPE_READ;
 	batch->used_scans++;
@@ -382,11 +409,18 @@ void riscv_batch_add_nop(struct riscv_batch *batch)
 {
 	assert(batch->used_scans < batch->allocated_scans);
 	struct scan_field *field = batch->fields + batch->used_scans;
-	field->num_bits = riscv_get_dmi_scan_length(batch->target);
-	field->out_value = (void *)(batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE);
-	field->in_value = (void *)(batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE);
-	riscv_fill_dm_nop(batch->target, (char *)field->out_value);
-	riscv_fill_dm_nop(batch->target, (char *)field->in_value);
+
+	field->num_bits = get_dmi_scan_length(batch->target);
+	assert(field->num_bits <= DMI_SCAN_MAX_BIT_LENGTH);
+
+	uint8_t *out_value = batch->data_out + batch->used_scans * DMI_SCAN_BUF_SIZE;
+	uint8_t *in_value = batch->data_in + batch->used_scans * DMI_SCAN_BUF_SIZE;
+
+	field->out_value = out_value;
+	field->in_value = in_value;
+	riscv_fill_dm_nop(batch->target, out_value);
+	riscv_fill_dm_nop(batch->target, in_value);
+
 	/* DMI NOP never triggers any debug module operation,
 	 * so the shortest (base) delay can be used. */
 	batch->delay_classes[batch->used_scans] = RISCV_DELAY_BASE;