@@ -6,6 +6,7 @@
 
 #include "batch.h"
 #include "debug_defines.h"
+#include "debug_reg_printer.h"
 #include "riscv.h"
 #include "field_helpers.h"
 
@@ -131,22 +132,149 @@ static void add_idle_before_batch(const struct riscv_batch *batch, size_t start_
 }
 
 static int get_delay(const struct riscv_batch *batch, size_t scan_idx,
-		const struct riscv_scan_delays *delays)
+		const struct riscv_scan_delays *delays, bool resets_delays,
+		size_t reset_delays_after)
 {
 	assert(batch);
 	assert(scan_idx < batch->used_scans);
+	const bool delays_were_reset = resets_delays
+		&& (scan_idx >= reset_delays_after);
 	const enum riscv_scan_delay_class delay_class =
 		batch->delay_classes[scan_idx];
 	const unsigned int delay = riscv_scan_get_delay(delays, delay_class);
 	assert(delay <= INT_MAX);
-	return delay;
+	return delays_were_reset ? 0 : delay;
+}
+
+static unsigned int decode_dmi(const struct riscv_batch *batch, char *text,
+		uint32_t address, uint32_t data)
+{
+	static const struct {
+		uint32_t address;
+		enum riscv_debug_reg_ordinal ordinal;
+	} description[] = {
+		{DM_DMCONTROL, DM_DMCONTROL_ORDINAL},
+		{DM_DMSTATUS, DM_DMSTATUS_ORDINAL},
+		{DM_ABSTRACTCS, DM_ABSTRACTCS_ORDINAL},
+		{DM_COMMAND, DM_COMMAND_ORDINAL},
+		{DM_SBCS, DM_SBCS_ORDINAL}
+	};
+
+	for (unsigned int i = 0; i < ARRAY_SIZE(description); i++) {
+		if (riscv_get_dmi_address(batch->target, description[i].address)
+				== address) {
+			const riscv_debug_reg_ctx_t context = {
+				.XLEN = { .value = 0, .is_set = false },
+				.DXLEN = { .value = 0, .is_set = false },
+				.abits = { .value = 0, .is_set = false },
+			};
+			return riscv_debug_reg_to_s(text, description[i].ordinal,
+					context, data, RISCV_DEBUG_REG_HIDE_ALL_0);
+		}
+	}
+	if (text)
+		text[0] = '\0';
+	return 0;
+}
+
+static void log_dmi_decoded(const struct riscv_batch *batch, bool write,
+		uint32_t address, uint32_t data)
+{
+	const size_t size = decode_dmi(batch, /* text */ NULL, address, data) + 1;
+	char * const decoded = malloc(size);
+	if (!decoded) {
+		LOG_ERROR("Not enough memory to allocate %zu bytes.", size);
+		return;
+	}
+	decode_dmi(batch, decoded, address, data);
+	LOG_DEBUG("%s: %s", write ? "write" : "read", decoded);
+	free(decoded);
+}
+
+static void log_batch(const struct riscv_batch *batch, size_t start_idx,
+		const struct riscv_scan_delays *delays, bool resets_delays,
+		size_t reset_delays_after)
+{
+	if (debug_level < LOG_LVL_DEBUG)
+		return;
+
+	const unsigned int scan_bits = batch->fields->num_bits;
+	assert(scan_bits == (unsigned int)riscv_get_dmi_scan_length(batch->target));
+	const unsigned int abits = scan_bits - DTM_DMI_OP_LENGTH
+		- DTM_DMI_DATA_LENGTH;
+
+	/* Determine the "op" and "address" of the scan that preceded the first
+	 * executed scan.
+	 * FIXME: The code here assumes that there were no DMI operations between
+	 * the last execution of the batch and the current one.
+	 * Caching the info about the last executed DMI scan in "dm013_info_t"
+	 * would be a more robust solution.
+	 */
+	bool last_scan_was_read = false;
+	uint32_t last_scan_address = -1 /* to silence maybe-uninitialized */;
+	if (start_idx > 0) {
+		const struct scan_field * const field = &batch->fields[start_idx - 1];
+		assert(field->out_value);
+		last_scan_was_read = buf_get_u32(field->out_value, DTM_DMI_OP_OFFSET,
+				DTM_DMI_OP_LENGTH) == DTM_DMI_OP_READ;
+		last_scan_address = buf_get_u32(field->out_value,
+				DTM_DMI_ADDRESS_OFFSET, abits);
+	}
+
+	/* Decode and log every executed scan */
+	for (size_t i = start_idx; i < batch->used_scans; ++i) {
+		static const char * const op_string[] = {"-", "r", "w", "?"};
+		const int delay = get_delay(batch, i, delays, resets_delays,
+				reset_delays_after);
+		const struct scan_field * const field = &batch->fields[i];
+
+		assert(field->out_value);
+		const unsigned int out_op = buf_get_u32(field->out_value,
+				DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
+		const uint32_t out_data = buf_get_u32(field->out_value,
+				DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
+		const uint32_t out_address = buf_get_u32(field->out_value,
+				DTM_DMI_ADDRESS_OFFSET, abits);
+		if (field->in_value) {
+			static const char * const status_string[] = {
+				"+", "?", "F", "b"
+			};
+			const unsigned int in_op = buf_get_u32(field->in_value,
+					DTM_DMI_OP_OFFSET, DTM_DMI_OP_LENGTH);
+			const uint32_t in_data = buf_get_u32(field->in_value,
+					DTM_DMI_DATA_OFFSET, DTM_DMI_DATA_LENGTH);
+			const uint32_t in_address = buf_get_u32(field->in_value,
+					DTM_DMI_ADDRESS_OFFSET, abits);
+
+			LOG_DEBUG("%db %s %08" PRIx32 " @%02" PRIx32
+					" -> %s %08" PRIx32 " @%02" PRIx32 "; %di",
+					field->num_bits, op_string[out_op], out_data, out_address,
+					status_string[in_op], in_data, in_address, delay);
+
+			if (last_scan_was_read && in_op == DTM_DMI_OP_SUCCESS)
+				log_dmi_decoded(batch, /*write*/ false,
+						last_scan_address, in_data);
+		} else {
+			LOG_DEBUG("%db %s %08" PRIx32 " @%02" PRIx32 " -> ?; %di",
+					field->num_bits, op_string[out_op], out_data, out_address,
+					delay);
+		}
+
+		if (out_op == DTM_DMI_OP_WRITE)
+			log_dmi_decoded(batch, /*write*/ true, out_address,
+					out_data);
+
+		last_scan_was_read = out_op == DTM_DMI_OP_READ;
+		last_scan_address = out_address;
+	}
 }
 
 int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
 		const struct riscv_scan_delays *delays, bool resets_delays,
 		size_t reset_delays_after)
 {
 	assert(batch->used_scans);
+	assert(start_idx < batch->used_scans);
 	assert(batch->last_scan == RISCV_SCAN_TYPE_NOP);
 	assert(!batch->was_run || riscv_batch_was_scan_busy(batch, start_idx));
 	assert(start_idx == 0 || !riscv_batch_was_scan_busy(batch, start_idx - 1));
@@ -157,17 +285,16 @@ int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
 	LOG_TARGET_DEBUG(batch->target, "Running batch of scans [%zu, %zu)",
 			start_idx, batch->used_scans);
 
+	unsigned int delay = 0 /* to silence maybe-uninitialized */;
 	for (size_t i = start_idx; i < batch->used_scans; ++i) {
 		if (bscan_tunnel_ir_width != 0)
 			riscv_add_bscan_tunneled_scan(batch->target, batch->fields + i, batch->bscan_ctxt + i);
 		else
 			jtag_add_dr_scan(batch->target->tap, 1, batch->fields + i, TAP_IDLE);
 
-		const bool delays_were_reset = resets_delays
-			&& (i >= reset_delays_after);
-		const int delay = get_delay(batch, i, delays);
-
-		if (!delays_were_reset)
+		delay = get_delay(batch, i, delays, resets_delays,
+				reset_delays_after);
+		if (delay > 0)
 			jtag_add_runtest(delay, TAP_IDLE);
 	}
 
@@ -188,13 +315,9 @@ int riscv_batch_run_from(struct riscv_batch *batch, size_t start_idx,
 		}
 	}
 
-	for (size_t i = start_idx; i < batch->used_scans; ++i) {
-		const int delay = get_delay(batch, i, delays);
-		riscv_log_dmi_scan(batch->target, delay, batch->fields + i);
-	}
-
+	log_batch(batch, start_idx, delays, resets_delays, reset_delays_after);
 	batch->was_run = true;
-	batch->last_scan_delay = get_delay(batch, batch->used_scans - 1, delays);
+	batch->last_scan_delay = delay;
 	return ERROR_OK;
 }
 
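Note (not part of the patch): a minimal, self-contained sketch of the delay decision that the reworked get_delay() now encapsulates. The program below is hypothetical illustration code, not OpenOCD source; only the ternary mirrors the patch. Once resets_delays is set, scans at index reset_delays_after and later get a zero delay, and the caller then skips jtag_add_runtest() for them.

/* Hypothetical standalone illustration; example_get_delay() is not an
 * OpenOCD function, it just repeats the decision from the patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static int example_get_delay(size_t scan_idx, unsigned int delay,
		bool resets_delays, size_t reset_delays_after)
{
	const bool delays_were_reset = resets_delays
			&& (scan_idx >= reset_delays_after);
	return delays_were_reset ? 0 : (int)delay;
}

int main(void)
{
	/* With resets_delays == true and reset_delays_after == 2, only scans
	 * 0 and 1 keep their configured delay; later scans return 0. */
	for (size_t i = 0; i < 4; ++i)
		printf("scan %zu: delay %d\n", i,
				example_get_delay(i, 5, true, 2));
	return 0;
}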