@@ -293,34 +293,31 @@ void *xtensa_excint1_c(int *interrupted_stack)
	uint32_t ps;
	void *pc;

+#ifdef CONFIG_XTENSA_MMU
+	bool is_dblexc;
+	uint32_t depc;
+#else
+	const bool is_dblexc = false;
+#endif /* CONFIG_XTENSA_MMU */
+
	__asm__ volatile("rsr.exccause %0" : "=r"(cause));

#ifdef CONFIG_XTENSA_MMU
-	/* TLB miss exception comes through level 1 interrupt also.
-	 * We need to preserve execution context after we have handled
-	 * the TLB miss, so we cannot unconditionally unmask interrupts.
-	 * For other cause, we can unmask interrupts so this would act
-	 * the same as if there is no MMU.
-	 */
-	switch (cause) {
-	case EXCCAUSE_ITLB_MISS:
-		/* Instruction TLB miss */
-		__fallthrough;
-	case EXCCAUSE_DTLB_MISS:
-		/* Data TLB miss */
+	__asm__ volatile("rsr.depc %0" : "=r"(depc));

-		/* Do not unmask interrupt while handling TLB misses. */
-		break;
-	default:
-		/* For others, we can unmask interrupts. */
-		bsa->ps &= ~PS_INTLEVEL_MASK;
-		break;
-	}
+	is_dblexc = (depc != 0U);
#endif /* CONFIG_XTENSA_MMU */

	switch (cause) {
	case EXCCAUSE_LEVEL1_INTERRUPT:
-		return xtensa_int1_c(interrupted_stack);
+		if (!is_dblexc) {
+			return xtensa_int1_c(interrupted_stack);
+		}
+		break;
+#ifndef CONFIG_USERSPACE
+	/* Syscalls are handled earlier in assembly if MMU is enabled.
+	 * So we don't need this here.
+	 */
	case EXCCAUSE_SYSCALL:
		/* Just report it to the console for now */
		LOG_ERR(" ** SYSCALL PS %p PC %p",
@@ -333,38 +330,7 @@ void *xtensa_excint1_c(int *interrupted_stack)
		 */
		bsa->pc += 3;
		break;
-#ifdef CONFIG_XTENSA_MMU
-	case EXCCAUSE_ITLB_MISS:
-		/* Instruction TLB miss */
-		__fallthrough;
-	case EXCCAUSE_DTLB_MISS:
-		/* Data TLB miss */
-
-		/**
-		 * The way it works is, when we try to access an address
-		 * that is not mapped, we will have a miss. The HW then
-		 * will try to get the correspondent memory in the page
-		 * table. As the page table is not mapped in memory we will
-		 * have a second miss, which will trigger an exception.
-		 * In the exception (here) what we do is to exploit this
-		 * hardware capability just trying to load the page table
-		 * (not mapped address), which will cause a miss, but then
-		 * the hardware will automatically map it again from
-		 * the page table. This time it will work since the page
-		 * necessary to map the page table itself are wired map.
-		 */
-		__asm__ volatile("wsr a0, " ZSR_EXTRA0_STR "\n\t"
-				 "rsr.ptevaddr a0\n\t"
-				 "l32i a0, a0, 0\n\t"
-				 "rsr a0, " ZSR_EXTRA0_STR "\n\t"
-				 "rsync"
-				 : : : "a0", "memory");
-
-		/* Since we are dealing with TLB misses, we will probably not
-		 * want to switch to another thread.
-		 */
-		return interrupted_stack;
-#endif /* CONFIG_XTENSA_MMU */
+#endif /* !CONFIG_USERSPACE */
	default:
		ps = bsa->ps;
		pc = (void *)bsa->pc;
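The comment removed in this hunk describes a hardware-assisted refill trick: loading through PTEVADDR provokes a second miss that the autorefill logic resolves from the wired mapping of the page table. For readers tracing that mechanism, here is an annotated re-reading of the removed sequence (a sketch for explanation only, not part of the patch; it reuses the same ZSR_EXTRA0_STR scratch-register macro the file already relies on):

```c
/* Sketch: the same instruction sequence as the removed code, commented. */
static inline void tlb_autorefill_probe(void)
{
	__asm__ volatile(
		"wsr a0, " ZSR_EXTRA0_STR "\n\t" /* stash a0 in a scratch special register */
		"rsr.ptevaddr a0\n\t"            /* a0 = virtual address of the missing PTE */
		"l32i a0, a0, 0\n\t"             /* load it: the miss is refilled by HW from the wired page-table mapping */
		"rsr a0, " ZSR_EXTRA0_STR "\n\t" /* restore a0 */
		"rsync"                          /* synchronize special-register writes before continuing */
		: : : "a0", "memory");
}
```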
@@ -373,6 +339,7 @@ void *xtensa_excint1_c(int *interrupted_stack)

		/* Default for exception */
		int reason = K_ERR_CPU_EXCEPTION;
+		is_fatal_error = true;

		/* We need to distinguish between an ill in xtensa_arch_except,
		 * e.g for k_panic, and any other ill. For exceptions caused by
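The comment here refers to telling a deliberate `ill` raised by xtensa_arch_except() (e.g. from k_panic()) apart from any other illegal instruction; on the deliberate path the reason code travels in a2, which is why the next hunk reads it back from `bsa->a2`. A sketch of that general pattern only, not the actual Zephyr implementation of xtensa_arch_except():

```c
/* Sketch: load a reason code into a2, then trap with 'ill' so the exception
 * handler can recognize the deliberate path and recover the reason from the
 * saved a2 (bsa->a2).
 */
static inline void raise_kernel_fatal(int reason)
{
	__asm__ volatile("mov a2, %0\n\t"
			 "ill"
			 : : "r"(reason) : "a2", "memory");
}
```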
@@ -389,13 +356,19 @@ void *xtensa_excint1_c(int *interrupted_stack)
			reason = bsa->a2;
		}

-		LOG_ERR(" ** FATAL EXCEPTION");
+		LOG_ERR(" ** FATAL EXCEPTION%s", (is_dblexc ? " (DOUBLE)" : ""));
		LOG_ERR(" ** CPU %d EXCCAUSE %d (%s)",
			arch_curr_cpu()->id, cause,
			z_xtensa_exccause(cause));
		LOG_ERR(" ** PC %p VADDR %p",
			pc, (void *)vaddr);
		LOG_ERR(" ** PS %p", (void *)bsa->ps);
+		if (is_dblexc) {
+			LOG_ERR(" ** DEPC %p", (void *)depc);
+		}
+#ifdef CONFIG_USERSPACE
+		LOG_ERR(" ** THREADPTR %p", (void *)bsa->threadptr);
+#endif /* CONFIG_USERSPACE */
		LOG_ERR(" ** (INTLEVEL:%d EXCM: %d UM:%d RING:%d WOE:%d OWB:%d CALLINC:%d)",
			get_bits(0, 4, ps), get_bits(4, 1, ps),
			get_bits(5, 1, ps), get_bits(6, 2, ps),
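The PS dump at the end of this hunk unpacks the PS special register into its fields (INTLEVEL in bits 0-3, EXCM in bit 4, UM in bit 5, RING in bits 6-7, and so on) using a get_bits(offset, num_bits, value) helper defined elsewhere in the file. A plausible equivalent, for reference only; the file's own definition may differ:

```c
#include <stdint.h>

/* Sketch of a get_bits()-style bitfield extractor consistent with the calls
 * shown above: returns num_bits bits of val starting at bit offset.
 */
static inline uint32_t get_bits(uint32_t offset, uint32_t num_bits, uint32_t val)
{
	uint32_t mask = (num_bits < 32U) ? ((1U << num_bits) - 1U) : 0xFFFFFFFFU;

	return (val >> offset) & mask;
}
```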
@@ -412,21 +385,25 @@ void *xtensa_excint1_c(int *interrupted_stack)
		break;
	}

-
+#ifdef CONFIG_XTENSA_MMU
	switch (cause) {
-	case EXCCAUSE_SYSCALL:
	case EXCCAUSE_LEVEL1_INTERRUPT:
-	case EXCCAUSE_ALLOCA:
-	case EXCCAUSE_ITLB_MISS:
-	case EXCCAUSE_DTLB_MISS:
+#ifndef CONFIG_USERSPACE
+	case EXCCAUSE_SYSCALL:
+#endif /* !CONFIG_USERSPACE */
		is_fatal_error = false;
		break;
	default:
		is_fatal_error = true;
		break;
	}

-	if (is_fatal_error) {
+	if (is_dblexc) {
+		__asm__ volatile("wsr.depc %0" : : "r"(0));
+	}
+#endif /* CONFIG_XTENSA_MMU */
+
+	if (is_dblexc || is_fatal_error) {
		uint32_t ignore;

		/* We are going to manipulate _current_cpu->nested manually.