@@ -547,6 +547,7 @@ void files_close_onexec(FAR struct tcb_s *tcb)
547
547
int fs_getfilep (int fd , FAR struct file * * filep )
548
548
{
549
549
FAR struct filelist * list ;
550
+ irqstate_t flags ;
550
551
int ret ;
551
552
552
553
#ifdef CONFIG_FDCHECK
@@ -574,6 +575,27 @@ int fs_getfilep(int fd, FAR struct file **filep)
574
575
return - EBADF ;
575
576
}
576
577
578
+ /* Protect this part with a critical section to make sure that we won't
579
+ * interrupt the mutex lock-unlock sequence below which may lead to the
580
+ * priority inversion. The case is as follows:
581
+ *
582
+ * We have two threads: low-priority thread A and high-priority thread B,
583
+ * both threads share the same task group data.
584
+ *
585
+ * Thread A performs IO on files periodically. Thread B is woken up by
586
+ * high-frequency interrupts, and performs IO on files periodically.
587
+ *
588
+ * There is a chance that thread B wakes up exactly when thread A holds
589
+ * the mutex below, and consequently the file access in thread B will be
590
+ * delayed due to thread A holding the list->fl_lock mutex and execution
591
+ * will be returned to a thread with lower priority.
592
+ *
593
+ * The correct solution to this problem is to use the read-write lock,
594
+ * which is currently not supported by NuttX.
595
+ */
596
+
597
+ flags = enter_critical_section ();
598
+
577
599
/* The descriptor is in a valid range to file descriptor... Get the
578
600
* thread-specific file list.
579
601
*/
@@ -583,6 +605,7 @@ int fs_getfilep(int fd, FAR struct file **filep)
583
605
ret = nxmutex_lock (& list -> fl_lock );
584
606
if (ret < 0 )
585
607
{
608
+ leave_critical_section (flags );
586
609
return ret ;
587
610
}
588
611
@@ -598,6 +621,7 @@ int fs_getfilep(int fd, FAR struct file **filep)
598
621
}
599
622
600
623
nxmutex_unlock (& list -> fl_lock );
624
+ leave_critical_section (flags );
601
625
return ret ;
602
626
}
603
627
0 commit comments