Skip to content

Commit b4def16

Browse files
GUIDINGLI authored and xiaoxiang781216 committed
Revert "mm/iob: Replace the critical section with spin lock"
`g_iob_sem.semcount` is modified both manually in the iob source code and by the nxsem_xxx APIs. The nxsem-related APIs use a critical section to ensure the semaphore value is modified correctly. If iob uses a spin lock while modifying the semaphore value at the same time, it is not safe. This PR reverts the spin lock change and uses a critical section to align with what nxsem uses.
1 parent 1280ac8 commit b4def16

File tree

9 files changed

+23
-36
lines changed

9 files changed

+23
-36
lines changed

mm/iob/iob.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,6 @@
3232
#include <debug.h>
3333

3434
#include <nuttx/mm/iob.h>
35-
#include <nuttx/spinlock.h>
3635
#include <nuttx/semaphore.h>
3736

3837
#ifdef CONFIG_MM_IOB
@@ -85,8 +84,6 @@ extern sem_t g_throttle_sem; /* Counts available I/O buffers when throttled */
8584
extern sem_t g_qentry_sem; /* Counts free I/O buffer queue containers */
8685
#endif
8786

88-
extern spinlock_t g_iob_lock;
89-
9087
/****************************************************************************
9188
* Public Function Prototypes
9289
****************************************************************************/

mm/iob/iob_add_queue.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
6363

6464
qentry->qe_flink = NULL;
6565

66-
irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
66+
irqstate_t flags = enter_critical_section();
6767
if (!iobq->qh_head)
6868
{
6969
iobq->qh_head = qentry;
@@ -76,7 +76,7 @@ static int iob_add_queue_internal(FAR struct iob_s *iob,
7676
iobq->qh_tail = qentry;
7777
}
7878

79-
spin_unlock_irqrestore(&g_iob_lock, flags);
79+
leave_critical_section(flags);
8080

8181
return 0;
8282
}

mm/iob/iob_alloc.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
7878
* to protect the committed list: We disable interrupts very briefly.
7979
*/
8080

81-
flags = spin_lock_irqsave(&g_iob_lock);
81+
flags = enter_critical_section();
8282

8383
/* Take the I/O buffer from the head of the committed list */
8484

@@ -97,7 +97,7 @@ static FAR struct iob_s *iob_alloc_committed(void)
9797
iob->io_pktlen = 0; /* Total length of the packet */
9898
}
9999

100-
spin_unlock_irqrestore(&g_iob_lock, flags);
100+
leave_critical_section(flags);
101101
return iob;
102102
}
103103

@@ -264,7 +264,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
264264
* to protect the free list: We disable interrupts very briefly.
265265
*/
266266

267-
flags = spin_lock_irqsave(&g_iob_lock);
267+
flags = enter_critical_section();
268268

269269
#if CONFIG_IOB_THROTTLE > 0
270270
/* If there are free I/O buffers for this allocation */
@@ -308,7 +308,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
308308
}
309309
#endif
310310

311-
spin_unlock_irqrestore(&g_iob_lock, flags);
311+
leave_critical_section(flags);
312312

313313
/* Put the I/O buffer in a known state */
314314

@@ -320,7 +320,7 @@ FAR struct iob_s *iob_tryalloc(bool throttled)
320320
}
321321
}
322322

323-
spin_unlock_irqrestore(&g_iob_lock, flags);
323+
leave_critical_section(flags);
324324
return NULL;
325325
}
326326

mm/iob/iob_alloc_qentry.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
5959
* to protect the committed list: We disable interrupts very briefly.
6060
*/
6161

62-
flags = spin_lock_irqsave(&g_iob_lock);
62+
flags = enter_critical_section();
6363

6464
/* Take the I/O buffer from the head of the committed list */
6565

@@ -75,7 +75,7 @@ static FAR struct iob_qentry_s *iob_alloc_qcommitted(void)
7575
iobq->qe_head = NULL; /* Nothing is contained */
7676
}
7777

78-
spin_unlock_irqrestore(&g_iob_lock, flags);
78+
leave_critical_section(flags);
7979
return iobq;
8080
}
8181

@@ -201,7 +201,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
201201
* to protect the free list: We disable interrupts very briefly.
202202
*/
203203

204-
flags = spin_lock_irqsave(&g_iob_lock);
204+
flags = enter_critical_section();
205205
iobq = g_iob_freeqlist;
206206
if (iobq)
207207
{
@@ -227,7 +227,7 @@ FAR struct iob_qentry_s *iob_tryalloc_qentry(void)
227227
iobq->qe_head = NULL; /* Nothing is contained */
228228
}
229229

230-
spin_unlock_irqrestore(&g_iob_lock, flags);
230+
leave_critical_section(flags);
231231
return iobq;
232232
}
233233

mm/iob/iob_free.c

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
135135
* interrupts very briefly.
136136
*/
137137

138-
flags = spin_lock_irqsave(&g_iob_lock);
138+
flags = enter_critical_section();
139139

140140
/* Which list? If there is a task waiting for an IOB, then put
141141
* the IOB on either the free list or on the committed list where
@@ -168,7 +168,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
168168
g_iob_freelist = iob;
169169
}
170170

171-
spin_unlock_irqrestore(&g_iob_lock, flags);
171+
leave_critical_section(flags);
172172

173173
/* Signal that an IOB is available. This is done with schedule locked
174174
* to make sure that both g_iob_sem and g_throttle_sem are incremented
@@ -184,7 +184,7 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
184184
DEBUGASSERT(g_iob_sem.semcount <= CONFIG_IOB_NBUFFERS);
185185

186186
#if CONFIG_IOB_THROTTLE > 0
187-
flags = spin_lock_irqsave(&g_iob_lock);
187+
flags = enter_critical_section();
188188

189189
if (g_iob_sem.semcount > CONFIG_IOB_THROTTLE)
190190
{
@@ -205,15 +205,15 @@ FAR struct iob_s *iob_free(FAR struct iob_s *iob)
205205
g_iob_sem.semcount--;
206206
}
207207

208-
spin_unlock_irqrestore(&g_iob_lock, flags);
208+
leave_critical_section(flags);
209209

210210
nxsem_post(&g_throttle_sem);
211211
DEBUGASSERT(g_throttle_sem.semcount <=
212212
(CONFIG_IOB_NBUFFERS - CONFIG_IOB_THROTTLE));
213213
}
214214
else
215215
{
216-
spin_unlock_irqrestore(&g_iob_lock, flags);
216+
leave_critical_section(flags);
217217
}
218218
#endif
219219

mm/iob/iob_free_qentry.c

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
6060
* interrupts very briefly.
6161
*/
6262

63-
flags = spin_lock_irqsave(&g_iob_lock);
63+
flags = enter_critical_section();
6464

6565
/* Which list? If there is a task waiting for an IOB chain, then put
6666
* the IOB chain on either the free list or on the committed list where
@@ -79,8 +79,6 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
7979
g_iob_freeqlist = iobq;
8080
}
8181

82-
spin_unlock_irqrestore(&g_iob_lock, flags);
83-
8482
/* Signal that an I/O buffer chain container is available. If there
8583
* is a thread waiting for an I/O buffer chain container, this will
8684
* wake up exactly one thread. The semaphore count will correctly
@@ -89,6 +87,7 @@ FAR struct iob_qentry_s *iob_free_qentry(FAR struct iob_qentry_s *iobq)
8987
*/
9088

9189
nxsem_post(&g_qentry_sem);
90+
leave_critical_section(flags);
9291

9392
/* And return the I/O buffer chain container after the one that was freed */
9493

mm/iob/iob_free_queue_qentry.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
5353
FAR struct iob_qentry_s *prev = NULL;
5454
FAR struct iob_qentry_s *qentry;
5555

56-
irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
56+
irqstate_t flags = enter_critical_section();
5757
for (qentry = iobq->qh_head; qentry != NULL;
5858
prev = qentry, qentry = qentry->qe_flink)
5959
{
@@ -75,8 +75,6 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
7575
iobq->qh_tail = prev;
7676
}
7777

78-
spin_unlock_irqrestore(&g_iob_lock, flags);
79-
8078
/* Remove the queue container */
8179

8280
iob_free_qentry(qentry);
@@ -85,11 +83,11 @@ void iob_free_queue_qentry(FAR struct iob_s *iob,
8583

8684
iob_free_chain(iob);
8785

88-
return;
86+
break;
8987
}
9088
}
9189

92-
spin_unlock_irqrestore(&g_iob_lock, flags);
90+
leave_critical_section(flags);
9391
}
9492

9593
#endif /* CONFIG_IOB_NCHAINS > 0 */

mm/iob/iob_initialize.c

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,8 +108,6 @@ sem_t g_throttle_sem = SEM_INITIALIZER(CONFIG_IOB_NBUFFERS -
108108
sem_t g_qentry_sem = SEM_INITIALIZER(CONFIG_IOB_NCHAINS);
109109
#endif
110110

111-
spinlock_t g_iob_lock = SP_UNLOCKED;
112-
113111
/****************************************************************************
114112
* Public Functions
115113
****************************************************************************/

mm/iob/iob_remove_queue.c

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
5858

5959
/* Remove the I/O buffer chain from the head of the queue */
6060

61-
irqstate_t flags = spin_lock_irqsave(&g_iob_lock);
61+
irqstate_t flags = enter_critical_section();
6262
qentry = iobq->qh_head;
6363
if (qentry)
6464
{
@@ -68,20 +68,15 @@ FAR struct iob_s *iob_remove_queue(FAR struct iob_queue_s *iobq)
6868
iobq->qh_tail = NULL;
6969
}
7070

71-
spin_unlock_irqrestore(&g_iob_lock, flags);
72-
7371
/* Extract the I/O buffer chain from the container and free the
7472
* container.
7573
*/
7674

7775
iob = qentry->qe_head;
7876
iob_free_qentry(qentry);
7977
}
80-
else
81-
{
82-
spin_unlock_irqrestore(&g_iob_lock, flags);
83-
}
8478

79+
leave_critical_section(flags);
8580
return iob;
8681
}
8782

0 commit comments

Comments
 (0)