#include <lib/libc.h>
+ #include <hal.h>
+ #include <spinlock.h>
#include <sys/pipe.h>
#include <sys/task.h>
@@ -17,8 +19,11 @@ static inline bool pipe_is_valid(const pipe_t *p)
        p->head <= p->mask && p->tail <= p->mask;
}

+ static spinlock_t pipe_lock = SPINLOCK_INITIALIZER;
+ static uint32_t pipe_flags = 0;
+
/* empty/full checks using the used counter */
- static inline bool pipe_is_empty(const pipe_t *p)
+ static inline int pipe_is_empty(const pipe_t *p)
{
    return p->used == 0;
}
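For context, the spin_lock_irqsave()/spin_unlock_irqrestore() pair introduced in the hunks below is expected to save and disable local interrupts before acquiring the lock, then restore the saved state on release, which is what makes it a drop-in replacement for CRITICAL_ENTER()/CRITICAL_LEAVE(). A minimal single-core sketch of that contract, not the actual <spinlock.h> (hal_irq_save()/hal_irq_restore() are placeholders for whatever <hal.h> provides):

/* Hypothetical sketch of the assumed spinlock contract; the real
 * <spinlock.h> may differ. hal_irq_save()/hal_irq_restore() stand in
 * for the HAL's interrupt mask/restore primitives. */
typedef struct {
    volatile uint32_t locked;
} spinlock_t;

#define SPINLOCK_INITIALIZER { 0 }

static inline void spin_lock_irqsave(spinlock_t *lock, uint32_t *flags)
{
    *flags = hal_irq_save();    /* save interrupt state, then disable */
    while (__sync_lock_test_and_set(&lock->locked, 1))
        ;                       /* spin until the holder releases */
}

static inline void spin_unlock_irqrestore(spinlock_t *lock, uint32_t flags)
{
    __sync_lock_release(&lock->locked); /* release with a store barrier */
    hal_irq_restore(flags);             /* restore saved interrupt state */
}

Note that all pipes share one pipe_lock and one saved-flags word. Sharing pipe_flags is tenable on a single core only because interrupts stay disabled for the whole time the word is live, so no second lock/unlock pair can interleave with it.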
@@ -171,9 +176,9 @@ void mo_pipe_flush(pipe_t *p)
    if (unlikely(!pipe_is_valid(p)))
        return;

-    CRITICAL_ENTER();
+    spin_lock_irqsave(&pipe_lock, &pipe_flags);
    p->head = p->tail = p->used = 0;
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&pipe_lock, pipe_flags);
}

int32_t mo_pipe_size(pipe_t *p)
@@ -209,27 +214,27 @@ int32_t mo_pipe_free_space(pipe_t *p)
static void pipe_wait_until_readable(pipe_t *p)
{
    while (1) {
-        CRITICAL_ENTER();
+        spin_lock_irqsave(&pipe_lock, &pipe_flags);
        if (!pipe_is_empty(p)) {
-            CRITICAL_LEAVE();
+            spin_unlock_irqrestore(&pipe_lock, pipe_flags);
            return;
        }
        /* Nothing to read – drop critical section and yield CPU */
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&pipe_lock, pipe_flags);
        mo_task_wfi(); /* Yield CPU without blocking task state */
    }
}

static void pipe_wait_until_writable(pipe_t *p)
{
    while (1) {
-        CRITICAL_ENTER();
+        spin_lock_irqsave(&pipe_lock, &pipe_flags);
        if (!pipe_is_full(p)) {
-            CRITICAL_LEAVE();
+            spin_unlock_irqrestore(&pipe_lock, pipe_flags);
            return;
        }
        /* Buffer full – yield until space is available */
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&pipe_lock, pipe_flags);
        mo_task_wfi(); /* Yield CPU without blocking task state */
    }
}
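Both helpers implement the same poll-and-yield pattern: sample the condition under the lock, and if the pipe is not ready, drop the lock and yield via mo_task_wfi() before retrying. Because the helper also drops the lock before returning on success, the condition can become false again before the caller reacquires it, so the callers must tolerate a zero-length chunk and keep looping. The hunks that follow show only the locked region inside that loop; the enclosing shape of mo_pipe_read is presumably something like this sketch (not the verbatim source; the validation and error value are assumptions):

/* Assumed shape of the enclosing loop; only the lock/unlock lines
 * inside it appear in this diff. */
int32_t mo_pipe_read(pipe_t *p, char *dst, uint16_t len)
{
    if (unlikely(!pipe_is_valid(p) || !dst))
        return -1; /* hypothetical error code */

    uint16_t bytes_read = 0;
    while (bytes_read < len) {
        pipe_wait_until_readable(p);

        spin_lock_irqsave(&pipe_lock, &pipe_flags);
        uint16_t chunk = pipe_bulk_read(p, dst + bytes_read, len - bytes_read);
        spin_unlock_irqrestore(&pipe_lock, pipe_flags);

        bytes_read += chunk; /* chunk may be 0 if the pipe drained again */
    }
    return (int32_t) bytes_read;
}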
@@ -247,9 +252,9 @@ int32_t mo_pipe_read(pipe_t *p, char *dst, uint16_t len)
        pipe_wait_until_readable(p);

        /* Read as much as possible in one critical section */
-        CRITICAL_ENTER();
+        spin_lock_irqsave(&pipe_lock, &pipe_flags);
        uint16_t chunk = pipe_bulk_read(p, dst + bytes_read, len - bytes_read);
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&pipe_lock, pipe_flags);

        bytes_read += chunk;
@@ -272,10 +277,10 @@ int32_t mo_pipe_write(pipe_t *p, const char *src, uint16_t len)
        pipe_wait_until_writable(p);

        /* Write as much as possible in one critical section */
-        CRITICAL_ENTER();
+        spin_lock_irqsave(&pipe_lock, &pipe_flags);
        uint16_t chunk =
            pipe_bulk_write(p, src + bytes_written, len - bytes_written);
-        CRITICAL_LEAVE();
+        spin_unlock_irqrestore(&pipe_lock, pipe_flags);

        bytes_written += chunk;
@@ -294,9 +299,9 @@ int32_t mo_pipe_nbread(pipe_t *p, char *dst, uint16_t len)
    uint16_t bytes_read;

-    CRITICAL_ENTER();
+    spin_lock_irqsave(&pipe_lock, &pipe_flags);
    bytes_read = pipe_bulk_read(p, dst, len);
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&pipe_lock, pipe_flags);

    return (int32_t) bytes_read;
}
@@ -309,9 +314,9 @@ int32_t mo_pipe_nbwrite(pipe_t *p, const char *src, uint16_t len)
    uint16_t bytes_written;

-    CRITICAL_ENTER();
+    spin_lock_irqsave(&pipe_lock, &pipe_flags);
    bytes_written = pipe_bulk_write(p, src, len);
-    CRITICAL_LEAVE();
+    spin_unlock_irqrestore(&pipe_lock, pipe_flags);

    return (int32_t) bytes_written;
}
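Taken together, the blocking and non-blocking entry points now all serialize against the same pipe_lock. An illustrative producer/consumer pairing over this API (pipe creation and task registration are outside this diff and assumed to happen elsewhere, e.g. at boot):

/* Illustrative usage only; log_pipe is assumed to be initialized
 * elsewhere before either task runs. */
static pipe_t *log_pipe;

static void producer_task(void)
{
    static const char msg[] = "tick";
    while (1)
        mo_pipe_write(log_pipe, msg, sizeof msg); /* blocks until space */
}

static void consumer_task(void)
{
    char buf[8];
    while (1) {
        /* Non-blocking: returns at once with 0..sizeof(buf) bytes */
        int32_t n = mo_pipe_nbread(log_pipe, buf, sizeof buf);
        if (n <= 0)
            mo_task_wfi(); /* nothing buffered yet; yield the CPU */
    }
}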