@@ -2,39 +2,9 @@
 #include <stdbool.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <unistd.h>

 #include "tock.h"

-typedef struct {
-  subscribe_upcall* cb;
-  int arg0;
-  int arg1;
-  int arg2;
-  void* ud;
-} tock_task_t;
-
-#define TASK_QUEUE_SIZE 16
-static tock_task_t task_queue[TASK_QUEUE_SIZE];
-static int task_cur = 0;
-static int task_last = 0;
-
-int tock_enqueue(subscribe_upcall cb, int arg0, int arg1, int arg2, void* ud) {
-  int next_task_last = (task_last + 1) % TASK_QUEUE_SIZE;
-  if (next_task_last == task_cur) {
-    return -1;
-  }
-
-  task_queue[task_last].cb   = cb;
-  task_queue[task_last].arg0 = arg0;
-  task_queue[task_last].arg1 = arg1;
-  task_queue[task_last].arg2 = arg2;
-  task_queue[task_last].ud   = ud;
-  task_last = next_task_last;
-
-  return task_last;
-}
-
 int tock_status_to_returncode(statuscode_t status) {
   // Conversion is easy. Since ReturnCode numeric mappings are -1*ErrorCode,
   // and success is 0 in both cases, we can just multiply by -1.
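
Aside on the deleted queue: it is a fixed-size ring buffer that reserves one slot to distinguish full from empty, so with TASK_QUEUE_SIZE = 16 it holds at most 15 pending tasks (tock_enqueue returns -1 once next_task_last catches up to task_cur). A minimal sketch of how a caller would have used the removed API; the upcall name and arguments here are hypothetical, not from this commit:

// Hypothetical use of the removed deferred-task API. `my_upcall` and its
// arguments are invented for illustration; the signature matches libtock's
// subscribe_upcall typedef: void (int, int, int, void*).
static void my_upcall(int arg0, int arg1, int arg2, void* ud) {
  // Ran later, when yield()/yield_no_wait() drained the queue via
  // yield_check_tasks() (also removed in this commit).
}

static void defer_work(void) {
  if (tock_enqueue(my_upcall, 0, 0, 0, NULL) < 0) {
    // The queue already held its maximum of 15 tasks; this one was dropped.
  }
}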
@@ -143,95 +113,75 @@ void yield_for(bool* cond) {
   }
 }

-// Returns 1 if a task is processed, 0 otherwise
-int yield_check_tasks(void) {
-  if (task_cur != task_last) {
-    tock_task_t task = task_queue[task_cur];
-    task_cur = (task_cur + 1) % TASK_QUEUE_SIZE;
-    task.cb(task.arg0, task.arg1, task.arg2, task.ud);
-    return 1;
-  } else {
-    return 0;
-  }
-}
-
 #if defined(__thumb__)


 void yield(void) {
-  if (yield_check_tasks()) {
-    return;
-  } else {
-    // Note: A process stops yielding when there is a callback ready to run,
-    // which the kernel executes by modifying the stack frame pushed by the
-    // hardware. The kernel copies the PC value from the stack frame to the LR
-    // field, and sets the PC value to callback to run. When this frame is
-    // unstacked during the interrupt return, the effectively clobbers the LR
-    // register.
-    //
-    // At this point, the callback function is now executing, which may itself
-    // clobber any of the other caller-saved registers. Thus we mark this
-    // inline assembly as conservatively clobbering all caller-saved registers,
-    // forcing yield to save any live registers.
-    //
-    // Upon direct observation of this function, the LR is the only register
-    // that is live across the SVC invocation, however, if the yield call is
-    // inlined, it is possible that the LR won't be live at all (commonly seen
-    // for the `while (1) { yield(); }` idiom) or that other registers are
-    // live, thus it is important to let the compiler do the work here.
-    //
-    // According to the AAPCS: A subroutine must preserve the contents of the
-    // registers r4-r8, r10, r11 and SP (and r9 in PCS variants that designate
-    // r9 as v6) As our compilation flags mark r9 as the PIC base register, it
-    // does not need to be saved. Thus we must clobber r0-3, r12, and LR
-    register uint32_t wait __asm__ ("r0") = 1; // yield-wait
-    register uint32_t wait_field __asm__ ("r1") = 0; // yield result ptr
-    __asm__ volatile (
-      "svc 0 \n"
-      :
-      : "r" (wait), "r" (wait_field)
-      : "memory", "r2", "r3", "r12", "lr"
-    );
-  }
+  // Note: A process stops yielding when there is a callback ready to run,
+  // which the kernel executes by modifying the stack frame pushed by the
+  // hardware. The kernel copies the PC value from the stack frame to the LR
+  // field, and sets the PC value to the callback to run. When this frame is
+  // unstacked during the interrupt return, this effectively clobbers the LR
+  // register.
+  //
+  // At this point, the callback function is now executing, which may itself
+  // clobber any of the other caller-saved registers. Thus we mark this
+  // inline assembly as conservatively clobbering all caller-saved registers,
+  // forcing yield to save any live registers.
+  //
+  // Upon direct observation of this function, the LR is the only register
+  // that is live across the SVC invocation. However, if the yield call is
+  // inlined, it is possible that the LR won't be live at all (commonly seen
+  // for the `while (1) { yield(); }` idiom) or that other registers are
+  // live, so it is important to let the compiler do the work here.
+  //
+  // According to the AAPCS: "A subroutine must preserve the contents of the
+  // registers r4-r8, r10, r11 and SP (and r9 in PCS variants that designate
+  // r9 as v6)". As our compilation flags mark r9 as the PIC base register, it
+  // does not need to be saved. Thus we must clobber r0-r3, r12, and LR.
+  register uint32_t wait __asm__ ("r0") = 1; // yield-wait
+  register uint32_t wait_field __asm__ ("r1") = 0; // yield result ptr
+  __asm__ volatile (
+    "svc 0 \n"
+    :
+    : "r" (wait), "r" (wait_field)
+    : "memory", "r2", "r3", "r12", "lr"
+  );
 }

 int yield_no_wait(void) {
-  if (yield_check_tasks()) {
-    return 1;
-  } else {
-    // Note: A process stops yielding when there is a callback ready to run,
-    // which the kernel executes by modifying the stack frame pushed by the
-    // hardware. The kernel copies the PC value from the stack frame to the LR
-    // field, and sets the PC value to callback to run. When this frame is
-    // unstacked during the interrupt return, the effectively clobbers the LR
-    // register.
-    //
-    // At this point, the callback function is now executing, which may itself
-    // clobber any of the other caller-saved registers. Thus we mark this
-    // inline assembly as conservatively clobbering all caller-saved registers,
-    // forcing yield to save any live registers.
-    //
-    // Upon direct observation of this function, the LR is the only register
-    // that is live across the SVC invocation, however, if the yield call is
-    // inlined, it is possible that the LR won't be live at all (commonly seen
-    // for the `while (1) { yield(); }` idiom) or that other registers are
-    // live, thus it is important to let the compiler do the work here.
-    //
-    // According to the AAPCS: A subroutine must preserve the contents of the
-    // registers r4-r8, r10, r11 and SP (and r9 in PCS variants that designate
-    // r9 as v6) As our compilation flags mark r9 as the PIC base register, it
-    // does not need to be saved. Thus we must clobber r0-3, r12, and LR
-    uint8_t result = 0;
-    register uint32_t wait __asm__ ("r0") = 0; // yield-no-wait
-    register uint8_t* wait_field __asm__ ("r1") = &result; // yield result ptr
-    __asm__ volatile (
-      "svc 0 \n"
-      :
-      : "r" (wait), "r" (wait_field)
-      : "memory", "r2", "r3", "r12", "lr"
-    );
-    return (int)result;
-  }
+  // Note: A process stops yielding when there is a callback ready to run,
+  // which the kernel executes by modifying the stack frame pushed by the
+  // hardware. The kernel copies the PC value from the stack frame to the LR
+  // field, and sets the PC value to the callback to run. When this frame is
+  // unstacked during the interrupt return, this effectively clobbers the LR
+  // register.
+  //
+  // At this point, the callback function is now executing, which may itself
+  // clobber any of the other caller-saved registers. Thus we mark this
+  // inline assembly as conservatively clobbering all caller-saved registers,
+  // forcing yield to save any live registers.
+  //
+  // Upon direct observation of this function, the LR is the only register
+  // that is live across the SVC invocation. However, if the yield call is
+  // inlined, it is possible that the LR won't be live at all (commonly seen
+  // for the `while (1) { yield(); }` idiom) or that other registers are
+  // live, so it is important to let the compiler do the work here.
+  //
+  // According to the AAPCS: "A subroutine must preserve the contents of the
+  // registers r4-r8, r10, r11 and SP (and r9 in PCS variants that designate
+  // r9 as v6)". As our compilation flags mark r9 as the PIC base register, it
+  // does not need to be saved. Thus we must clobber r0-r3, r12, and LR.
+  uint8_t result = 0;
+  register uint32_t wait __asm__ ("r0") = 0; // yield-no-wait
+  register uint8_t* wait_field __asm__ ("r1") = &result; // yield result ptr
+  __asm__ volatile (
+    "svc 0 \n"
+    :
+    : "r" (wait), "r" (wait_field)
+    : "memory", "r2", "r3", "r12", "lr"
+  );
+  return (int)result;
 }

 void tock_exit(uint32_t completion_code) {
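
For orientation: the yield_for named in the hunk header above is libtock's condition-polling wrapper around yield(). A sketch of its presumable shape, inferred from the header line `void yield_for(bool* cond) {` and the two closing braces at the top of that hunk (not verbatim upstream source):

// Block until an upcall flips *cond, trapping into the kernel via yield()
// on each iteration. Inferred sketch; check libtock's tock.c for the
// exact implementation.
void yield_for(bool* cond) {
  while (!*cond) {
    yield();
  }
}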
@@ -423,40 +373,31 @@ memop_return_t memop(uint32_t op_type, int arg1) {
 // a0-a3. Nothing specifically syscall related is pushed to the process stack.

 void yield(void) {
-  if (yield_check_tasks()) {
-    return;
-  } else {
-    register uint32_t a0 __asm__ ("a0") = 1; // yield-wait
-    register uint32_t wait_field __asm__ ("a1") = 0; // yield result ptr
-    __asm__ volatile (
-      "li a4, 0\n"
-      "ecall\n"
-      :
-      : "r" (a0), "r" (wait_field)
-      : "memory", "a2", "a3", "a4", "a5", "a6", "a7",
-      "t0", "t1", "t2", "t3", "t4", "t5", "t6", "ra"
-    );
-
-  }
+  register uint32_t a0 __asm__ ("a0") = 1; // yield-wait
+  register uint32_t wait_field __asm__ ("a1") = 0; // yield result ptr
+  __asm__ volatile (
+    "li a4, 0\n"
+    "ecall\n"
+    :
+    : "r" (a0), "r" (wait_field)
+    : "memory", "a2", "a3", "a4", "a5", "a6", "a7",
+    "t0", "t1", "t2", "t3", "t4", "t5", "t6", "ra"
+  );
 }

 int yield_no_wait(void) {
-  if (yield_check_tasks()) {
-    return 1;
-  } else {
-    uint8_t result = 0;
-    register uint32_t a0 __asm__ ("a0") = 0; // yield-no-wait
-    register uint8_t* a1 __asm__ ("a1") = &result;
-    __asm__ volatile (
-      "li a4, 0\n"
-      "ecall\n"
-      :
-      : "r" (a0), "r" (a1)
-      : "memory", "a2", "a3", "a4", "a5", "a6", "a7",
-      "t0", "t1", "t2", "t3", "t4", "t5", "t6", "ra"
-    );
-    return (int)result;
-  }
+  uint8_t result = 0;
+  register uint32_t a0 __asm__ ("a0") = 0; // yield-no-wait
+  register uint8_t* a1 __asm__ ("a1") = &result;
+  __asm__ volatile (
+    "li a4, 0\n"
+    "ecall\n"
+    :
+    : "r" (a0), "r" (a1)
+    : "memory", "a2", "a3", "a4", "a5", "a6", "a7",
+    "t0", "t1", "t2", "t3", "t4", "t5", "t6", "ra"
+  );
+  return (int)result;
 }
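
On both architectures, yield_no_wait() reports through the byte in r1/a1 whether the kernel delivered a pending upcall before returning. A hypothetical non-blocking drain loop built on that contract (drain_pending_upcalls is invented for illustration):

// Hypothetical caller pattern built on yield_no_wait(): run any
// already-pending upcalls without blocking, then carry on.
static void drain_pending_upcalls(void) {
  while (yield_no_wait()) {
    // A queued upcall ran; loop in case more are pending.
  }
}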