//go:build scheduler.threads

package task

import (
	"sync/atomic"
	"unsafe"
)

// If true, print verbose debug logs.
const verbose = false

// Scheduler-specific state.
type state struct {
	// Goroutine ID. The number itself is not significant and may eventually
	// wrap around, but it is useful for debugging.
	id uint64

	// Semaphore to pause/resume the thread atomically.
	sem sem
}

// Goroutine counter, starting at 0 for the main goroutine.
var goroutineID uint64

var mainTask Task

func OnSystemStack() bool {
	runtimePanic("todo: task.OnSystemStack")
	return false
}

// Initialize the main goroutine state. Must be called by the runtime on
// startup, before starting any other goroutines.
func Init() {
	// Sanity check. With ThinLTO, this should be optimized away.
	if unsafe.Sizeof(pthread_mutex{}) != tinygo_mutex_size() {
		panic("internal/task: unexpected sizeof(pthread_mutex_t)")
	}
	if unsafe.Alignof(pthread_mutex{}) != tinygo_mutex_align() {
		panic("internal/task: unexpected _Alignof(pthread_mutex_t)")
	}
	if unsafe.Sizeof(sem{}) != tinygo_sem_size() {
		panic("internal/task: unexpected sizeof(sem_t)")
	}
	if unsafe.Alignof(sem{}) != tinygo_sem_align() {
		panic("internal/task: unexpected _Alignof(sem_t)")
	}

	mainTask.init()
	tinygo_task_set_current(&mainTask)
}
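
// The sanity checks above compare the Go-side mirror types against the real C
// types. Those mirrors are expected to be declared in a platform-specific
// sibling file; a minimal sketch of what they could look like (the sizes below
// are placeholders in the style of Linux/amd64, not the values for every
// platform):
//
//	type pthread_mutex struct {
//		_ [5]uint64 // 40 bytes, 8-byte aligned, matching pthread_mutex_t
//	}
//
//	type sem struct {
//		_ [4]uint64 // 32 bytes, 8-byte aligned, matching sem_t
//	}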

func (t *Task) init() {
	sem_init(&t.state.sem, 0, 0)
}

// Return the task struct for the current thread.
func Current() *Task {
	t := (*Task)(tinygo_task_current())
	if t == nil {
		runtimePanic("unknown current task")
	}
	return t
}

// Pause pauses the current task until it is resumed by another task.
// It is possible that another task has already called Resume() on this task
// before it hits Pause(), in which case the task won't be paused but will
// continue immediately.
func Pause() {
	// Wait until resumed.
	t := Current()
	if verbose {
		println("*** pause:", t.state.id)
	}
	if sem_wait(&t.state.sem) != 0 {
		runtimePanic("sem_wait error!")
	}
}

// Resume the given task.
// It is legal to resume a task before it gets paused; it means that the next
// call to Pause() won't block but will continue immediately. In practice this
// sometimes happens in channel operations, where Resume() might get called
// between the channel unlock and the call to Pause().
func (t *Task) Resume() {
	if verbose {
		println("*** resume:", t.state.id)
	}
	// Increment the semaphore counter.
	// If the task is currently paused in sem_wait, it will resume.
	// If the task is not yet paused, the next call to sem_wait will continue
	// immediately.
	if sem_post(&t.state.sem) != 0 {
		runtimePanic("sem_post error!")
	}
}
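
// A minimal sketch of how Pause and Resume pair up in practice (the wait-queue
// plumbing below is hypothetical, not part of this package):
//
//	// Goroutine A: publish itself somewhere a waker can find it, then park.
//	waiter := Current()
//	waitQueue.push(waiter) // hypothetical queue, protected by some lock
//	Pause()                // blocks here unless Resume() already happened
//
//	// Goroutine B: wake the parked goroutine.
//	waitQueue.pop().Resume()
//
// Because the semaphore counts posts, a Resume() that lands between the queue
// push and the Pause() call is not lost: Pause() simply returns immediately.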

// Start a new goroutine by starting a new OS thread. The stackSize parameter
// is not used here: the new thread gets its own OS-managed stack.
func start(fn uintptr, args unsafe.Pointer, stackSize uintptr) {
	t := &Task{}
	t.state.id = atomic.AddUint64(&goroutineID, 1)
	if verbose {
		println("*** start:", t.state.id, "from", Current().state.id)
	}
	t.init()
	errCode := tinygo_task_start(fn, args, t, t.state.id)
	if errCode != 0 {
		runtimePanic("could not start thread")
	}
}
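
// start is expected to be invoked by compiler-generated code rather than by
// hand: a `go someFunc(arg)` statement is lowered into roughly the call
// sketched below. The argument bundle and wrapper pointer are hypothetical
// illustrations, not the actual lowering:
//
//	bundle := &struct{ arg int }{arg: 42}            // packed call arguments
//	start(wrapperFuncPtr, unsafe.Pointer(bundle), 0) // stackSize unused here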

type AsyncLock struct {
	// TODO: lock on macOS needs to be initialized with a magic value
	pthread_mutex
}

func (l *pthread_mutex) Lock() {
	errCode := pthread_mutex_lock(l)
	if errCode != 0 {
		runtimePanic("pthread_mutex_lock failed")
	}
}

// TryLock tries to acquire the mutex without blocking and reports whether it
// succeeded.
func (l *pthread_mutex) TryLock() bool {
	return pthread_mutex_trylock(l) == 0
}

func (l *pthread_mutex) Unlock() {
	errCode := pthread_mutex_unlock(l)
	if errCode != 0 {
		runtimePanic("pthread_mutex_unlock failed")
	}
}
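
// A brief usage sketch (the variable and critical section below are
// hypothetical, not part of this package):
//
//	var schedLock AsyncLock
//
//	func withSchedLock() {
//		schedLock.Lock()
//		defer schedLock.Unlock()
//		// ... mutate state shared between OS threads ...
//	}
//
// The TODO on AsyncLock most likely refers to macOS requiring pthread_mutex_t
// to be initialized with PTHREAD_MUTEX_INITIALIZER (which is not all zeros
// there), so a zero-valued AsyncLock is not yet portable.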

//go:linkname runtimePanic runtime.runtimePanic
func runtimePanic(msg string)

// Using //go:linkname instead of //export so that we don't tell the compiler
// that the 't' parameter won't escape (because it will).
//
//go:linkname tinygo_task_set_current tinygo_task_set_current
func tinygo_task_set_current(t *Task)

// Same as for tinygo_task_set_current above.
//
//go:linkname tinygo_task_start tinygo_task_start
func tinygo_task_start(fn uintptr, args unsafe.Pointer, t *Task, id uint64) int32

//export tinygo_task_current
func tinygo_task_current() unsafe.Pointer

//export tinygo_mutex_size
func tinygo_mutex_size() uintptr

//export tinygo_mutex_align
func tinygo_mutex_align() uintptr

//export pthread_mutex_lock
func pthread_mutex_lock(*pthread_mutex) int32

//export pthread_mutex_trylock
func pthread_mutex_trylock(*pthread_mutex) int32

//export pthread_mutex_unlock
func pthread_mutex_unlock(*pthread_mutex) int32

//export sem_init
func sem_init(s *sem, pshared int32, value uint32) int32

//export sem_wait
func sem_wait(*sem) int32

//export sem_post
func sem_post(*sem) int32

//export tinygo_sem_size
func tinygo_sem_size() uintptr

//export tinygo_sem_align
func tinygo_sem_align() uintptr