diff --git a/compiler/compiler.go b/compiler/compiler.go
index 14b45a4327..f1a50af01b 100644
--- a/compiler/compiler.go
+++ b/compiler/compiler.go
@@ -180,7 +180,7 @@ func Compile(pkgName string, machine llvm.TargetMachine, config *compileopts.Con
 		path = path[len(tinygoPath+"/src/"):]
 	}
 	switch path {
-	case "machine", "os", "reflect", "runtime", "runtime/interrupt", "runtime/volatile", "sync", "testing", "internal/reflectlite", "internal/task":
+	case "machine", "os", "reflect", "runtime", "runtime/interrupt", "runtime/volatile", "runtime/sync", "sync", "testing", "internal/reflectlite", "internal/task":
 		return path
 	default:
 		if strings.HasPrefix(path, "device/") || strings.HasPrefix(path, "examples/") {
@@ -252,6 +252,13 @@ func Compile(pkgName string, machine llvm.TargetMachine, config *compileopts.Con
 
 	c.loadASTComments(lprogram)
 
+	// Forcibly preload special types.
+	runtimePkg := c.ir.Program.ImportedPackage("runtime")
+	c.getLLVMType(runtimePkg.Type("_interface").Type())
+	c.getLLVMType(runtimePkg.Type("_string").Type())
+	c.getLLVMType(runtimePkg.Type("hashmap").Type())
+	c.getLLVMType(runtimePkg.Type("channel").Type())
+
 	// Declare runtime types.
 	// TODO: lazily create runtime types in getLLVMRuntimeType when they are
 	// needed. Eventually this will be required anyway, when packages are
diff --git a/src/examples/intsync/README.md b/src/examples/intsync/README.md
new file mode 100644
index 0000000000..cfff3b80a1
--- /dev/null
+++ b/src/examples/intsync/README.md
@@ -0,0 +1,11 @@
+# TinyGo ARM SysTick example w/ condition variable
+
+This example uses the ARM System Timer to wake a goroutine.
+That goroutine sends to a channel, and another goroutine toggles an LED on every notification.
+
+Many ARM-based chips have this timer feature. If you run the example and the
+LED blinks, then you have one.
+
+The System Timer runs from a cycle counter. The more cycles, the slower the
+LED will blink. This counter is 24 bits wide, which places an upper bound on
+the number of cycles, and the slowness of the blinking.
diff --git a/src/examples/intsync/intsync.go b/src/examples/intsync/intsync.go
new file mode 100644
index 0000000000..eea26df3f5
--- /dev/null
+++ b/src/examples/intsync/intsync.go
@@ -0,0 +1,44 @@
+package main
+
+import (
+	"device/arm"
+	"machine"
+	"runtime/sync"
+)
+
+func main() {
+	machine.LED.Configure(machine.PinConfig{Mode: machine.PinOutput})
+
+	timerch := make(chan struct{})
+
+	// Toggle the LED on every receive from the timer channel.
+	go func() {
+		for {
+			machine.LED.High()
+			<-timerch
+			machine.LED.Low()
+			<-timerch
+		}
+	}()
+
+	// Send to the timer channel on every timerCond notification.
+	go func() {
+		for {
+			timerCond.Wait()
+			timerch <- struct{}{}
+		}
+	}()
+
+	// timer fires 10 times per second
+	arm.SetupSystemTimer(machine.CPUFrequency() / 10)
+
+	select {}
+}
+
+// timerCond is a condition variable used to handle the systick interrupts.
+var timerCond sync.IntCond
+
+//go:export SysTick_Handler
+func timer_isr() {
+	timerCond.Notify()
+}
diff --git a/src/internal/task/interruptqueue.go b/src/internal/task/interruptqueue.go
new file mode 100644
index 0000000000..4eb48a8bc2
--- /dev/null
+++ b/src/internal/task/interruptqueue.go
@@ -0,0 +1,66 @@
+package task
+
+import "runtime/volatile"
+
+// InterruptQueue is a specialized version of Queue, designed for working with interrupts.
+// It can be safely pushed to from an interrupt (assuming that a memory reference to the task remains elsewhere), and popped outside of an interrupt.
+// It cannot be pushed to outside of an interrupt or popped inside an interrupt.
+type InterruptQueue struct {
+	// This implementation uses a double-buffer of queues.
+	// bufSelect contains the index of the queue currently available for pop operations.
+	// The opposite queue is available for push operations.
+	bufSelect volatile.Register8
+	queues    [2]Queue
+}
+
+// Push a task onto the queue.
+// This can only be safely called from inside an interrupt.
+func (q *InterruptQueue) Push(t *Task) {
+	// Avoid nesting interrupts inside here.
+	var nest nonest
+	nest.Lock()
+	defer nest.Unlock()
+
+	// Push to inactive queue.
+	q.queues[1-q.bufSelect.Get()].Push(t)
+}
+
+// Check if the queue is empty.
+// This will return false if any tasks were pushed strictly before this call.
+// If any pushes occur during the call, the queue may or may not be marked as empty.
+// This cannot be safely called inside an interrupt.
+func (q *InterruptQueue) Empty() bool {
+	// Check currently active queue.
+	active := q.bufSelect.Get() & 1
+	if !q.queues[active].Empty() {
+		return false
+	}
+
+	// Swap to other queue.
+	active ^= 1
+	q.bufSelect.Set(active)
+
+	// Check other queue.
+	return q.queues[active].Empty()
+}
+
+// Pop removes a single task from the queue.
+// This will return nil if the queue is empty (with the same semantics as Empty).
+// This cannot be safely called inside an interrupt.
+func (q *InterruptQueue) Pop() *Task {
+	// Select non-empty queue if one exists.
+	if q.Empty() {
+		return nil
+	}
+
+	// Pop from active queue.
+	return q.queues[q.bufSelect.Get()&1].Pop()
+}
+
+// AppendTo pops all tasks from this queue and pushes them to another queue.
+// This operation has the same semantics as repeated calls to pop.
+func (q *InterruptQueue) AppendTo(other *Queue) {
+	for !q.Empty() {
+		q.queues[q.bufSelect.Get()&1].AppendTo(other)
+	}
+}
diff --git a/src/internal/task/nonest_arm.go b/src/internal/task/nonest_arm.go
new file mode 100644
index 0000000000..4040e430f6
--- /dev/null
+++ b/src/internal/task/nonest_arm.go
@@ -0,0 +1,20 @@
+// +build arm,baremetal,!avr
+
+package task
+
+import "device/arm"
+
+// nonest is a sync.Locker that blocks nested interrupts while held.
+type nonest struct {
+	state uintptr
+}
+
+//go:inline
+func (n *nonest) Lock() {
+	n.state = arm.DisableInterrupts()
+}
+
+//go:inline
+func (n *nonest) Unlock() {
+	arm.EnableInterrupts(n.state)
+}
diff --git a/src/internal/task/nonest_none.go b/src/internal/task/nonest_none.go
new file mode 100644
index 0000000000..1589b92942
--- /dev/null
+++ b/src/internal/task/nonest_none.go
@@ -0,0 +1,10 @@
+// +build !arm !baremetal avr
+
+package task
+
+// nonest is a sync.Locker that blocks nested interrupts while held.
+// On non-ARM platforms, this is a no-op.
+type nonest struct{}
+
+func (n nonest) Lock() {}
+func (n nonest) Unlock() {}
diff --git a/src/internal/task/queue.go b/src/internal/task/queue.go
index c86bc596cb..10cfde6935 100644
--- a/src/internal/task/queue.go
+++ b/src/internal/task/queue.go
@@ -37,6 +37,11 @@ func (q *Queue) Pop() *Task {
 	return t
 }
 
+// Empty checks if there are any tasks in the queue.
+func (q *Queue) Empty() bool {
+	return q.head == nil
+}
+
 // Append pops the contents of another queue and pushes them onto the end of this queue.
 func (q *Queue) Append(other *Queue) {
 	if q.head == nil {
@@ -48,6 +53,11 @@ func (q *Queue) Append(other *Queue) {
 	other.head, other.tail = nil, nil
 }
 
+// AppendTo pops the contents of this queue and pushes them onto another queue.
+func (q *Queue) AppendTo(other *Queue) {
+	other.Append(q)
+}
+
 // Stack is a LIFO container of tasks.
 // The zero value is an empty stack.
 // This is slightly cheaper than a queue, so it can be preferable when strict ordering is not necessary.
diff --git a/src/runtime/runtime_atsamd21.go b/src/runtime/runtime_atsamd21.go
index 5c535cf1d2..1e3fa38c1b 100644
--- a/src/runtime/runtime_atsamd21.go
+++ b/src/runtime/runtime_atsamd21.go
@@ -3,11 +3,10 @@
 package runtime
 
 import (
-	"device/arm"
 	"device/sam"
+	"internal/task"
 	"machine"
 	"runtime/interrupt"
-	"runtime/volatile"
 	"unsafe"
 )
 
@@ -217,10 +216,7 @@ func initRTC() {
 	waitForSync()
 
 	intr := interrupt.New(sam.IRQ_RTC, func(intr interrupt.Interrupt) {
-		// disable IRQ for CMP0 compare
-		sam.RTC_MODE0.INTFLAG.Set(sam.RTC_MODE0_INTENSET_CMP0)
-
-		timerWakeup.Set(1)
+		rtc.handleInterrupt()
 	})
 	intr.SetPriority(0xc0)
 	intr.Enable()
@@ -239,42 +235,22 @@ var (
 	timerLastCounter uint64
 )
 
-var timerWakeup volatile.Register8
-
-const asyncScheduler = false
+type rtcTimer struct{}
 
-// sleepTicks should sleep for d number of microseconds.
-func sleepTicks(d timeUnit) {
-	for d != 0 {
-		ticks() // update timestamp
-		ticks := uint32(d)
-		timerSleep(ticks)
-		d -= timeUnit(ticks)
+func (t rtcTimer) setTimer(wakeup timeUnit) {
+	now := ticks()
+	if now >= wakeup {
+		wakeup = now
 	}
-}
-
-// ticks returns number of microseconds since start.
-func ticks() timeUnit {
-	// request read of count
-	sam.RTC_MODE0.READREQ.Set(sam.RTC_MODE0_READREQ_RREQ)
-	waitForSync()
-	rtcCounter := (uint64(sam.RTC_MODE0.COUNT.Get()) * 305) / 10 // each counter tick == 30.5us
-	offset := (rtcCounter - timerLastCounter) // change since last measurement
-	timerLastCounter = rtcCounter
-	timestamp += timeUnit(offset) // TODO: not precise
-	return timestamp
-}
+	delay := wakeup - now
 
-// ticks are in microseconds
-func timerSleep(ticks uint32) {
-	timerWakeup.Set(0)
-	if ticks < 214 {
+	if delay < 214 {
 		// due to around 183us delay waiting for the register value to sync, the minimum sleep value
 		// for the SAMD21 is 214us.
 		// For related info, see:
 		// https://community.atmel.com/comment/2507091#comment-2507091
-		ticks = 214
+		delay = 214
 	}
 
 	// request read of count
@@ -283,15 +259,46 @@ func timerSleep(ticks uint32) {
 
 	// set compare value
 	cnt := sam.RTC_MODE0.COUNT.Get()
-	sam.RTC_MODE0.COMP0.Set(uint32(cnt) + (ticks * 10 / 305)) // each counter tick == 30.5us
+	sam.RTC_MODE0.COMP0.Set(uint32(cnt) + (uint32(delay) * 10 / 305)) // each counter tick == 30.5us
 	waitForSync()
 
 	// enable IRQ for CMP0 compare
 	sam.RTC_MODE0.INTENSET.SetBits(sam.RTC_MODE0_INTENSET_CMP0)
+}
 
-	for timerWakeup.Get() == 0 {
-		arm.Asm("wfi")
-	}
+func (t rtcTimer) disableTimer() {
+	// disable IRQ for CMP0 compare
+	sam.RTC_MODE0.INTFLAG.Set(sam.RTC_MODE0_INTENSET_CMP0)
+}
+
+var rtc = timerController{
+	t: rtcTimer{},
+}
+
+// Add this task to the sleep queue, assuming its state is set to sleeping.
+func addSleepTask(t *task.Task, duration int64) {
+	rtc.enqueue(t, ticks()+timeUnit(duration/tickMicros))
+}
+
+// devicePoll polls for device-specific events.
+// For atsamd21, the only device-specific events are RTC timers.
+func devicePoll() bool {
+	return rtc.poll()
+}
+
+const asyncScheduler = false
+
+// ticks returns number of microseconds since start.
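+// The RTC advances one count roughly every 30.5us, so the raw counter is
+// scaled by 305/10 to get microseconds before being accumulated into timestamp.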
+func ticks() timeUnit {
+	// request read of count
+	sam.RTC_MODE0.READREQ.Set(sam.RTC_MODE0_READREQ_RREQ)
+	waitForSync()
+
+	rtcCounter := (uint64(sam.RTC_MODE0.COUNT.Get()) * 305) / 10 // each counter tick == 30.5us
+	offset := (rtcCounter - timerLastCounter) // change since last measurement
+	timerLastCounter = rtcCounter
+	timestamp += timeUnit(offset) // TODO: not precise
+	return timestamp
 }
 
 func initUSBClock() {
diff --git a/src/runtime/scheduler.go b/src/runtime/scheduler.go
index 49cf8e4027..31c4cac347 100644
--- a/src/runtime/scheduler.go
+++ b/src/runtime/scheduler.go
@@ -20,12 +20,8 @@ import (
 
 const schedulerDebug = false
 
-// Queues used by the scheduler.
-var (
-	runqueue           task.Queue
-	sleepQueue         *task.Task
-	sleepQueueBaseTime timeUnit
-)
+// runqueue is a queue of tasks that are ready to run.
+var runqueue task.Queue
 
 // Simple logging, for debugging.
 func scheduleLog(msg string) {
@@ -74,95 +70,51 @@ func runqueuePushBack(t *task.Task) {
 	runqueue.Push(t)
 }
 
-// Add this task to the sleep queue, assuming its state is set to sleeping.
-func addSleepTask(t *task.Task, duration int64) {
-	if schedulerDebug {
-		println(" set sleep:", t, uint(duration/tickMicros))
-		if t.Next != nil {
-			panic("runtime: addSleepTask: expected next task to be nil")
-		}
-	}
-	t.Data = uint(duration / tickMicros) // TODO: longer durations
-	now := ticks()
-	if sleepQueue == nil {
-		scheduleLog(" -> sleep new queue")
+var schedulerDone bool
 
-		// set new base time
-		sleepQueueBaseTime = now
-	}
-
-	// Add to sleep queue.
-	q := &sleepQueue
-	for ; *q != nil; q = &(*q).Next {
-		if t.Data < (*q).Data {
-			// this will finish earlier than the next - insert here
-			break
-		} else {
-			// this will finish later - adjust delay
-			t.Data -= (*q).Data
-		}
-	}
-	if *q != nil {
-		// cut delay time between this sleep task and the next
-		(*q).Data -= t.Data
-	}
-	t.Next = *q
-	*q = t
-}
+const pollInterval = 256
 
 // Run the scheduler until all tasks have finished.
 func scheduler() {
-	// Main scheduler loop.
-	var now timeUnit
-	for {
-		scheduleLog("")
-		scheduleLog(" schedule")
-		if sleepQueue != nil {
-			now = ticks()
-		}
-
-		// Add tasks that are done sleeping to the end of the runqueue so they
-		// will be executed soon.
-		if sleepQueue != nil && now-sleepQueueBaseTime >= timeUnit(sleepQueue.Data) {
-			t := sleepQueue
-			scheduleLogTask(" awake:", t)
-			sleepQueueBaseTime += timeUnit(t.Data)
-			sleepQueue = t.Next
-			t.Next = nil
-			runqueue.Push(t)
-		}
-
+	var n uint
+	for !schedulerDone {
+		// Get the next available task.
 		t := runqueue.Pop()
 		if t == nil {
-			if sleepQueue == nil {
-				// No more tasks to execute.
-				// It would be nice if we could detect deadlocks here, because
-				// there might still be functions waiting on each other in a
-				// deadlock.
-				scheduleLog(" no tasks left!")
-				return
-			}
-			timeLeft := timeUnit(sleepQueue.Data) - (now - sleepQueueBaseTime)
-			if schedulerDebug {
-				println(" sleeping...", sleepQueue, uint(timeLeft))
-				for t := sleepQueue; t != nil; t = t.Next {
-					println(" task sleeping:", t, timeUnit(t.Data))
-				}
+			scheduleLog(" runqueue empty")
+
+			// Check for any available tasks.
+			if poll() {
+				// A task was found and pushed onto the runqueue.
+				continue
 			}
-			sleepTicks(timeLeft)
+
+			// Sleep until another task is available.
+			wait()
 			if asyncScheduler {
-				// The sleepTicks function above only sets a timeout at which
-				// point the scheduler will be called again. It does not really
-				// sleep.
-				break
+				// This platform (WebAssembly) requires us to return control to the host while waiting.
+				// The host will eventually re-invoke the scheduler when there is work available.
+				return
 			}
+
+			// Try again.
 			continue
 		}
 
-		// Run the given task.
-		scheduleLogTask(" run:", t)
+		// Run task.
+		scheduleLogTask("resuming:", t)
 		t.Resume()
+
+		// Periodically poll for additional events.
+		if !asyncScheduler && n > pollInterval {
+			poll()
+			n = 0
+		} else {
+			n++
+		}
 	}
+
+	scheduleLog(" program complete!")
 }
 
 func Gosched() {
diff --git a/src/runtime/scheduler_any.go b/src/runtime/scheduler_any.go
index 41a904535d..e21c4a8c3f 100644
--- a/src/runtime/scheduler_any.go
+++ b/src/runtime/scheduler_any.go
@@ -19,6 +19,7 @@ func run() {
 		initAll()
 		postinit()
 		callMain()
+		schedulerDone = true
 	}()
 	scheduler()
 }
diff --git a/src/runtime/scheduler_arm.go b/src/runtime/scheduler_arm.go
new file mode 100644
index 0000000000..a472ccb837
--- /dev/null
+++ b/src/runtime/scheduler_arm.go
@@ -0,0 +1,34 @@
+// +build arm,!avr,baremetal
+// +build atsamd21
+
+package runtime
+
+import (
+	"device/arm"
+	"internal/task"
+)
+
+var intq task.InterruptQueue
+
+func pushInterrupt(t *task.Task) {
+	intq.Push(t)
+}
+
+func poll() bool {
+	var found bool
+
+	// Check the interrupt queue.
+	if !intq.Empty() {
+		intq.AppendTo(&runqueue)
+		found = true
+	}
+
+	// Check for device-specific events.
+	found = devicePoll() || found
+
+	return found
+}
+
+func wait() {
+	arm.Asm("wfi")
+}
diff --git a/src/runtime/scheduler_legacy.go b/src/runtime/scheduler_legacy.go
new file mode 100644
index 0000000000..af071172e7
--- /dev/null
+++ b/src/runtime/scheduler_legacy.go
@@ -0,0 +1,102 @@
+// +build wasm !atsamd21
+
+package runtime
+
+import "internal/task"
+
+var (
+	sleepQueue         *task.Task
+	sleepQueueBaseTime timeUnit
+)
+
+// Add this task to the sleep queue, assuming its state is set to sleeping.
+func addSleepTask(t *task.Task, duration int64) {
+	if schedulerDebug {
+		println(" set sleep:", t, uint(duration/tickMicros))
+		if t.Next != nil {
+			panic("runtime: addSleepTask: expected next task to be nil")
+		}
+	}
+	t.Data = uint(duration / tickMicros) // TODO: longer durations
+	now := ticks()
+	if sleepQueue == nil {
+		scheduleLog(" -> sleep new queue")
+
+		// set new base time
+		sleepQueueBaseTime = now
+	}
+
+	// Add to sleep queue.
+	q := &sleepQueue
+	for ; *q != nil; q = &(*q).Next {
+		if t.Data < (*q).Data {
+			// this will finish earlier than the next - insert here
+			break
+		} else {
+			// this will finish later - adjust delay
+			t.Data -= (*q).Data
+		}
+	}
+	if *q != nil {
+		// cut delay time between this sleep task and the next
+		(*q).Data -= t.Data
+	}
+	t.Next = *q
+	*q = t
+}
+
+// pollSleepQueue checks the sleep queue to see if any tasks are now ready to run.
+func pollSleepQueue(now timeUnit) bool {
+	// Add tasks that are done sleeping to the end of the runqueue so they
+	// will be executed soon.
+	var awoke bool
+	for sleepQueue != nil && now-sleepQueueBaseTime >= timeUnit(sleepQueue.Data) {
+		t := sleepQueue
+		scheduleLogTask(" awake:", t)
+		sleepQueueBaseTime += timeUnit(t.Data)
+		sleepQueue = t.Next
+		t.Next = nil
+		runqueue.Push(t)
+		awoke = true
+	}
+
+	return awoke
+}
+
+var curTime timeUnit
+
+// poll checks for any events that are ready and pushes them onto the runqueue.
+// It returns true if any tasks were ready.
+func poll() bool {
+	scheduleLog(" polling for events")
+	if sleepQueue == nil {
+		return false
+	}
+	curTime = ticks()
+	return pollSleepQueue(curTime)
+}
+
+// wait sleeps until any tasks are awoken by external events.
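+// With an empty sleep queue there is nothing that could wake us up, so this is
+// reported as a deadlock, except on WebAssembly where the host may still
+// deliver events through callbacks.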
+func wait() {
+	scheduleLog(" waiting for events")
+	if sleepQueue == nil {
+		// There are no timers, so timer wakeup is impossible.
+		if asyncScheduler {
+			// On WASM, sometimes callbacks are used to process wakeups from JS events.
+			// Therefore, we do not actually know if we deadlocked or not.
+			scheduleLog(" no tasks left!")
+			return
+		}
+		runtimePanic("deadlock")
+	}
+
+	// Sleep until the next timer hits.
+	timeLeft := timeUnit(sleepQueue.Data) - (curTime - sleepQueueBaseTime)
+	if schedulerDebug {
+		println(" sleeping...", sleepQueue, uint(timeLeft))
+		for t := sleepQueue; t != nil; t = t.Next {
+			println(" task sleeping:", t, timeUnit(t.Data))
+		}
+	}
+	sleepTicks(timeLeft)
+}
diff --git a/src/runtime/sync/cond.go b/src/runtime/sync/cond.go
new file mode 100644
index 0000000000..f04c90c8cc
--- /dev/null
+++ b/src/runtime/sync/cond.go
@@ -0,0 +1,76 @@
+package sync
+
+import (
+	"internal/task"
+	"runtime/volatile"
+	"unsafe"
+)
+
+//go:linkname pushInterrupt runtime.pushInterrupt
+func pushInterrupt(*task.Task)
+
+// IntCond is a condition variable which can be safely signaled from an interrupt.
+type IntCond struct {
+	// setup is a flag which is used to indicate that a waiting goroutine is currently being set up.
+	// While this is set, the signaler will not push the next task onto the runqueue.
+	setup volatile.Register8
+
+	// signaledEarly is a flag used to indicate that the condition variable was signaled during or before setup.
+	signaledEarly volatile.Register8
+
+	// signaledNext is a flag used to indicate that the task stored in next has been signaled.
+	signaledNext volatile.Register8
+
+	// next is the next waiting task.
+	next *task.Task
+}
+
+// Wait for a signal.
+// This cannot be used outside of a goroutine.
+// This is not safe for concurrent use.
+func (c *IntCond) Wait() {
+	if c.next != nil {
+		panic("concurrent waiters on interrupt condition")
+	}
+
+	// Store the task pointer to be reawakened.
+	c.setup.Set(1)
+	volatile.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&c.next)), unsafe.Pointer(task.Current()))
+	c.setup.Set(0)
+	if c.signaledEarly.Get() != 0 {
+		// We were signaled during setup or before.
+		volatile.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&c.next)), nil)
+		c.signaledEarly.Set(0)
+		return
+	}
+
+	// Wait to be reawakened.
+	task.Pause()
+
+	// Remove the task pointer.
+	volatile.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&c.next)), nil)
+
+	// Clear the signal flag.
+	c.signaledNext.Set(0)
+}
+
+// Notify a waiting goroutine.
+// If no goroutine is waiting, this flags the condition variable as notified and the next goroutine to wait will wake up immediately.
+// If the condition variable already has a pending signal, this does nothing and returns false.
+// Notify cannot be safely nested.
+func (c *IntCond) Notify() bool {
+	switch {
+	case c.signaledEarly.Get() != 0 || c.signaledNext.Get() != 0:
+		// There is already an unconsumed signal.
+		return false
+	case c.setup.Get() != 0 || c.next == nil:
+		// Nothing is waiting yet - propagate an early signal.
+		c.signaledEarly.Set(1)
+	default:
+		// Reawaken the waiting task.
+		c.signaledNext.Set(1)
+		pushInterrupt(c.next)
+	}
+
+	return true
+}
diff --git a/src/runtime/timer.go b/src/runtime/timer.go
new file mode 100644
index 0000000000..b5fc4275c0
--- /dev/null
+++ b/src/runtime/timer.go
@@ -0,0 +1,157 @@
+package runtime
+
+import (
+	"internal/task"
+	"runtime/volatile"
+)
+
+// timer is an interface which can be implemented as a wrapper around a timer interrupt.
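+// A chip provides one implementation (for example rtcTimer on the atsamd21),
+// which is then wrapped by a timerController to add queueing and polling.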
+type timer interface {
+	// setTimer configures the timer interrupt to fire when the ticks() value reaches the specified wakeup time.
+	// The timer interrupt must be disabled before calling this function.
+	setTimer(wakeup timeUnit)
+
+	// disableTimer disables the timer interrupt.
+	disableTimer()
+}
+
+// timerController is a type which manages a hardware timer, with builtin queueing.
+type timerController struct {
+	// fired is used to indicate whether the timer interrupt handler fired:
+	// 0: the timer interrupt handler has not fired
+	// 1: the timer interrupt handler fired
+	fired volatile.Register8
+
+	t timer
+
+	baseTime timeUnit
+	queue    *task.Task
+}
+
+// set configures the timer interrupt to fire when the next timer expires.
+// This function assumes that there is a timer on the queue.
+func (c *timerController) set() {
+	// Disable the timer to avoid a race condition.
+	c.t.disableTimer()
+
+	// Clear the fired flag.
+	c.fired.Set(0)
+
+	// Set the timer with the appropriate time.
+	c.t.setTimer(c.baseTime)
+}
+
+// enqueue queues a task to be awoken at the specified wakeup time.
+func (c *timerController) enqueue(t *task.Task, wakeup timeUnit) {
+	t.Data = 0
+
+	// Insert task into timer queue.
+	var baseUpdated bool
+	switch {
+	case c.queue == nil:
+		// There were no timers previously in the queue.
+		scheduleLog(" new timer queue")
+		c.queue = t
+		c.baseTime = wakeup
+		baseUpdated = true
+	case wakeup < c.baseTime:
+		// This task comes before everything else on the queue.
+		c.queue.Data += uint(c.baseTime - wakeup)
+		t.Next = c.queue
+		c.queue = t
+		c.baseTime = wakeup
+		baseUpdated = true
+	default:
+		// Insert this task later in the queue.
+		// Walk past every task that fires at or before the new wakeup time,
+		// keeping offset relative to the task we are standing on.
+		offset := wakeup - c.baseTime
+		prev := c.queue
+		for ; prev.Next != nil && offset >= timeUnit(prev.Next.Data); prev = prev.Next {
+			offset -= timeUnit(prev.Next.Data)
+		}
+		prev.Next, t.Next = t, prev.Next
+		if t.Next != nil {
+			t.Next.Data -= uint(offset)
+		}
+		t.Data = uint(offset)
+	}
+
+	if c.fired.Get() != 0 {
+		// The timer already fired.
+		// Poll for completed timer events.
+		c.poll()
+		return
+	}
+
+	if baseUpdated {
+		// The base time has changed, so the timer interrupt needs to be reconfigured.
+		c.set()
+	}
+}
+
+// poll checks for any expired timers and schedules them.
+// This must be called periodically by the scheduler.
+func (c *timerController) poll() bool {
+	if c.fired.Get() == 0 {
+		// The timer has not fired.
+		return false
+	}
+
+	// Clear fired flag.
+	c.fired.Set(0)
+
+	if c.queue == nil {
+		// The timer fired even though we did not ask it to.
+		scheduleLog(" unrequested timer fired")
+		return false
+	}
+
+	// The first timer is known to be complete.
+	{
+		t := c.queue
+		c.queue = t.Next
+		t.Next = nil
+		runqueue.Push(t)
+	}
+
+	if c.queue == nil {
+		// Bail out early to avoid the call to ticks.
+		scheduleLog(" all timers have expired")
+		return true
+	}
+
+	// Check the time.
+	now := ticks()
+
+	// Pop and schedule all tasks with expired timers.
+	for c.queue != nil && c.baseTime+timeUnit(c.queue.Data) < now {
+		// Pop a task from the queue.
+		t := c.queue
+		c.queue = t.Next
+		t.Next = nil
+
+		// Adjust base time.
+		c.baseTime += timeUnit(t.Data)
+
+		// Schedule task.
+		runqueue.Push(t)
+	}
+
+	if c.queue == nil {
+		scheduleLog(" all timers have expired")
+		return true
+	}
+
+	// Normalize representation of queue such that the first task has a zero offset.
+	c.baseTime += timeUnit(c.queue.Data)
+	c.queue.Data = 0
+
+	// Configure the interrupt.
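+	// The queue is known to be non-empty at this point, which set() requires.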
+	c.set()
+
+	return true
+}
+
+func (c *timerController) handleInterrupt() {
+	c.t.disableTimer()
+	c.fired.Set(1)
+}
diff --git a/src/runtime/volatile/volatile.go b/src/runtime/volatile/volatile.go
index 47262f3470..b61e92fc9b 100644
--- a/src/runtime/volatile/volatile.go
+++ b/src/runtime/volatile/volatile.go
@@ -15,6 +15,8 @@
 // and https://blog.regehr.org/archives/28.
 package volatile
 
+import "unsafe"
+
 // LoadUint8 loads the volatile value *addr.
 func LoadUint8(addr *uint8) (val uint8)
 
@@ -24,6 +26,9 @@ func LoadUint16(addr *uint16) (val uint16)
 
 // LoadUint32 loads the volatile value *addr.
 func LoadUint32(addr *uint32) (val uint32)
 
+// LoadPointer loads the volatile value *addr.
+func LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)
+
 // StoreUint8 stores val to the volatile value *addr.
 func StoreUint8(addr *uint8, val uint8)
 
@@ -32,3 +37,6 @@ func StoreUint16(addr *uint16, val uint16)
 
 // StoreUint32 stores val to the volatile value *addr.
 func StoreUint32(addr *uint32, val uint32)
+
+// StorePointer stores val to the volatile value *addr.
+func StorePointer(addr *unsafe.Pointer, val unsafe.Pointer)
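
Porting sketch (not part of the patch above): to reuse the new timer plumbing on
another chip, a port only has to supply a timer implementation plus the
addSleepTask and devicePoll hooks, wired the same way runtime_atsamd21.go does
it, alongside its existing ticks and asyncScheduler definitions. Everything
named foo* below is a hypothetical placeholder; timerController, timeUnit,
ticks, tickMicros and internal/task are the real pieces used by the diff.

// +build foochip

package runtime

import "internal/task"

// fooTimer wraps a hypothetical compare-match timer peripheral.
type fooTimer struct{}

// setTimer arms the compare interrupt to fire once ticks() reaches wakeup.
func (t fooTimer) setTimer(wakeup timeUnit) {
	now := ticks()
	if now >= wakeup {
		wakeup = now
	}
	fooArmCompareIRQ(uint32(wakeup - now)) // program the compare register, enable the IRQ
}

// disableTimer disarms the compare interrupt.
func (t fooTimer) disableTimer() {
	fooDisarmCompareIRQ() // disable the compare IRQ and clear its flag
}

// Hypothetical register helpers, left empty in this sketch.
func fooArmCompareIRQ(delta uint32) {}
func fooDisarmCompareIRQ()          {}

var fooRTC = timerController{t: fooTimer{}}

// addSleepTask queues a sleeping task on the hardware timer.
func addSleepTask(t *task.Task, duration int64) {
	fooRTC.enqueue(t, ticks()+timeUnit(duration/tickMicros))
}

// devicePoll reports whether any expired timers were pushed onto the runqueue.
func devicePoll() bool {
	return fooRTC.poll()
}

// The timer interrupt handler only records that the interrupt fired; the
// scheduler collects the woken tasks later through devicePoll.
func fooTimerISR() {
	fooRTC.handleInterrupt()
}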