-
Notifications
You must be signed in to change notification settings - Fork 996
Add goroutine core affinity support for RP2040/RP2350 systems #5092
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: dev
Are you sure you want to change the base?
Changes from 3 commits
16409a2
870af4f
c00fc84
9025053
8820b4e
3ca2b71
217adfb
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,111 @@ | ||
| // This example demonstrates goroutine core pinning on multi-core systems (RP2040/RP2350). | ||
| // It shows how to pin goroutines to specific CPU cores and verify their execution. | ||
|
|
||
| //go:build rp2040 || rp2350 | ||
|
|
||
| package main | ||
|
|
||
| import ( | ||
| "machine" | ||
| "runtime" | ||
| "time" | ||
| ) | ||
|
|
||
| func main() { | ||
| time.Sleep(5 * time.Second) | ||
| println("=== Core Pinning Example ===") | ||
| println("Number of CPU cores:", runtime.NumCPU()) | ||
| println("[main] Main starting on core:", machine.CurrentCore()) | ||
| println() | ||
|
|
||
| // Example 1: Pin using standard Go API (LockOSThread) | ||
| // This pins to whichever core this goroutine is currently running on | ||
| runtime.LockOSThread() | ||
| println("[main] Pinned using runtime.LockOSThread()") | ||
| println("[main] Running on core:", machine.CurrentCore()) | ||
| runtime.UnlockOSThread() | ||
| println("[main] Unpinned using runtime.UnlockOSThread()") | ||
| println() | ||
|
|
||
| // Example 2: Pin to a specific core using machine package | ||
| machine.LockCore(0) | ||
| println("[main] Explicitly pinned to core 0 using machine.LockCore()") | ||
| println() | ||
|
|
||
| // Start a goroutine pinned to core 1 | ||
| go core1Worker() | ||
|
|
||
| // Start a goroutine using standard LockOSThread | ||
| go standardLockWorker() | ||
|
|
||
| // Start an unpinned goroutine (can run on either core) | ||
| go unpinnedWorker() | ||
|
|
||
| // Main loop on core 0 | ||
| for i := 0; i < 10; i++ { | ||
| println("[main] loop", i, "on CPU", machine.CurrentCore()) | ||
| time.Sleep(500 * time.Millisecond) | ||
| } | ||
|
|
||
| // Unpin and let main run on any core | ||
| machine.UnlockCore() | ||
| println() | ||
| println("[main] Unpinned using machine.UnlockCore()") | ||
|
|
||
| // Continue running for a bit to show potential migration | ||
| for i := 0; i < 5; i++ { | ||
| println("[main] unpinned loop on CPU", machine.CurrentCore()) | ||
| time.Sleep(500 * time.Millisecond) | ||
| } | ||
|
|
||
| println() | ||
| println("Example complete!") | ||
| } | ||
|
|
||
| // Worker function that pins to core 1 using explicit core selection | ||
| func core1Worker() { | ||
| // Pin this goroutine to core 1 explicitly | ||
| machine.LockCore(1) | ||
| println("[core1-worker] Worker pinned to core 1 using machine.LockCore()") | ||
|
|
||
| for i := 0; i < 10; i++ { | ||
| println("[core1-worker] loop", i, "on CPU", machine.CurrentCore()) | ||
| time.Sleep(500 * time.Millisecond) | ||
| } | ||
|
|
||
| println("[core1-worker] Finished") | ||
| } | ||
|
|
||
| // Worker function that uses standard Go LockOSThread() | ||
| func standardLockWorker() { | ||
| // Pin this goroutine to whichever core it starts on | ||
| runtime.LockOSThread() | ||
| defer runtime.UnlockOSThread() | ||
|
|
||
| core := machine.CurrentCore() | ||
| println("[std-lock-worker] Worker locked using runtime.LockOSThread()") | ||
| println("[std-lock-worker] Running on core:", core) | ||
|
|
||
| for i := 0; i < 10; i++ { | ||
| println("[std-lock-worker] loop", i, "on CPU", machine.CurrentCore()) | ||
| time.Sleep(600 * time.Millisecond) | ||
| } | ||
|
|
||
| println("[std-lock-worker] Finished") | ||
| } | ||
|
|
||
| // Worker function that is not pinned (can run on any core) | ||
| func unpinnedWorker() { | ||
| println("[unpinned-worker] Starting") | ||
|
|
||
| for i := 0; i < 10; i++ { | ||
| cpu := machine.CurrentCore() | ||
| println("[unpinned-worker] loop", i, "on CPU", cpu) | ||
| time.Sleep(700 * time.Millisecond) | ||
|
|
||
| // Yield to potentially migrate to another core | ||
| runtime.Gosched() | ||
| } | ||
|
|
||
| println("[unpinned-worker] Finished") | ||
| } |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,57 @@ | ||
| //go:build (rp2040 || rp2350) && scheduler.cores | ||
|
|
||
| package machine | ||
|
|
||
| const numCPU = 2 // RP2040 and RP2350 both have 2 cores | ||
|
|
||
| // LockCore sets the affinity for the current goroutine to the specified core. | ||
| // This does not immediately migrate the goroutine; migration occurs at the next | ||
| // scheduling point. See machine_rp2.go for full documentation. | ||
| // Important: LockCore sets the affinity but does not immediately migrate the | ||
| // goroutine to the target core. The actual migration happens at the next | ||
| // scheduling point (e.g., channel operation, time.Sleep, or Gosched). After | ||
| // that point, the goroutine will wait in the target core's queue if that core | ||
| // is busy running another goroutine. | ||
| // | ||
| // To avoid potential blocking on a busy core, consider calling LockCore in an | ||
| // init function before any other goroutines have started. This guarantees the | ||
| // target core is available. | ||
|
Comment on lines
+11
to
+13
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I think this should be a hard requirement; that is,
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Regarding "panic if goroutines started" - I have a specific use case for dynamic pinning: Motion control board where: Core 0: Communications and non-critical tasks func main() { Would you accept one of these:
The deadlock risk is manageable if users follow the pattern of pinning early and using one goroutine per core for pinned work.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
Yes, if we do this However, given your use case: is it possible to run the step generation off a hardware timer and an interrupt handler? That seems a better fit, and you can keep both cores running non-critical code.
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. No, That is precisely the implementation that I am trying to get away from.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can you elaborate why? Assuming your
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The interrupt-based approach has several issues for precision step generation:
func stepGenerationLoop() { The core isn't idle - it's maintaining precise timing. An interrupt would add latency between "timer fired" and "step pin toggled."
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I see. I believe I'm doing something similar on a PIO-capable chip (rp2350). My solution is a 3-layer architecture:
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. There are always multiple ways to skin a cat. I am using an approach similar to what you described as well. But having core pinning has its advantages as well. Also, it seems like several people have asked for it previously. Appreciate your reviews and responses |
||
| // | ||
| // This is useful for: | ||
| // - Isolating time-critical operations to a dedicated core | ||
| // - Improving cache locality for performance-sensitive code | ||
| // - Exclusive access to core-local resources | ||
| // | ||
| // Warning: Pinning goroutines can lead to load imbalance. The goroutine will | ||
| // wait in the specified core's queue even if other cores are idle. If a | ||
| // long-running goroutine occupies the target core, LockCore may appear to | ||
| // block indefinitely (until the next scheduling point on the target core). | ||
| // | ||
| // Valid core values are 0 and 1. Panics if core is out of range. | ||
| // | ||
| // Only available on RP2040 and RP2350 with the "cores" scheduler. | ||
amken3d marked this conversation as resolved.
Outdated
Show resolved
Hide resolved
|
||
| func LockCore(core int) { | ||
| if core < 0 || core >= numCPU { | ||
| panic("machine: core out of range") | ||
| } | ||
| machineLockCore(core) | ||
| } | ||
|
|
||
| // UnlockCore unpins the calling goroutine, allowing it to run on any available core. | ||
| // This undoes a previous call to LockCore. | ||
| // | ||
| // After calling UnlockCore, the scheduler is free to schedule the goroutine on | ||
| // any core for automatic load balancing. | ||
| // | ||
| // Only available on RP2040 and RP2350 with the "cores" scheduler. | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Superfluous comment. |
||
// UnlockCore removes the calling goroutine's core affinity set by a previous
// LockCore, delegating to the runtime so the scheduler may place the
// goroutine on any available core again.
func UnlockCore() {
	machineUnlockCore()
}
|
|
||
| // Internal functions implemented in runtime/scheduler_cores.go | ||
| // | ||
| //go:linkname machineLockCore runtime.machineLockCore | ||
| func machineLockCore(core int) | ||
|
|
||
| //go:linkname machineUnlockCore runtime.machineUnlockCore | ||
| func machineUnlockCore() | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,15 @@ | ||
| //go:build (rp2040 || rp2350) && !scheduler.cores | ||
|
|
||
| package machine | ||
|
|
||
// LockCore is only functional when TinyGo is built with the "cores"
// scheduler; this stub panics unconditionally so misuse is caught early.
func LockCore(core int) {
	const unavailable = "machine.LockCore: not available without scheduler.cores"
	panic(unavailable)
}
|
|
||
// UnlockCore is only functional when TinyGo is built with the "cores"
// scheduler; this stub panics unconditionally so misuse is caught early.
func UnlockCore() {
	const unavailable = "machine.UnlockCore: not available without scheduler.cores"
	panic(unavailable)
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -98,14 +98,23 @@ func os_sigpipe() { | |
| } | ||
|
|
||
| // LockOSThread wires the calling goroutine to its current operating system thread. | ||
| // Stub for now | ||
| // On microcontrollers with multiple cores (e.g., RP2040/RP2350), this pins the | ||
| // goroutine to the core it's currently running on. | ||
|
Comment on lines
+101
to
+102
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is it more precise to say "with the "cores" scheduler"? |
||
| // With the "cores" scheduler on RP2040/RP2350, this pins the goroutine to the | ||
| // core it's currently running on. The pinning takes effect at the next | ||
| // scheduling point (e.g., channel operation, time.Sleep, or Gosched). | ||
| // Called by go1.18 standard library on windows, see https://github.com/golang/go/issues/49320 | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. While here, remove this now irrelevant comment. |
||
// LockOSThread wires the calling goroutine to its current operating system
// thread. With the "cores" scheduler on RP2040/RP2350 this pins the goroutine
// to the core it is currently running on; the pinning takes effect at the
// next scheduling point (e.g., a channel operation, time.Sleep, or Gosched).
func LockOSThread() {
	lockOSThreadImpl()
}
|
|
||
| // UnlockOSThread undoes an earlier call to LockOSThread. | ||
| // Stub for now | ||
| // On microcontrollers with multiple cores, this unpins the goroutine, allowing | ||
| // it to run on any available core. | ||
| // With the "cores" scheduler, this unpins the goroutine, allowing it to run on | ||
| // any available core. | ||
// UnlockOSThread undoes an earlier call to LockOSThread. With the "cores"
// scheduler the goroutine may again be scheduled on any available core.
func UnlockOSThread() {
	unlockOSThreadImpl()
}
|
|
||
| // KeepAlive makes sure the value in the interface is alive until at least the | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.