Skip to content

Commit fed433c

Browse files
aykevl and deadprogram
authored and committed
compiler: add support for atomic operations
This also implements DisableInterrupts/EnableInterrupts for RISC-V, as those operations were needed to implement a few libcalls.
1 parent 734613c commit fed433c

File tree

10 files changed

+389
-24
lines changed

10 files changed

+389
-24
lines changed

compiler/atomic.go

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
package compiler
2+
3+
import (
4+
"golang.org/x/tools/go/ssa"
5+
"tinygo.org/x/go-llvm"
6+
)
7+
8+
// createAtomicOp lowers an atomic library call by lowering it as an LLVM atomic
// operation. It returns the result of the operation and true if the call could
// be lowered inline, and false otherwise.
func (b *builder) createAtomicOp(call *ssa.CallCommon) (llvm.Value, bool) {
	// Dispatch on the bare function name from the sync/atomic package.
	name := call.Value.(*ssa.Function).Name()
	switch name {
	case "AddInt32", "AddInt64", "AddUint32", "AddUint64", "AddUintptr":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpAdd, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		// Return the new value, not the original value returned by atomicrmw.
		return b.CreateAdd(oldVal, val, ""), true
	case "SwapInt32", "SwapInt64", "SwapUint32", "SwapUint64", "SwapUintptr", "SwapPointer":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		isPointer := val.Type().TypeKind() == llvm.PointerTypeKind
		if isPointer {
			// atomicrmw only supports integers, so cast to an integer.
			val = b.CreatePtrToInt(val, b.uintptrType, "")
			ptr = b.CreateBitCast(ptr, llvm.PointerType(val.Type(), 0), "")
		}
		oldVal := b.CreateAtomicRMW(llvm.AtomicRMWBinOpXchg, ptr, val, llvm.AtomicOrderingSequentiallyConsistent, true)
		if isPointer {
			// Cast the integer result back to a pointer (i8*, matching
			// unsafe.Pointer) for SwapPointer.
			oldVal = b.CreateIntToPtr(oldVal, b.i8ptrType, "")
		}
		return oldVal, true
	case "CompareAndSwapInt32", "CompareAndSwapInt64", "CompareAndSwapUint32", "CompareAndSwapUint64", "CompareAndSwapUintptr", "CompareAndSwapPointer":
		ptr := b.getValue(call.Args[0])
		old := b.getValue(call.Args[1])
		newVal := b.getValue(call.Args[2])
		// cmpxchg yields a {oldValue, swapped} pair; sync/atomic's
		// CompareAndSwap* only needs the boolean success flag (element 1).
		tuple := b.CreateAtomicCmpXchg(ptr, old, newVal, llvm.AtomicOrderingSequentiallyConsistent, llvm.AtomicOrderingSequentiallyConsistent, true)
		swapped := b.CreateExtractValue(tuple, 1, "")
		return swapped, true
	case "LoadInt32", "LoadInt64", "LoadUint32", "LoadUint64", "LoadUintptr", "LoadPointer":
		ptr := b.getValue(call.Args[0])
		val := b.CreateLoad(ptr, "")
		// Turn the plain load into an atomic load; LLVM requires atomic loads
		// to carry an explicit alignment.
		val.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		val.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return val, true
	case "StoreInt32", "StoreInt64", "StoreUint32", "StoreUint64", "StoreUintptr", "StorePointer":
		ptr := b.getValue(call.Args[0])
		val := b.getValue(call.Args[1])
		store := b.CreateStore(val, ptr)
		// Same as above: atomic stores need ordering plus explicit alignment.
		store.SetOrdering(llvm.AtomicOrderingSequentiallyConsistent)
		store.SetAlignment(b.targetData.PrefTypeAlignment(val.Type())) // required
		return store, true
	default:
		// Not one of the recognized sync/atomic functions; signal the caller
		// to lower this as a regular function call instead.
		return llvm.Value{}, false
	}
}

compiler/compiler.go

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1323,6 +1323,14 @@ func (b *builder) createFunctionCall(instr *ssa.CallCommon) (llvm.Value, error)
13231323
return b.createVolatileLoad(instr)
13241324
case strings.HasPrefix(name, "runtime/volatile.Store"):
13251325
return b.createVolatileStore(instr)
1326+
case strings.HasPrefix(name, "sync/atomic."):
1327+
val, ok := b.createAtomicOp(instr)
1328+
if ok {
1329+
// This call could be lowered as an atomic operation.
1330+
return val, nil
1331+
}
1332+
// This call couldn't be lowered as an atomic operation, it's
1333+
// probably something else. Continue as usual.
13261334
case name == "runtime/interrupt.New":
13271335
return b.createInterruptGlobal(instr)
13281336
}

src/device/riscv/riscv.go

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,3 +19,19 @@ func Asm(asm string)
1919
// You can use {} in the asm string (which expands to a register) to set the
2020
// return value.
2121
func AsmFull(asm string, regs map[string]interface{}) uintptr
22+
23+
// DisableInterrupts disables all interrupts, and returns the old interrupt
24+
// state.
25+
func DisableInterrupts() uintptr {
26+
// Note: this can be optimized with a CSRRW instruction, which atomically
27+
// swaps the value and returns the old value.
28+
mask := MIE.Get()
29+
MIE.Set(0)
30+
return mask
31+
}
32+
33+
// EnableInterrupts enables all interrupts again. The value passed in must be
// the mask returned by DisableInterrupts.
func EnableInterrupts(mask uintptr) {
	// Restore the saved MIE value instead of setting all bits, so interrupts
	// that were already masked before DisableInterrupts stay masked.
	MIE.Set(mask)
}

src/runtime/arch_cortexm.go

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,3 +19,86 @@ func align(ptr uintptr) uintptr {
1919
// getCurrentStackPointer returns the current value of the stack pointer, read
// from the SP register via inline assembly.
func getCurrentStackPointer() uintptr {
	return arm.AsmFull("mov {}, sp", nil)
}
22+
23+
// Documentation:
24+
// * https://llvm.org/docs/Atomics.html
25+
// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
26+
//
27+
// In the case of Cortex-M, some atomic operations are emitted inline while
28+
// others are emitted as libcalls. How many are emitted as libcalls depends on
29+
// the MCU core variant (M3 and higher support some 32-bit atomic operations
30+
// while M0 and M0+ do not).
31+
32+
//export __sync_fetch_and_add_4
33+
func __sync_fetch_and_add_4(ptr *uint32, value uint32) uint32 {
34+
mask := arm.DisableInterrupts()
35+
oldValue := *ptr
36+
*ptr = oldValue + value
37+
arm.EnableInterrupts(mask)
38+
return oldValue
39+
}
40+
41+
//export __sync_fetch_and_add_8
42+
func __sync_fetch_and_add_8(ptr *uint64, value uint64) uint64 {
43+
mask := arm.DisableInterrupts()
44+
oldValue := *ptr
45+
*ptr = oldValue + value
46+
arm.EnableInterrupts(mask)
47+
return oldValue
48+
}
49+
50+
//export __sync_lock_test_and_set_4
51+
func __sync_lock_test_and_set_4(ptr *uint32, value uint32) uint32 {
52+
mask := arm.DisableInterrupts()
53+
oldValue := *ptr
54+
*ptr = value
55+
arm.EnableInterrupts(mask)
56+
return oldValue
57+
}
58+
59+
//export __sync_lock_test_and_set_8
60+
func __sync_lock_test_and_set_8(ptr *uint64, value uint64) uint64 {
61+
mask := arm.DisableInterrupts()
62+
oldValue := *ptr
63+
*ptr = value
64+
arm.EnableInterrupts(mask)
65+
return oldValue
66+
}
67+
68+
//export __sync_val_compare_and_swap_4
69+
func __sync_val_compare_and_swap_4(ptr *uint32, expected, desired uint32) uint32 {
70+
mask := arm.DisableInterrupts()
71+
oldValue := *ptr
72+
if oldValue == expected {
73+
*ptr = desired
74+
}
75+
arm.EnableInterrupts(mask)
76+
return oldValue
77+
}
78+
79+
//export __sync_val_compare_and_swap_8
80+
func __sync_val_compare_and_swap_8(ptr *uint64, expected, desired uint64) uint64 {
81+
mask := arm.DisableInterrupts()
82+
oldValue := *ptr
83+
if oldValue == expected {
84+
*ptr = desired
85+
}
86+
arm.EnableInterrupts(mask)
87+
return oldValue
88+
}
89+
90+
// The safest thing to do here would just be to disable interrupts for
// procPin/procUnpin. Note that a global variable is safe in this case, as any
// access to procPinnedMask will happen with interrupts disabled.

// procPinnedMask stores the interrupt state saved by procPin, so procUnpin
// can restore it.
var procPinnedMask uintptr

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
	procPinnedMask = arm.DisableInterrupts()
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
	arm.EnableInterrupts(procPinnedMask)
}

src/runtime/arch_tinygoriscv.go

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,3 +17,76 @@ func align(ptr uintptr) uintptr {
1717
// getCurrentStackPointer returns the current value of the stack pointer, read
// from the sp register via inline assembly.
func getCurrentStackPointer() uintptr {
	return riscv.AsmFull("mv {}, sp", nil)
}
20+
21+
// Documentation:
22+
// * https://llvm.org/docs/Atomics.html
23+
// * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html
24+
//
25+
// In the case of RISC-V, some operations may be implemented with libcalls if
26+
// the operation is too big to be handled by assembly. Officially, these calls
27+
// should be implemented with a lock-free algorithm but as (as of this time) all
28+
// supported RISC-V chips have a single hart, we can simply disable interrupts
29+
// to get the same behavior.
30+
31+
//export __atomic_load_8
32+
func __atomic_load_8(ptr *uint64, ordering int32) uint64 {
33+
mask := riscv.DisableInterrupts()
34+
value := *ptr
35+
riscv.EnableInterrupts(mask)
36+
return value
37+
}
38+
39+
//export __atomic_store_8
40+
func __atomic_store_8(ptr *uint64, value uint64, ordering int32) {
41+
mask := riscv.DisableInterrupts()
42+
*ptr = value
43+
riscv.EnableInterrupts(mask)
44+
}
45+
46+
//export __atomic_exchange_8
47+
func __atomic_exchange_8(ptr *uint64, value uint64, ordering int32) uint64 {
48+
mask := riscv.DisableInterrupts()
49+
oldValue := *ptr
50+
*ptr = value
51+
riscv.EnableInterrupts(mask)
52+
return oldValue
53+
}
54+
55+
//export __atomic_compare_exchange_8
56+
func __atomic_compare_exchange_8(ptr, expected *uint64, desired uint64, success_ordering, failure_ordering int32) bool {
57+
mask := riscv.DisableInterrupts()
58+
oldValue := *ptr
59+
success := oldValue == *expected
60+
if success {
61+
*ptr = desired
62+
} else {
63+
*expected = oldValue
64+
}
65+
riscv.EnableInterrupts(mask)
66+
return success
67+
}
68+
69+
//export __atomic_fetch_add_8
70+
func __atomic_fetch_add_8(ptr *uint64, value uint64, ordering int32) uint64 {
71+
mask := riscv.DisableInterrupts()
72+
oldValue := *ptr
73+
*ptr = oldValue + value
74+
riscv.EnableInterrupts(mask)
75+
return oldValue
76+
}
77+
78+
// The safest thing to do here would just be to disable interrupts for
// procPin/procUnpin. Note that a global variable is safe in this case, as any
// access to procPinnedMask will happen with interrupts disabled.

// procPinnedMask stores the interrupt state saved by procPin, so procUnpin
// can restore it.
var procPinnedMask uintptr

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
	procPinnedMask = riscv.DisableInterrupts()
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
	riscv.EnableInterrupts(procPinnedMask)
}

src/runtime/atomic.go

Lines changed: 0 additions & 24 deletions
This file was deleted.

src/runtime/runtime_unix.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -94,3 +94,14 @@ func extalloc(size uintptr) unsafe.Pointer {
9494

9595
// extfree releases memory through the external `free` symbol — presumably the
// C library allocator paired with extalloc; verify against the linker setup.
//export free
func extfree(ptr unsafe.Pointer)
97+
98+
// TinyGo does not yet support any form of parallelism on an OS, so these can be
// left empty.

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
}

src/runtime/runtime_wasm.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -91,3 +91,14 @@ func ticks() timeUnit
9191
// abort terminates the program abnormally by executing trap.
func abort() {
	trap()
}
94+
95+
// TinyGo does not yet support any form of parallelism on WebAssembly, so these
// can be left empty.

//go:linkname procPin sync/atomic.runtime_procPin
func procPin() {
}

//go:linkname procUnpin sync/atomic.runtime_procUnpin
func procUnpin() {
}

0 commit comments

Comments
 (0)