
Commit bbc2bf5

runtime (gc): separate AVR GC
1 parent 1ad6953 commit bbc2bf5

2 files changed: +229 -0 lines changed


src/runtime/gc_avr.go

Lines changed: 228 additions & 0 deletions
@@ -0,0 +1,228 @@
// +build gc.conservative
// +build avr

package runtime

// This memory manager is partially inspired by the memory allocator included in avr-libc.
// Unlike avr-libc malloc, this memory manager stores an allocation list instead of a free list.
// This gives an overhead of 4 bytes/allocation (up from avr-libc's 2 bytes/allocation).
// The allocation list is stored in ascending address order.
// The set of free spans is implicitly derived from the gaps between allocations.
// This allocator has an effective allocation complexity of O(n) and worst-case GC complexity of O(n^2).
// Due to architectural quirks of AVR, as well as tiny heaps, this should almost always be faster than the standard conservative collector.

import (
	"machine"
	"unsafe"
)

// isInHeap checks if an address is inside the heap.
func isInHeap(addr uintptr) bool {
	return addr >= heapStart && addr < heapEnd
}

// allocNode is a node in the allocations list.
// It is prepended to every allocation, resulting in an overhead of 4 bytes per allocation.
type allocNode struct {
	// next is a pointer to the next allocation node.
	next *allocNode

	// len is the length of the body of this node in bytes.
	len uintptr

	// base is the start of the body of this node.
	base struct{}
}

// allocList is a singly linked list of allocations.
// It is stored in ascending address order.
var allocList *allocNode

// scanList is a stack of allocations to scan.
var scanList *allocNode

func markRoot(addr, root uintptr) {
	markAddr(root)
}

func markRoots(start, end uintptr) {
	markMem(start, end)
}

// markAddr marks the allocation containing the specified address.
func markAddr(addr uintptr) {
	if !isInHeap(addr) {
		// The address is not in the heap.
		return
	}

	// Search the allocation list for the address.
	for node, prev := allocList, &allocList; node != nil; node, prev = node.next, &node.next {
		baseAddr := uintptr(unsafe.Pointer(&node.base))
		if addr < baseAddr {
			// The address comes before this node.
			// Therefore the address must either be that of a node header or a free span.
			return
		}

		endAddr := baseAddr + node.len
		if addr < endAddr {
			// The address is included in this allocation.
			// Move the allocation to the scan stack.
			*prev = node.next
			scanList, node.next = node, scanList
			return
		}
	}
}

// markMem scans a memory region for pointers and marks anything that is pointed to.
func markMem(start, end uintptr) {
	if start >= end {
		return
	}
	prevByte := *(*byte)(unsafe.Pointer(start))
	for ; start != end; start++ {
		b := *(*byte)(unsafe.Pointer(start))
		addr := (uintptr(b) << 8) | uintptr(prevByte)
		markAddr(addr)
		prevByte = b
	}
}

// GC runs a garbage collection cycle.
func GC() {
	// Mark phase: mark all reachable objects, recursively.
	markGlobals()
	markStack()
	var keep *allocNode
	for scanList != nil {
		// Pop a node off of the scan list.
		node := scanList
		scanList = node.next

		// Scan the node.
		baseAddr := uintptr(unsafe.Pointer(&node.base))
		endAddr := baseAddr + node.len
		markMem(baseAddr, endAddr)

		// Insert the node into the output heap.
		var prev *allocNode
		keepNode := keep
		for keepNode != nil && uintptr(unsafe.Pointer(node)) > uintptr(unsafe.Pointer(keepNode)) {
			// Move onto the next node.
			prev, keepNode = keepNode, keepNode.next
		}
		if prev == nil {
			keep, node.next = node, keep
		} else {
			prev.next, node.next = node, keepNode
		}
	}

	// Sweep phase: replace the heap.
	allocList = keep
}

// findMem searches the heap for a free span large enough to contain an allocation of the specified size.
// If there are no sufficiently large free spans available, this returns nil.
func findMem(size uintptr) *allocNode {
	// This memory allocator implementation is effectively the same algorithm applied by avr-libc.
	// It loops through the set of all free spans, and selects the smallest span that is large enough to fit the allocation.

	nodeSize := unsafe.Sizeof(allocNode{}) + size

	// best* store the best-fit free span.
	var bestDst **allocNode
	var bestStart uintptr
	var bestSize uintptr

	start := heapStart
	dst := &allocList
searchLoop:
	for {
		// Find the allocation node after this free span.
		node := *dst

		// Find the end of this free span.
		var end uintptr
		if node != nil {
			// The node terminates the free span.
			end = uintptr(unsafe.Pointer(node))
		} else {
			// The free span ends at the end of the heap.
			end = heapEnd
		}

		// Calculate the size of the free span.
		freeSpanSize := end - start

		switch {
		case freeSpanSize == nodeSize:
			// This span is a perfect fit.
			bestDst = dst
			bestStart = start
			break searchLoop
		case freeSpanSize > nodeSize && (bestDst == nil || bestSize > nodeSize):
			// This span is a better fit than the previous best.
			bestDst = dst
			bestStart = start
			bestSize = freeSpanSize
		}

		// Move to the next free span.
		if node == nil {
			// That was the last free region.
			break searchLoop
		}
		start = uintptr(unsafe.Pointer(&node.base)) + node.len
		dst = &node.next
	}

	if bestDst == nil {
		// There is no suitable allocation.
		return nil
	}

	mem := (*allocNode)(unsafe.Pointer(bestStart))
	*bestDst, mem.next = mem, *bestDst
	mem.len = size
	return mem
}

// alloc tries to find some free space on the heap, possibly doing a garbage
// collection cycle if needed. If no space is free, it panics.
//go:noinline
func alloc(size uintptr) unsafe.Pointer {
	var ranGC bool
tryAlloc:
	// Search for available memory.
	node := findMem(size)
	if node == nil {
		// There is no free span large enough for the allocation.

		if ranGC {
			// Even after running the GC, there is not enough memory.
			runtimePanic("out of memory")
		}

		// Run the garbage collector and try again.
		GC()
		ranGC = true
		goto tryAlloc
	}

	// Zero the allocation and return it.
	ptr := unsafe.Pointer(&node.base)
	memzero(ptr, size)
	return ptr
}

func free(ptr unsafe.Pointer) {
	// TODO: free memory on request, when the compiler knows it is unused.
}

func initHeap() {
	// This memory manager requires no initialization other than the zeroing of globals.
	// This function is provided for compatibility with other memory managers.
}
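As an illustrative aside (not part of the committed file): the file-level comment above says that free memory is represented only implicitly, as the gaps between entries in the allocation list. The following small host-side Go sketch models that idea with made-up offsets and sizes; heapSize, headerSize, and the node type are illustrative names, not taken from gc_avr.go.

// freespans.go: a minimal host-side model of the allocator's central idea,
// that free spans are simply the gaps between allocations kept in ascending
// address order. Offsets stand in for real addresses; all numbers are made up.
package main

import "fmt"

const (
	heapSize   = 64 // pretend heap of 64 bytes
	headerSize = 4  // per-allocation header, like allocNode (next + len)
)

// node models an allocation: a header placed immediately before the body.
type node struct {
	offset int // where the header starts within the pretend heap
	length int // length of the body in bytes
}

// end returns the first offset past this allocation (header plus body).
func (n node) end() int { return n.offset + headerSize + n.length }

func main() {
	// An allocation list in ascending address order, as gc_avr.go maintains.
	allocs := []node{
		{offset: 0, length: 10},  // occupies [0, 14)
		{offset: 20, length: 8},  // occupies [20, 32)
		{offset: 40, length: 12}, // occupies [40, 56)
	}

	// Derive the free spans from the gaps between allocations.
	start := 0
	for _, n := range allocs {
		if n.offset > start {
			fmt.Printf("free span [%d, %d): %d bytes\n", start, n.offset, n.offset-start)
		}
		start = n.end()
	}
	if start < heapSize {
		fmt.Printf("free span [%d, %d): %d bytes\n", start, heapSize, heapSize-start)
	}
}

findMem walks the real allocation list in essentially this way, except over actual addresses, and picks the gap it considers the best fit; alloc then places a new allocNode header at the start of that gap, so the list stays sorted by address.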

src/runtime/gc_conservative.go

Lines changed: 1 addition & 0 deletions
@@ -1,4 +1,5 @@
 // +build gc.conservative
+// +build !avr

 package runtime
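With this one-line change the two collectors are mutually exclusive: multiple +build lines are ANDed, so gc_conservative.go now requires gc.conservative and not avr, while the new gc_avr.go requires gc.conservative and avr, and exactly one of the two files is compiled for a given target. For reference only (this syntax does not appear in the commit), the same constraints written as //go:build expressions would read:

// Hypothetical //go:build equivalents of the +build lines in this commit.

// src/runtime/gc_avr.go:
//go:build gc.conservative && avr

// src/runtime/gc_conservative.go:
//go:build gc.conservative && !avr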

0 commit comments

Comments
 (0)