1- // +build gc.conservative
2- // +build avr
1+ // +build gc.list
32
43package runtime
54
5+ import (
6+ "internal/task"
7+ "runtime/interrupt"
8+ "unsafe"
9+ )
10+
611// This memory manager is partially inspired by the memory allocator included in avr-libc.
712// Unlike avr-libc malloc, this memory manager stores an allocation list instead of a free list.
8- // This gives an overhead of 4 bytes /allocation (up from avr-libc's 2 bytes /allocation).
13+ // This gives an overhead of 2 pointers/allocation (up from avr-libc's 1 pointer-width/allocation).
914// The allocation list is stored in ascending address order.
1015// The set of free spans is implicitly derived from the gaps between allocations.
1116// This allocator has an effective allocation complexity of O(n) and worst-case GC complexity of O(n^2).
12- // Due to architectural quirks of AVR, as well as tiny heaps, this should almost always be faster than the standard conservative collector.
13-
14- import "unsafe"
17+ // Due to architectural quirks, as well as tiny heaps, this should almost always be faster than the standard conservative collector on AVR.
1518
1619// isInHeap checks if an address is inside the heap.
1720func isInHeap (addr uintptr ) bool {
@@ -73,25 +76,28 @@ func markAddr(addr uintptr) {
7376 }
7477}
7578
76- // markMem scans a memory region for pointers and marks anything that is pointed to.
77- func markMem (start , end uintptr ) {
78- if start >= end {
79- return
80- }
81- prevByte := * (* byte )(unsafe .Pointer (start ))
82- for ; start != end ; start ++ {
83- b := * (* byte )(unsafe .Pointer (start ))
84- addr := (uintptr (b ) << 8 ) | uintptr (prevByte )
85- markAddr (addr )
86- prevByte = b
87- }
88- }
89-
9079// GC runs a garbage collection cycle.
9180func GC () {
9281 // Mark phase: mark all reachable objects, recursively.
9382 markGlobals ()
9483 markStack ()
84+ var markedTaskQueue task.Queue
85+ runqueueScan:
86+ if baremetal && hasScheduler {
87+ // Channel operations in interrupts may move task pointers around while we are marking.
88+ // Therefore we need to scan the runqueue separately.
89+ for ! runqueue .Empty () {
90+ // Pop the next task off of the runqueue.
91+ t := runqueue .Pop ()
92+
93+ // Mark the task if it has not already been marked.
94+ markRoot (uintptr (unsafe .Pointer (& runqueue )), uintptr (unsafe .Pointer (t )))
95+
96+ // Push the task onto our temporary queue.
97+ markedTaskQueue .Push (t )
98+ }
99+ }
100+
95101 var keep * allocNode
96102 for scanList != nil {
97103 // Pop a node off of the scan list.
@@ -117,6 +123,18 @@ func GC() {
117123 }
118124 }
119125
126+ if baremetal && hasScheduler {
127+ // Restore the runqueue.
128+ i := interrupt .Disable ()
129+ if ! runqueue .Empty () {
130+ // Something new came in while finishing the mark.
131+ interrupt .Restore (i )
132+ goto runqueueScan
133+ }
134+ runqueue = markedTaskQueue
135+ interrupt .Restore (i )
136+ }
137+
120138 // Sweep phase: replace the heap.
121139 allocList = keep
122140}
@@ -172,7 +190,7 @@ searchLoop:
172190 // That was the last free region.
173191 break searchLoop
174192 }
175- start = uintptr (unsafe .Pointer (& node .base )) + node .len
193+ start = align ( uintptr (unsafe .Pointer (& node .base )) + node .len )
176194 dst = & node .next
177195 }
178196
0 commit comments