// +build gc.conservative
// +build avr

package runtime

// This memory manager is partially inspired by the memory allocator included in avr-libc.
// Unlike avr-libc's malloc, this memory manager stores an allocation list instead of a free list.
// This gives an overhead of 4 bytes/allocation (up from avr-libc's 2 bytes/allocation).
// The allocation list is stored in ascending address order.
// The set of free spans is implicitly derived from the gaps between allocations.
// Allocation runs in O(n) time and a full GC cycle is O(n^2) in the worst case, where n is the number of live allocations.
// Due to architectural quirks of AVR, as well as the tiny heaps involved, this should almost always be faster than the standard conservative collector.
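//
// As an illustration (this diagram is descriptive only, not part of the
// implementation), a heap with two live allocations A and B looks like:
//
//	heapStart                                                      heapEnd
//	v                                                                    v
//	|--free--|next|len| body A |------free------|next|len| body B |-free-|
//	          ^                                  ^
//	          allocList                          allocList.next
//
// Each allocation is preceded by a 4-byte header (next pointer plus length),
// and every gap between allocations is a free span available to findMem.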

import "unsafe"

// isInHeap checks if an address is inside the heap.
func isInHeap(addr uintptr) bool {
	return addr >= heapStart && addr < heapEnd
}

// allocNode is a node in the allocations list.
// It is prepended to every allocation, resulting in an overhead of 4 bytes per allocation.
type allocNode struct {
	// next is a pointer to the next allocation node.
	next *allocNode

	// len is the length of the body of this node in bytes.
	len uintptr

	// base is the start of the body of this node.
	base struct{}
}
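
// As a sanity check (assuming AVR's 2-byte pointers and 2-byte uintptr, which
// is what the 4 bytes/allocation figure above relies on):
//
//	var n allocNode
//	_ = unsafe.Sizeof(n.next)   // 2 bytes on AVR
//	_ = unsafe.Sizeof(n.len)    // 2 bytes on AVR
//	_ = unsafe.Sizeof(n)        // 4 bytes: the per-allocation overhead
//	_ = unsafe.Pointer(&n.base) // zero-sized field: the first byte past the header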

// allocList is a singly linked list of allocations.
// It is stored in ascending address order.
var allocList *allocNode

// scanList is a stack of allocations which have been marked but not yet scanned.
var scanList *allocNode

// markRoot marks the allocation containing a single root pointer.
// The addr argument (the location the root was loaded from) is unused here;
// it is accepted for compatibility with the runtime's GC interface.
func markRoot(addr, root uintptr) {
	markAddr(root)
}

// markRoots conservatively scans a memory region (such as the stack or the
// globals) for roots.
func markRoots(start, end uintptr) {
	markMem(start, end)
}

// markAddr marks the allocation containing the specified address.
// Marking moves the allocation off of the allocation list and onto the scan
// stack, so an allocation that has already been marked cannot be found (and
// re-marked) a second time.
func markAddr(addr uintptr) {
	if !isInHeap(addr) {
		// The address is not in the heap.
		return
	}

	// Search the allocation list for the address.
	for node, prev := allocList, &allocList; node != nil; node, prev = node.next, &node.next {
		baseAddr := uintptr(unsafe.Pointer(&node.base))
		if addr < baseAddr {
			// The address comes before this node, and the list is sorted in
			// ascending address order, so it must point at either a node
			// header or a free span.
			return
		}

		endAddr := baseAddr + node.len
		if addr < endAddr {
			// The address is included in this allocation.
			// Move the allocation to the scan stack.
			*prev = node.next
			scanList, node.next = node, scanList
			return
		}
	}
}
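
// A hedged walkthrough of markAddr, assuming (hypothetically) two allocations
// with headers at 0x0100 and 0x0200, each with len == 8 (bodies at
// 0x0104..0x010B and 0x0204..0x020B):
//
//	markAddr(0x0106) // inside A's body: A moves from allocList to scanList
//	markAddr(0x0102) // inside A's header: not a valid object pointer, ignored
//	markAddr(0x0150) // in the gap between A and B: ignored
//
// Because allocList is sorted and marked nodes leave the list, each node is
// pushed onto the scan stack at most once.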

// markMem conservatively scans a memory region for pointers and marks
// everything that is pointed to. Pointers on AVR are 16 bits wide and stored
// little-endian, so every pair of adjacent bytes is treated as a candidate
// pointer.
func markMem(start, end uintptr) {
	if start >= end {
		return
	}
	prevByte := *(*byte)(unsafe.Pointer(start))
	for start++; start != end; start++ {
		b := *(*byte)(unsafe.Pointer(start))

		// Compose a candidate pointer from the current byte (high half) and
		// the previous byte (low half).
		addr := (uintptr(b) << 8) | uintptr(prevByte)
		markAddr(addr)
		prevByte = b
	}
}
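
// For example (values are hypothetical), if the region holds the bytes
//
//	offset: +0    +1    +2    +3
//	value:  0x34  0x12  0xCD  0xAB
//
// then markMem composes and marks the little-endian candidate pointers
// 0x1234 (bytes +0/+1), 0xCD12 (bytes +1/+2), and 0xABCD (bytes +2/+3).
// False positives merely retain extra memory; a conservative collector never
// frees a live object because of them.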

// GC runs a garbage collection cycle.
func GC() {
	// Mark phase: mark all reachable objects, recursively.
	// Marked nodes accumulate on the scan stack; scanned nodes are inserted
	// into the keep list.
	markGlobals()
	markStack()
	var keep *allocNode
	for scanList != nil {
		// Pop a node off of the scan stack.
		node := scanList
		scanList = node.next

		// Scan the node's body for pointers into the heap.
		baseAddr := uintptr(unsafe.Pointer(&node.base))
		endAddr := baseAddr + node.len
		markMem(baseAddr, endAddr)

		// Insert the node into the keep list, maintaining ascending address order.
		var prev *allocNode
		keepNode := keep
		for keepNode != nil && uintptr(unsafe.Pointer(node)) > uintptr(unsafe.Pointer(keepNode)) {
			// Move onto the next node.
			prev, keepNode = keepNode, keepNode.next
		}
		if prev == nil {
			keep, node.next = node, keep
		} else {
			prev.next, node.next = node, keepNode
		}
	}

	// Sweep phase: any allocation still on the old allocation list was never
	// marked, so it is garbage. Replacing the list with the kept nodes frees
	// those allocations implicitly.
	allocList = keep
}
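
// An illustrative summary of how nodes move between lists during a cycle
// (every allocation lives on exactly one list at any given time):
//
//	allocList --(markAddr finds a pointer into it)--> scanList
//	scanList ---(body scanned by markMem)-----------> keep
//	allocList --(still present when scanList empties)--> garbage
//
// Whatever remains on the old allocation list was unreachable, so replacing
// allocList with the sorted keep list implicitly frees it.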

// findMem searches the heap for a free span large enough to contain an allocation of the specified size.
// If there is no sufficiently large free span available, this returns nil.
func findMem(size uintptr) *allocNode {
	// This is effectively the same algorithm applied by avr-libc's malloc:
	// loop through the set of all free spans, and select the smallest span
	// that is large enough to fit the allocation.

	nodeSize := unsafe.Sizeof(allocNode{}) + size

	// best* store the best-fit free span found so far.
	var bestDst **allocNode
	var bestStart uintptr
	var bestSize uintptr

	start := heapStart
	dst := &allocList
searchLoop:
	for {
		// Find the allocation node after this free span.
		node := *dst

		// Find the end of this free span.
		var end uintptr
		if node != nil {
			// The node terminates the free span.
			end = uintptr(unsafe.Pointer(node))
		} else {
			// The free span ends at the end of the heap.
			end = heapEnd
		}

		// Calculate the size of the free span.
		freeSpanSize := end - start

		switch {
		case freeSpanSize == nodeSize:
			// This span is a perfect fit.
			bestDst = dst
			bestStart = start
			break searchLoop
		case freeSpanSize > nodeSize && (bestDst == nil || freeSpanSize < bestSize):
			// This span is a better (smaller, but still sufficient) fit than the previous best.
			bestDst = dst
			bestStart = start
			bestSize = freeSpanSize
		}

		// Move to the next free span.
		if node == nil {
			// That was the last free span.
			break searchLoop
		}
		start = uintptr(unsafe.Pointer(&node.base)) + node.len
		dst = &node.next
	}

	if bestDst == nil {
		// There is no sufficiently large free span.
		return nil
	}

	// Place the new node at the start of the chosen span and link it into the
	// allocation list, preserving ascending address order.
	mem := (*allocNode)(unsafe.Pointer(bestStart))
	*bestDst, mem.next = mem, *bestDst
	mem.len = size
	return mem
}
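
// A worked example of the best-fit search (free span sizes are hypothetical):
// for findMem(8), nodeSize is 8 + unsafe.Sizeof(allocNode{}) = 12 bytes on
// AVR. Given free spans of 10, 40, and 16 bytes, in that order:
//
//	10 bytes: too small, skipped
//	40 bytes: first sufficient span, recorded as the best fit
//	16 bytes: smaller than 40 and still sufficient, so it becomes the best fit
//
// The new node is then written at the start of the 16-byte span and linked
// into allocList in front of the allocation that follows that span.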

// alloc tries to find some free space on the heap, possibly doing a garbage
// collection cycle if needed. If no space is free, it panics.
//go:noinline
func alloc(size uintptr) unsafe.Pointer {
	var ranGC bool
tryAlloc:
	// Search for available memory.
	node := findMem(size)
	if node == nil {
		// There is no free span large enough for the allocation.

		if ranGC {
			// Even after running the GC, there is not enough memory.
			runtimePanic("out of memory")
		}

		// Run the garbage collector and try again.
		GC()
		ranGC = true
		goto tryAlloc
	}

	// Zero the allocation and return it.
	ptr := unsafe.Pointer(&node.base)
	memzero(ptr, size)
	return ptr
}
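
// As a usage sketch (hypothetical; in practice the compiler emits calls to
// alloc when lowering heap allocations, rather than user code calling it):
//
//	p := (*uint16)(alloc(unsafe.Sizeof(uint16(0))))
//	*p = 42 // the memory was zeroed by alloc before being returned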

func free(ptr unsafe.Pointer) {
	// TODO: free memory on request, when the compiler knows it is unused.
}

func initHeap() {
	// This memory manager requires no initialization other than the zeroing of globals.
	// This function is provided for compatibility with other memory managers.
}