diff --git a/compileopts/config.go b/compileopts/config.go
index 2803d16463..af0fc8e732 100644
--- a/compileopts/config.go
+++ b/compileopts/config.go
@@ -79,7 +79,12 @@ func (c *Config) GOARCH() string {
 
 // BuildTags returns the complete list of build tags used during this build.
 func (c *Config) BuildTags() []string {
-	tags := append(c.Target.BuildTags, []string{"tinygo", "gc." + c.GC(), "scheduler." + c.Scheduler()}...)
+	gc := c.GC()
+	tags := append(c.Target.BuildTags, []string{"tinygo", "gc." + gc, "scheduler." + c.Scheduler()}...)
+	switch gc {
+	case "list", "blocks", "extalloc":
+		tags = append(tags, "gc.conservative")
+	}
 	for i := 1; i <= c.GoMinorVersion; i++ {
 		tags = append(tags, fmt.Sprintf("go1.%d", i))
 	}
@@ -96,7 +101,7 @@ func (c *Config) CgoEnabled() bool {
 }
 
 // GC returns the garbage collection strategy in use on this platform. Valid
-// values are "none", "leaking", "extalloc", and "conservative".
+// values are "none", "leaking", "list", "extalloc", and "blocks".
 func (c *Config) GC() string {
 	if c.Options.GC != "" {
 		return c.Options.GC
@@ -106,7 +111,7 @@ func (c *Config) GC() string {
 	}
 	for _, tag := range c.Target.BuildTags {
 		if tag == "baremetal" || tag == "wasm" {
-			return "conservative"
+			return "blocks"
 		}
 	}
 	return "extalloc"
@@ -116,7 +121,7 @@ func (c *Config) GC() string {
 // that can be traced by the garbage collector.
 func (c *Config) NeedsStackObjects() bool {
 	switch c.GC() {
-	case "conservative", "extalloc":
+	case "list", "blocks", "extalloc":
 		for _, tag := range c.BuildTags() {
 			if tag == "wasm" {
 				return true
diff --git a/compileopts/options.go b/compileopts/options.go
index 7948b71c6d..fa043eb6b9 100644
--- a/compileopts/options.go
+++ b/compileopts/options.go
@@ -6,7 +6,7 @@ import (
 )
 
 var (
-	validGCOptions            = []string{"none", "leaking", "extalloc", "conservative"}
+	validGCOptions            = []string{"none", "leaking", "list", "extalloc", "blocks"}
 	validSchedulerOptions     = []string{"none", "tasks", "coroutines"}
 	validPrintSizeOptions     = []string{"none", "short", "full"}
 	validPanicStrategyOptions = []string{"print", "trap"}
diff --git a/compileopts/options_test.go b/compileopts/options_test.go
index 1ff532cc4f..10751e78fd 100644
--- a/compileopts/options_test.go
+++ b/compileopts/options_test.go
@@ -9,7 +9,7 @@ import (
 
 func TestVerifyOptions(t *testing.T) {
 
-	expectedGCError := errors.New(`invalid gc option 'incorrect': valid values are none, leaking, extalloc, conservative`)
+	expectedGCError := errors.New(`invalid gc option 'incorrect': valid values are none, leaking, list, extalloc, blocks`)
 	expectedSchedulerError := errors.New(`invalid scheduler option 'incorrect': valid values are none, tasks, coroutines`)
 	expectedPrintSizeError := errors.New(`invalid size option 'incorrect': valid values are none, short, full`)
 	expectedPanicStrategyError := errors.New(`invalid panic option 'incorrect': valid values are print, trap`)
@@ -42,6 +42,12 @@ func TestVerifyOptions(t *testing.T) {
 				GC: "leaking",
 			},
 		},
+		{
+			name: "GCOptionList",
+			opts: compileopts.Options{
+				GC: "list",
+			},
+		},
 		{
 			name: "GCOptionExtalloc",
 			opts: compileopts.Options{
@@ -49,9 +55,9 @@ func TestVerifyOptions(t *testing.T) {
 			},
 		},
 		{
-			name: "GCOptionConservative",
+			name: "GCOptionBlocks",
 			opts: compileopts.Options{
-				GC: "conservative",
+				GC: "blocks",
 			},
 		},
 		{
diff --git a/compiler/compiler.go b/compiler/compiler.go
index 832f232cbb..80bc39e276 100644
--- a/compiler/compiler.go
+++ b/compiler/compiler.go
@@ -13,6 +13,7 @@ import (
 	"strings"
 
 	"github.com/tinygo-org/tinygo/compileopts"
+	"github.com/tinygo-org/tinygo/compiler/gctype"
"github.com/tinygo-org/tinygo/compiler/gctype" "github.com/tinygo-org/tinygo/compiler/llvmutil" "github.com/tinygo-org/tinygo/ir" "github.com/tinygo-org/tinygo/loader" @@ -51,6 +52,7 @@ type compilerContext struct { ir *ir.Program diagnostics []error astComments map[string]*ast.CommentGroup + typer *gctype.Typer } // builder contains all information relevant to build a single function. @@ -249,6 +251,13 @@ func Compile(pkgName string, machine llvm.TargetMachine, config *compileopts.Con c.createFunctionDeclaration(f) } + allocTyped := c.mod.NamedFunction("runtime.allocTyped") + if !allocTyped.IsNil() { + // The runtime has precise types available. + // Initialize garbage collector type system. + c.typer = gctype.NewTyper(c.ctx, c.mod, c.targetData) + } + // Add definitions to declarations. var initFuncs []llvm.Value irbuilder := c.ctx.NewBuilder() @@ -314,8 +323,14 @@ func Compile(pkgName string, machine llvm.TargetMachine, config *compileopts.Con // Tell the optimizer that runtime.alloc is an allocator, meaning that it // returns values that are never null and never alias to an existing value. + // Do the same for the typed allocator. + alloc := c.mod.NamedFunction("runtime.alloc") for _, attrName := range []string{"noalias", "nonnull"} { - c.mod.NamedFunction("runtime.alloc").AddAttributeAtIndex(0, getAttr(attrName)) + attr := getAttr(attrName) + alloc.AddAttributeAtIndex(0, attr) + if !allocTyped.IsNil() { + allocTyped.AddAttributeAtIndex(0, attr) + } } // On *nix systems, the "abort" functuion in libc is used to handle fatal panics. @@ -1462,7 +1477,18 @@ func (b *builder) createExpr(expr ssa.Value) (llvm.Value, error) { return llvm.Value{}, b.makeError(expr.Pos(), fmt.Sprintf("value is too big (%v bytes)", size)) } sizeValue := llvm.ConstInt(b.uintptrType, size, false) - buf := b.createRuntimeCall("alloc", []llvm.Value{sizeValue}, expr.Comment) + var buf llvm.Value + if b.typer != nil { + // Allocate a typed value. + t, err := b.typer.Create(typ) + if err != nil { + return llvm.Value{}, b.makeError(expr.Pos(), err.Error()) + } + buf = b.createRuntimeCall("allocTyped", []llvm.Value{sizeValue, t}, expr.Comment) + } else { + // Allocate an untyped value. + buf = b.createRuntimeCall("alloc", []llvm.Value{sizeValue}, expr.Comment) + } buf = b.CreateBitCast(buf, llvm.PointerType(typ, 0), "") return buf, nil } else { @@ -1675,7 +1701,18 @@ func (b *builder) createExpr(expr ssa.Value) (llvm.Value, error) { return llvm.Value{}, err } sliceSize := b.CreateBinOp(llvm.Mul, elemSizeValue, sliceCapCast, "makeslice.cap") - slicePtr := b.createRuntimeCall("alloc", []llvm.Value{sliceSize}, "makeslice.buf") + var slicePtr llvm.Value + if b.typer != nil { + // Allocate a typed value. + t, err := b.typer.Create(llvmElemType) + if err != nil { + return llvm.Value{}, b.makeError(expr.Pos(), err.Error()) + } + slicePtr = b.createRuntimeCall("allocTyped", []llvm.Value{sliceSize, t}, "makeslice.buf") + } else { + // Allocate an untyped value. + slicePtr = b.createRuntimeCall("alloc", []llvm.Value{sliceSize}, "makeslice.buf") + } slicePtr = b.CreateBitCast(slicePtr, llvm.PointerType(llvmElemType, 0), "makeslice.array") // Extend or truncate if necessary. 
diff --git a/compiler/gctype/gctype.go b/compiler/gctype/gctype.go
new file mode 100644
index 0000000000..f715fcb854
--- /dev/null
+++ b/compiler/gctype/gctype.go
@@ -0,0 +1,161 @@
+package gctype
+
+import (
+	"errors"
+	"fmt"
+	"math/big"
+
+	"tinygo.org/x/go-llvm"
+)
+
+// getPointerBitmap scans the given LLVM type for pointers and sets bits in a
+// bigint at the word offset that contains a pointer. This scan is recursive.
+func getPointerBitmap(targetData llvm.TargetData, typ llvm.Type, name string) *big.Int {
+	alignment := targetData.PrefTypeAlignment(llvm.PointerType(typ.Context().Int8Type(), 0))
+	switch typ.TypeKind() {
+	case llvm.IntegerTypeKind, llvm.FloatTypeKind, llvm.DoubleTypeKind:
+		return big.NewInt(0)
+	case llvm.PointerTypeKind:
+		return big.NewInt(1)
+	case llvm.StructTypeKind:
+		ptrs := big.NewInt(0)
+		for i, subtyp := range typ.StructElementTypes() {
+			subptrs := getPointerBitmap(targetData, subtyp, name)
+			if subptrs.BitLen() == 0 {
+				continue
+			}
+			offset := targetData.ElementOffset(typ, i)
+			if offset%uint64(alignment) != 0 {
+				panic("precise GC: global contains unaligned pointer: " + name)
+			}
+			subptrs.Lsh(subptrs, uint(offset)/uint(alignment))
+			ptrs.Or(ptrs, subptrs)
+		}
+		return ptrs
+	case llvm.ArrayTypeKind:
+		subtyp := typ.ElementType()
+		subptrs := getPointerBitmap(targetData, subtyp, name)
+		ptrs := big.NewInt(0)
+		if subptrs.BitLen() == 0 {
+			return ptrs
+		}
+		elementSize := targetData.TypeAllocSize(subtyp)
+		for i := 0; i < typ.ArrayLength(); i++ {
+			ptrs.Lsh(ptrs, uint(elementSize)/uint(alignment))
+			ptrs.Or(ptrs, subptrs)
+		}
+		return ptrs
+	default:
+		panic("unknown type kind: " + name)
+	}
+}
+
+// NewTyper creates a Typer.
+func NewTyper(ctx llvm.Context, mod llvm.Module, td llvm.TargetData) *Typer {
+	ptr := llvm.PointerType(ctx.Int8Type(), 0)
+	return &Typer{
+		tcache:   make(map[llvm.Type]llvm.Value),
+		td:       td,
+		ctx:      ctx,
+		mod:      mod,
+		uintptr:  ctx.IntType(int(td.TypeSizeInBits(ptr))),
+		i8:       ctx.Int8Type(),
+		ptrSize:  td.TypeAllocSize(ptr),
+		ptrAlign: uint64(td.ABITypeAlignment(ptr)),
+	}
+}
+
+// Typer creates GC types.
+type Typer struct {
+	// tcache is a cache of GC types by LLVM type.
+	tcache map[llvm.Type]llvm.Value
+
+	// td is the target platform data.
+	td llvm.TargetData
+
+	ctx llvm.Context
+
+	mod llvm.Module
+
+	uintptr, i8 llvm.Type
+
+	ptrSize, ptrAlign uint64
+}
+
+// Create a GC type for the given LLVM type.
+func (t *Typer) Create(typ llvm.Type) (llvm.Value, error) {
+	// Check the cache before attempting to create the global.
+	if g, ok := t.tcache[typ]; ok {
+		return g, nil
+	}
+
+	// Find the type size.
+	size := t.td.TypeAllocSize(typ)
+
+	// Compute a pointer bitmap.
+	// TODO: clean this up and maybe use error handling?
+	b := getPointerBitmap(t.td, typ, "")
+	if b.Cmp(big.NewInt(0)) == 0 {
+		// The type has no pointers.
+		return llvm.ConstNull(llvm.PointerType(t.uintptr, 0)), nil
+	}
+
+	// Use some limited sanity-checking.
+	align := uint64(t.td.ABITypeAlignment(typ))
+	switch {
+	case size < t.ptrSize:
+		return llvm.Value{}, errors.New("type has pointers but is smaller than a pointer")
+	case align%t.ptrAlign != 0:
+		return llvm.Value{}, errors.New("alignment of pointery type is not a multiple of pointer alignment")
+	case size%align != 0:
+		return llvm.Value{}, errors.New("type violates the array alignment invariant")
+	}
+
+	// Convert size into increments of pointer-align.
+	size /= t.ptrAlign
+
+	// Create a global for the type.
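+	// The global stores this size word followed by the bitmap bytes; see createGlobal below.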
+	g := t.createGlobal(size, b.Bytes())
+
+	// Save the global to the cache.
+	t.tcache[typ] = g
+
+	return g, nil
+}
+
+func (t *Typer) createGlobal(size uint64, layout []byte) llvm.Value {
+	// TODO: compression?
+
+	// Generate the name of the global.
+	name := fmt.Sprintf("tinygo.gc.type.%d.%x", size, layout)
+
+	// Create the global if it does not exist.
+	g := t.mod.NamedGlobal(name)
+	if g.IsNil() {
+		// Convert the encoded layout to a byte array.
+		bitmapValues := make([]llvm.Value, len(layout))
+		for i, b := range layout {
+			bitmapValues[len(layout)-i-1] = llvm.ConstInt(t.i8, uint64(b), false)
+		}
+		bitmapArray := llvm.ConstArray(t.i8, bitmapValues)
+
+		// Construct a tuple of the size + the array.
+		tuple := t.ctx.ConstStruct([]llvm.Value{
+			llvm.ConstInt(t.uintptr, size, false),
+			bitmapArray,
+		}, false)
+
+		// Create a global constant initialized with the tuple.
+		g = llvm.AddGlobal(t.mod, tuple.Type(), name)
+		g.SetInitializer(tuple)
+		g.SetGlobalConstant(true)
+		g.SetUnnamedAddr(true)
+		g.SetLinkage(llvm.InternalLinkage)
+	}
+
+	// Get a pointer to the size component of the global.
+	// This is used because different globals will end up with different sizes.
+	g = llvm.ConstBitCast(g, llvm.PointerType(t.uintptr, 0))
+
+	return g
+}
diff --git a/interp/interpreter.go b/interp/interpreter.go
index 36a845d7c5..536411f0ce 100644
--- a/interp/interpreter.go
+++ b/interp/interpreter.go
@@ -185,7 +185,7 @@ func (r *runner) run(fn *function, params []value, parentMem *memoryView, indent
 			// means that monotonic time in the time package is counted from
 			// time.Time{}.Sub(1), which should be fine.
 			locals[inst.localIndex] = literalValue{uint64(0)}
-		case callFn.name == "runtime.alloc":
+		case callFn.name == "runtime.alloc" || callFn.name == "runtime.allocTyped":
 			// Allocate heap memory. At compile time, this is instead done
 			// by creating a global variable.
diff --git a/main.go b/main.go
index c562dc5e2a..21c8c1b136 100644
--- a/main.go
+++ b/main.go
@@ -822,7 +822,7 @@ func main() {
 	command := os.Args[1]
 
 	opt := flag.String("opt", "z", "optimization level: 0, 1, 2, s, z")
-	gc := flag.String("gc", "", "garbage collector to use (none, leaking, extalloc, conservative)")
+	gc := flag.String("gc", "", "garbage collector to use (none, leaking, extalloc, blocks, list)")
 	panicStrategy := flag.String("panic", "print", "panic strategy (print, trap)")
 	scheduler := flag.String("scheduler", "", "which scheduler to use (none, coroutines, tasks)")
 	printIR := flag.Bool("printir", false, "print LLVM IR")
diff --git a/src/internal/task/task_stack.go b/src/internal/task/task_stack.go
index a703d10ac5..172f432c08 100644
--- a/src/internal/task/task_stack.go
+++ b/src/internal/task/task_stack.go
@@ -61,13 +61,13 @@ func (t *Task) Resume() {
 // initialize the state and prepare to call the specified function with the specified argument bundle.
 func (s *state) initialize(fn uintptr, args unsafe.Pointer, stackSize uintptr) {
 	// Create a stack.
-	stack := make([]uintptr, stackSize/unsafe.Sizeof(uintptr(0)))
+	stack := make([]unsafe.Pointer, stackSize/unsafe.Sizeof(unsafe.Pointer(nil)))
 
 	// Set up the stack canary, a random number that should be checked when
 	// switching from the task back to the scheduler. The stack canary pointer
 	// points to the first word of the stack. If it has changed between now and
 	// the next stack switch, there was a stack overflow.
-	s.canaryPtr = &stack[0]
+	s.canaryPtr = (*uintptr)(unsafe.Pointer(&stack[0]))
 	*s.canaryPtr = stackCanary
 
 	// Get a pointer to the top of the stack, where the initial register values
diff --git a/src/runtime/gc_conservative.go b/src/runtime/gc_blocks.go
similarity index 99%
rename from src/runtime/gc_conservative.go
rename to src/runtime/gc_blocks.go
index 1d0c42c5da..e0dbfda9ec 100644
--- a/src/runtime/gc_conservative.go
+++ b/src/runtime/gc_blocks.go
@@ -1,4 +1,4 @@
-// +build gc.conservative
+// +build gc.blocks
 
 package runtime
diff --git a/src/runtime/gc_globals_conservative.go b/src/runtime/gc_globals_conservative.go
index a45a1a65f5..b4882d6eeb 100644
--- a/src/runtime/gc_globals_conservative.go
+++ b/src/runtime/gc_globals_conservative.go
@@ -1,4 +1,4 @@
-// +build gc.conservative gc.extalloc
+// +build gc.conservative
 // +build baremetal
 
 package runtime
diff --git a/src/runtime/gc_globals_precise.go b/src/runtime/gc_globals_precise.go
index ade5d15026..79659b7882 100644
--- a/src/runtime/gc_globals_precise.go
+++ b/src/runtime/gc_globals_precise.go
@@ -1,4 +1,4 @@
-// +build gc.conservative gc.extalloc
+// +build gc.conservative
 // +build !baremetal
 
 package runtime
diff --git a/src/runtime/gc_list.go b/src/runtime/gc_list.go
new file mode 100644
index 0000000000..4e833f10af
--- /dev/null
+++ b/src/runtime/gc_list.go
@@ -0,0 +1,332 @@
+// +build gc.list
+
+package runtime
+
+import (
+	"internal/task"
+	"runtime/interrupt"
+	"unsafe"
+)
+
+// This memory manager is partially inspired by the memory allocator included in avr-libc.
+// Unlike avr-libc malloc, this memory manager stores an allocation list instead of a free list.
+// Additionally, this memory manager uses strongly typed memory - making it a precise GC.
+// There is an overhead of 3 pointer-widths/allocation (up from avr-libc's 1 pointer-width/allocation).
+// The allocation list is stored in ascending address order.
+// The set of free spans is implicitly derived from the gaps between allocations.
+// This allocator has an effective allocation complexity of O(n) and worst-case GC complexity of O(n^2).
+// Due to architectural quirks, as well as tiny heaps, this should almost always be faster than the standard conservative collector on AVR.
+
+// isInHeap checks if an address is inside the heap.
+func isInHeap(addr uintptr) bool {
+	return addr >= heapStart && addr < heapEnd
+}
+
+// gcType is a chunk of metadata used to semi-precisely scan an allocation.
+// The compiler will produce constant globals which can be used in this form.
+type gcType struct {
+	// size is the element size, measured in increments of pointer alignment.
+	size uintptr
+
+	// data is a bitmap following the size value.
+	// It is organized as a []byte, and set bits indicate places where a pointer may be present.
+	data struct{}
+}
+
+// allocNode is a node in the allocations list.
+// It is prepended to every allocation, resulting in an overhead of 4 bytes per allocation.
+type allocNode struct {
+	// next is a pointer to the next allocation node.
+	next *allocNode
+
+	// len is the length of the body of this node in bytes.
+	len uintptr
+
+	// typ is the memory type of this node.
+	// If it is nil, there are no pointer slots to scan in the node.
+	typ *gcType
+
+	// base is the start of the body of this node.
+	base struct{}
+}
+
+// allocList is a singly linked list of allocations.
+// It is stored in ascending address order.
+var allocList *allocNode
+
+// scanList is a stack of allocations to scan.
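+// markAddr moves reachable nodes here from allocList; GC pops them, scans them, and rebuilds the heap list.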
+var scanList *allocNode
+
+func markRoot(addr, root uintptr) {
+	markAddr(root)
+}
+
+func markRoots(start, end uintptr) {
+	// Scan the data as a []unsafe.Pointer, using a temporary gcType value.
+	// In the future, we can do precise scanning of the globals instead.
+	t := struct {
+		t    gcType
+		data [1]byte
+	}{
+		t: gcType{
+			size: 1, // 1 pointer-width
+		},
+		data: [1]byte{1}, // scan the pointer-width data
+	}
+	t.t.scan(start, end)
+}
+
+// markAddr marks the allocation containing the specified address.
+func markAddr(addr uintptr) {
+	if !isInHeap(addr) {
+		// The address is not in the heap.
+		return
+	}
+
+	// Search the allocation list for the address.
+	for node, prev := allocList, &allocList; node != nil; node, prev = node.next, &node.next {
+		baseAddr := uintptr(unsafe.Pointer(&node.base))
+		if addr < baseAddr {
+			// The address comes before this node.
+			// Therefore the address must either be that of a node header or a free span.
+			return
+		}
+
+		endAddr := baseAddr + node.len
+		if addr < endAddr {
+			// The address is included in this allocation.
+			// Move the allocation to the scan stack.
+			*prev = node.next
+			scanList, node.next = node, scanList
+			return
+		}
+	}
+}
+
+// scan the memory in [start, end) using the specified element type.
+// If the type is larger than the region, the memory will be scanned as if it is a slice of that type.
+func (t *gcType) scan(start, end uintptr) {
+	if t == nil {
+		// There are no pointers in the type.
+		return
+	}
+
+	ptrSize, ptrAlign := unsafe.Sizeof(unsafe.Pointer(nil)), unsafe.Alignof(unsafe.Pointer(nil))
+
+	// Align the start and end.
+	start = align(start)
+	end &^= ptrAlign - 1
+
+	// Shift the end down so that we do not read past it.
+	end -= ptrSize - ptrAlign
+
+	width := t.size
+
+	for start < end {
+		// Process the bitmap a byte at a time.
+		for i := uintptr(0); i <= width/8; i++ {
+			mask := *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&t.data)) + i))
+			for j := uintptr(0); mask != 0; j++ {
+				if mask&1 != 0 {
+					markAddr(*(*uintptr)(unsafe.Pointer(start + 8*i*ptrAlign + j*ptrAlign)))
+				}
+				mask >>= 1
+			}
+		}
+
+		// Shift the start up and try to scan the next repeat.
+		start += width * ptrAlign
+	}
+}
+
+// GC runs a garbage collection cycle.
+func GC() {
+	// Mark phase: mark all reachable objects, recursively.
+	markGlobals()
+	markStack()
+	var markedTaskQueue task.Queue
+runqueueScan:
+	if baremetal && hasScheduler {
+		// Channel operations in interrupts may move task pointers around while we are marking.
+		// Therefore we need to scan the runqueue separately.
+		for !runqueue.Empty() {
+			// Pop the next task off of the runqueue.
+			t := runqueue.Pop()
+
+			// Mark the task if it has not already been marked.
+			markRoot(uintptr(unsafe.Pointer(&runqueue)), uintptr(unsafe.Pointer(t)))
+
+			// Push the task onto our temporary queue.
+			markedTaskQueue.Push(t)
+		}
+	}
+
+	var keep *allocNode
+	for scanList != nil {
+		// Pop a node off of the scan list.
+		node := scanList
+		scanList = node.next
+
+		// Scan the node.
+		if node.typ != nil {
+			// The code-gen for AVR on LLVM has. . . issues.
+			// Hoisting the nil check here saves ~50 clock cycles per iteration.
+			baseAddr := uintptr(unsafe.Pointer(&node.base))
+			endAddr := baseAddr + node.len
+			node.typ.scan(baseAddr, endAddr)
+		}
+
+		// Insert the node into the output heap.
+		var prev *allocNode
+		keepNode := keep
+		for keepNode != nil && uintptr(unsafe.Pointer(node)) > uintptr(unsafe.Pointer(keepNode)) {
+			// Move onto the next node.
+			prev, keepNode = keepNode, keepNode.next
+		}
+		if prev == nil {
+			keep, node.next = node, keep
+		} else {
+			prev.next, node.next = node, keepNode
+		}
+	}
+
+	if baremetal && hasScheduler {
+		// Restore the runqueue.
+		i := interrupt.Disable()
+		if !runqueue.Empty() {
+			// Something new came in while finishing the mark.
+			interrupt.Restore(i)
+			goto runqueueScan
+		}
+		runqueue = markedTaskQueue
+		interrupt.Restore(i)
+	}
+
+	// Sweep phase: replace the heap.
+	allocList = keep
+}
+
+// findMem searches the heap for a free span large enough to contain an allocation of the specified size.
+// If there are no sufficiently large free spans available, this returns nil.
+func findMem(size uintptr) *allocNode {
+	// This memory allocator implementation is effectively the same algorithm applied by avr-libc.
+	// It loops through the set of all free spans, and selects the smallest span that is large enough to fit the allocation.
+
+	nodeSize := unsafe.Sizeof(allocNode{}) + size
+
+	// best* store the best-fit free span.
+	var bestDst **allocNode
+	var bestStart uintptr
+	var bestSize uintptr
+
+	start := heapStart
+	dst := &allocList
+searchLoop:
+	for {
+		// Find the allocation node after this free span.
+		node := *dst
+
+		// Find the end of this free span.
+		var end uintptr
+		if node != nil {
+			// The node terminates the free span.
+			end = uintptr(unsafe.Pointer(node))
+		} else {
+			// The free span ends at the end of the heap.
+			end = heapEnd
+		}
+
+		// Calculate the size of the free span.
+		freeSpanSize := end - start
+
+		switch {
+		case freeSpanSize == nodeSize:
+			// This span is a perfect fit.
+			bestDst = dst
+			bestStart = start
+			break searchLoop
+		case freeSpanSize > nodeSize && (bestDst == nil || bestSize > nodeSize):
+			// This span is a better fit than the previous best.
+			bestDst = dst
+			bestStart = start
+			bestSize = freeSpanSize
+		}
+
+		// Move to the next free span.
+		if node == nil {
+			// That was the last free region.
+			break searchLoop
+		}
+		start = align(uintptr(unsafe.Pointer(&node.base)) + node.len)
+		dst = &node.next
+	}
+
+	if bestDst == nil {
+		// There is no suitable allocation.
+		return nil
+	}
+
+	mem := (*allocNode)(unsafe.Pointer(bestStart))
+	*bestDst, mem.next = mem, *bestDst
+	mem.len = size
+	return mem
+}
+
+// anyPtrType is a special fake type that is used when the type of an allocation is not known.
+var anyPtrType = struct {
+	t    gcType
+	data [1]byte
+}{
+	t: gcType{
+		size: 1, // 1 pointer-width
+	},
+	data: [1]byte{1}, // scan the pointer-width data
+}
+
+// alloc a chunk of untyped memory.
+//go:inline
+func alloc(size uintptr) unsafe.Pointer {
+	// Use a placeholder type to scan the entire thing.
+	return allocTyped(size, &anyPtrType.t.size)
+}
+
+// allocTyped tries to find some free space on the heap, possibly doing a garbage
+// collection cycle if needed. If no space is free, it panics.
+//go:noinline
+func allocTyped(size uintptr, typ *uintptr) unsafe.Pointer {
+	var ranGC bool
+tryAlloc:
+	// Search for available memory.
+	node := findMem(size)
+	if node == nil {
+		// There is no free span large enough for the allocation.
+
+		if ranGC {
+			// Even after running the GC, there is not enough memory.
+			runtimePanic("out of memory")
+		}
+
+		// Run the garbage collector and try again.
+		GC()
+		ranGC = true
+		goto tryAlloc
+	}
+
+	// Zero the allocation.
+	ptr := unsafe.Pointer(&node.base)
+	memzero(ptr, size)
+
+	// Apply the type to the allocation.
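+	// typ points at the size word of a compiler-generated gcType global (or anyPtrType for untyped allocations).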
+	node.typ = (*gcType)(unsafe.Pointer(typ))
+
+	return ptr
+}
+
+func free(ptr unsafe.Pointer) {
+	// TODO: free memory on request, when the compiler knows it is unused.
+}
+
+func initHeap() {
+	// This memory manager requires no initialization other than the zeroing of globals.
+	// This function is provided for compatibility with other memory managers.
+}
diff --git a/src/runtime/gc_stack_portable.go b/src/runtime/gc_stack_portable.go
index d48b249723..13f440cfaa 100644
--- a/src/runtime/gc_stack_portable.go
+++ b/src/runtime/gc_stack_portable.go
@@ -1,4 +1,4 @@
-// +build gc.conservative gc.extalloc
+// +build gc.conservative
 // +build wasm
 
 package runtime
diff --git a/src/runtime/gc_stack_raw.go b/src/runtime/gc_stack_raw.go
index 74be7fe82d..1e9abde41f 100644
--- a/src/runtime/gc_stack_raw.go
+++ b/src/runtime/gc_stack_raw.go
@@ -1,4 +1,4 @@
-// +build gc.conservative gc.extalloc
+// +build gc.conservative
 // +build !wasm
 
 package runtime
diff --git a/src/runtime/runtime_unix_heap.go b/src/runtime/runtime_unix_heap.go
index 1aefd2f615..0afa873323 100644
--- a/src/runtime/runtime_unix_heap.go
+++ b/src/runtime/runtime_unix_heap.go
@@ -1,7 +1,7 @@
 // +build darwin linux,!baremetal,!wasi freebsd,!baremetal
 // +build !nintendoswitch
 
-// +build gc.conservative gc.leaking
+// +build !gc.none,!gc.extalloc
 
 package runtime
diff --git a/src/runtime/string.go b/src/runtime/string.go
index c1c12cc78c..bf6bd95975 100644
--- a/src/runtime/string.go
+++ b/src/runtime/string.go
@@ -50,67 +50,56 @@ func stringLess(x, y string) bool {
 }
 
 // Add two strings together.
-func stringConcat(x, y _string) _string {
-	if x.length == 0 {
+func stringConcat(x, y string) string {
+	switch {
+	case len(x) == 0:
 		return y
-	} else if y.length == 0 {
+	case len(y) == 0:
 		return x
-	} else {
-		length := x.length + y.length
-		buf := alloc(length)
-		memcpy(buf, unsafe.Pointer(x.ptr), x.length)
-		memcpy(unsafe.Pointer(uintptr(buf)+x.length), unsafe.Pointer(y.ptr), y.length)
-		return _string{ptr: (*byte)(buf), length: length}
 	}
+
+	// Create a []byte and unsafe it to a string.
+	buf := make([]byte, len(x)+len(y))
+	copy(buf, x)
+	copy(buf[len(x):], y)
+	return *(*string)(unsafe.Pointer(&buf))
 }
 
 // Create a string from a []byte slice.
-func stringFromBytes(x struct {
-	ptr *byte
-	len uintptr
-	cap uintptr
-}) _string {
-	buf := alloc(x.len)
-	memcpy(buf, unsafe.Pointer(x.ptr), x.len)
-	return _string{ptr: (*byte)(buf), length: x.len}
+func stringFromBytes(x []byte) string {
+	buf := make([]byte, len(x))
+	copy(buf, x)
+	return *(*string)(unsafe.Pointer(&buf))
 }
 
 // Convert a string to a []byte slice.
-func stringToBytes(x _string) (slice struct {
-	ptr *byte
-	len uintptr
-	cap uintptr
-}) {
-	buf := alloc(x.length)
-	memcpy(buf, unsafe.Pointer(x.ptr), x.length)
-	slice.ptr = (*byte)(buf)
-	slice.len = x.length
-	slice.cap = x.length
-	return
+func stringToBytes(x string) []byte {
+	buf := make([]byte, len(x))
+	copy(buf, x)
+	return buf
 }
 
 // Convert a []rune slice to a string.
-func stringFromRunes(runeSlice []rune) (s _string) {
+func stringFromRunes(runeSlice []rune) string {
 	// Count the number of characters that will be in the string.
+	var length uintptr
 	for _, r := range runeSlice {
 		_, numBytes := encodeUTF8(r)
-		s.length += numBytes
+		length += numBytes
 	}
 
 	// Allocate memory for the string.
-	s.ptr = (*byte)(alloc(s.length))
+	buf := make([]byte, length)
 
 	// Encode runes to UTF-8 and store the resulting bytes in the string.
 	index := uintptr(0)
 	for _, r := range runeSlice {
 		array, numBytes := encodeUTF8(r)
-		for _, c := range array[:numBytes] {
-			*(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(s.ptr)) + index)) = c
-			index++
-		}
+		copy(buf[index:index+numBytes], array[:numBytes])
+		index += numBytes
 	}
-	return
+	return *(*string)(unsafe.Pointer(&buf))
 }
 
 // Convert a string to []rune slice.
@@ -129,12 +118,9 @@ func stringToRunes(s string) []rune {
 }
 
 // Create a string from a Unicode code point.
-func stringFromUnicode(x rune) _string {
+func stringFromUnicode(x rune) string {
 	array, length := encodeUTF8(x)
-	// Array will be heap allocated.
-	// The heap most likely doesn't work with blocks below 4 bytes, so there's
-	// no point in allocating a smaller buffer for the string here.
-	return _string{ptr: (*byte)(unsafe.Pointer(&array)), length: length}
+	return string(array[:length])
 }
 
 // Iterate over a string.
diff --git a/targets/avr.json b/targets/avr.json
index 31c2d9a750..131564d978 100644
--- a/targets/avr.json
+++ b/targets/avr.json
@@ -4,7 +4,7 @@
 	"goos": "linux",
 	"goarch": "arm",
 	"compiler": "avr-gcc",
-	"gc": "conservative",
+	"gc": "list",
 	"linker": "avr-gcc",
 	"scheduler": "none",
 	"default-stack-size": 256,
diff --git a/targets/cortex-m.json b/targets/cortex-m.json
index 6f031546cd..5989e67672 100644
--- a/targets/cortex-m.json
+++ b/targets/cortex-m.json
@@ -3,7 +3,7 @@
 	"goos": "linux",
 	"goarch": "arm",
 	"compiler": "clang",
-	"gc": "conservative",
+	"gc": "blocks",
 	"scheduler": "tasks",
 	"linker": "ld.lld",
 	"rtlib": "compiler-rt",
diff --git a/targets/nintendoswitch.json b/targets/nintendoswitch.json
index b7bc5bcd48..e976914534 100644
--- a/targets/nintendoswitch.json
+++ b/targets/nintendoswitch.json
@@ -7,7 +7,7 @@
 	"linker": "ld.lld",
 	"rtlib": "compiler-rt",
 	"libc": "picolibc",
-	"gc": "conservative",
+	"gc": "blocks",
 	"relocation-model": "pic",
 	"cpu": "cortex-a57",
 	"cflags": [
diff --git a/targets/riscv.json b/targets/riscv.json
index 3b727992ce..da6ace8984 100644
--- a/targets/riscv.json
+++ b/targets/riscv.json
@@ -2,7 +2,7 @@
 	"goos": "linux",
 	"goarch": "arm",
 	"build-tags": ["tinygo.riscv", "baremetal", "linux", "arm"],
-	"gc": "conservative",
+	"gc": "blocks",
 	"compiler": "clang",
 	"linker": "ld.lld",
 	"rtlib": "compiler-rt",
diff --git a/targets/xtensa.json b/targets/xtensa.json
index 276a8131e4..2717795c59 100644
--- a/targets/xtensa.json
+++ b/targets/xtensa.json
@@ -3,7 +3,7 @@
 	"goos": "linux",
 	"goarch": "arm",
 	"build-tags": ["xtensa", "baremetal", "linux", "arm"],
-	"gc": "conservative",
+	"gc": "blocks",
 	"scheduler": "none",
 	"compiler": "clang",
 	"cflags": [
diff --git a/transform/allocs.go b/transform/allocs.go
index f21dde1233..945279e228 100644
--- a/transform/allocs.go
+++ b/transform/allocs.go
@@ -21,7 +21,10 @@ const maxStackAlloc = 256
 // escape analysis, and within a function looks whether an allocation can escape
 // to the heap.
 func OptimizeAllocs(mod llvm.Module) {
-	allocator := mod.NamedFunction("runtime.alloc")
+	allocator := mod.NamedFunction("runtime.allocTyped")
+	if allocator.IsNil() {
+		allocator = mod.NamedFunction("runtime.alloc")
+	}
 	if allocator.IsNil() {
 		// nothing to optimize
 		return
diff --git a/transform/optimizer.go b/transform/optimizer.go
index 917083ce80..decc738b32 100644
--- a/transform/optimizer.go
+++ b/transform/optimizer.go
@@ -226,5 +226,9 @@ func getFunctionsUsedInTransforms(config *compileopts.Config) []string {
 	default:
 		panic(fmt.Errorf("invalid scheduler %q", config.Scheduler()))
 	}
+	switch config.GC() {
+	case "list":
+		fnused = append(fnused, "runtime.allocTyped")
+	}
 	return fnused
 }
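
The contract between the compiler-side gctype.Typer and the gc.list runtime is easiest to see with a concrete layout. The standalone Go sketch below is illustrative only and is not part of the patch: it assumes a target with 4-byte pointer size and alignment, and the names pointerBitmap and listNode are hypothetical stand-ins. The real descriptors are emitted by gctype.Typer as LLVM globals of the form { size, [N x i8] } and consumed by gcType.scan in gc_list.go.

// Standalone sketch: how a gc.list type descriptor is laid out, assuming
// 4-byte pointers and 4-byte pointer alignment.
package main

import "fmt"

// pointerBitmap builds the descriptor for a flat word layout: words[i] is
// true when word i of the type may contain a pointer.
func pointerBitmap(words []bool) (size uint, bitmap []byte) {
	size = uint(len(words))
	bitmap = make([]byte, (len(words)+7)/8)
	for i, isPtr := range words {
		if isPtr {
			bitmap[i/8] |= byte(1) << (i % 8)
		}
	}
	return size, bitmap
}

func main() {
	// For a type like:
	//	type listNode struct {
	//		next *listNode // word 0: pointer
	//		len  uintptr   // word 1: not a pointer
	//	}
	// the descriptor is a size of 2 words and the bitmap 0b01, so the
	// collector dereferences only word 0 while marking.
	size, bitmap := pointerBitmap([]bool{true, false})
	fmt.Printf("size=%d words, bitmap=%08b\n", size, bitmap[0])
}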